python/tvm/relay/transform/infer_layout_utils.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, missing-docstring, unused-import
"""
Relay infer correct layout pass.
"""
import tvm
from tvm.runtime import Object
from . import _ffi_api


@tvm._ffi.register_object("relay._transform.InferCorrectLayoutOutput")
class InferCorrectLayoutOutput(Object):
    """An output structure to hold results from FInferCorrectLayout calls."""

    def __init__(self, input_layouts, output_layouts, new_attrs):
        self.__init_handle_by_constructor__(
            _ffi_api.InferCorrectLayoutOutput, input_layouts, output_layouts, new_attrs
        )
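
# Illustrative sketch (not part of the original file): how code might package
# a layout-inference result with the class above. The hook signature, layout
# strings, and the pass-through of `attrs` are placeholder assumptions for
# illustration, not a prescribed convention.
def _example_build_layout_output(attrs):
    from tvm import tir

    data_layout = tir.layout("NCHW")
    kernel_layout = tir.layout("OIHW")
    # One input layout per argument, one output layout, unchanged attrs.
    return InferCorrectLayoutOutput([data_layout, kernel_layout], [data_layout], attrs)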
https://github.com/zk-ml/tachikoma
python/tvm/relay/transform/memory_plan.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return,invalid-name,len-as-condition,too-many-nested-blocks
"""
A pass for manifesting explicit memory allocations.
"""
from typing import Optional, Dict, List, Tuple
from collections import defaultdict

import attr

from ..expr_functor import ExprMutator
from .. import op, expr
from ..function import Function
from ... import register_func, ir, cpu
from ..._ffi.runtime_ctypes import Device
from ... import IRModule
from .. import transform
from . import function_pass


def is_primitive(call):
    return (
        hasattr(call, "op")
        and hasattr(call.op, "attrs")
        and hasattr(call.op.attrs, "Primitive")
        and int(call.op.attrs.Primitive) == 1
    )


@attr.s(auto_attribs=True)
class Region:
    """
    Represents a control-free allocation region.

    The below pass groups sets of allocations into regions,
    then replaces the region with a single allocation.
    """

    var: expr.Var
    size: expr.Expr
    alignment: Optional[expr.Expr]
    dtype: Optional[str]
    device: Device
    offsets: Dict[expr.Var, Tuple[expr.Expr, expr.Expr]]

    @staticmethod
    def empty(region_no):
        zero = expr.const(0, dtype="int64")
        assert len(zero.data.shape) == 0
        region_var = expr.var(f"region{region_no}")
        return Region(region_var, zero, None, None, None, {})

    def grow(
        self,
        old_storage: expr.Var,
        size: expr.Expr,
        alignment: expr.Expr,
        dev: Device,
        dtype: str,
    ) -> None:
        """Grow the region by a given allocation, as well as track the old storage
        for later rewriting the program to use the allocated region.
        """
        if self.dtype:
            assert self.dtype == dtype, "must have matching dtypes in a region"
        else:
            self.dtype = dtype

        if self.alignment:
            assert ir.structural_equal(
                self.alignment, alignment
            ), "must have matching alignments in a region"
        else:
            self.alignment = alignment

        if self.device:
            assert (
                self.device.device_type == dev.device_type
                and self.device.device_id == dev.device_id
            ), "must have matching device"
        else:
            assert dev
            self.device = dev

        new_size = (
            (size + self.alignment - expr.const(1, "int64")) / self.alignment * self.alignment
        )

        # Record the offset at which we allocate the storage.
        offset_var: expr.RelayExpr = expr.var(f"offset{len(self.offsets)}")
        self.offsets[old_storage] = (offset_var, self.size)

        self.size = self.size + new_size

    def offset_for(self, alloc: expr.Expr) -> expr.Expr:
        return self.offsets.get(alloc, [None])[0]

    def to_expr(self, body: expr.Expr) -> expr.Expr:
        """
        Generate the prelude code for a region, wrapping the body in it.

        The prelude contains the single allocation for a region, and
        all offset computations.
        """
        if self.device is None:
            self.device = cpu(0)

        # Generate bindings for each and every size computation;
        # we must do this to maintain ANF.
        bindings: List[Tuple[expr.Expr, expr.Expr]] = []

        # First compute the total size.
        total_size = expr.var(f"total_size{hash(body)}")
        bindings.append((total_size, self.size))

        # Allocate the entire region with a single call.
        alloc = op.memory.alloc_storage(total_size, self.alignment, self.device, self.dtype)
        bindings.append((self.var, alloc))

        # Generate variables which contain all of the offset math.
        # Ensure we constant evaluate away all the math here.
        #
        # In theory we can support dynamic offsets, but this
        # requires another round of memory planning and
        # potentially coalescing.
        for alloc in self.offsets:
            (var, offset) = self.offsets[alloc]
            bindings.append((var, offset))

        body = mk_let(bindings, body)
        return body


def iterative_let(let, each_binding, kont):
    bindings = []
    while isinstance(let, expr.Let):
        lhs = let.var
        rhs = let.value
        bindings.append(each_binding(lhs, rhs))
        let = let.body

    return kont(bindings, let)


def mk_let(bindings, body):
    for var, value in reversed(bindings):
        assert var
        assert value
        assert body
        body = expr.Let(var, value, body)

    return body


def const_eval(mod, exp):
    mod = IRModule.from_expr(exp, type_defs=mod.type_definitions)
    mod = transform.FoldConstant()(mod)
    return mod["main"]


class StorageCoalesce(ExprMutator):
    """
    A pass for coalescing allocations into region/arena allocations.

    After this pass each allocation comes from the same backing storage,
    but will never overlap even in time, i.e. the allocations are just
    packed into a contiguous block of memory.

    A secondary part of memory planning will perform liveness analysis to
    overlap these in time, i.e. when an early tensor dies we will attempt
    to reuse its slot.
    """

    def __init__(self):
        super().__init__()
        self.regions = []

    def enter_scope(self) -> None:
        region_no = len(self.regions)
        self.regions.append(defaultdict(lambda: Region.empty(region_no)))

    def exit_scope(self, body: expr.Expr) -> expr.Expr:
        """When leaving a scope build a region allocation for the scope."""
        dtype_region = self.regions.pop()
        for _, region in reversed(list(dtype_region.items())):
            if len(region.offsets) != 0:
                body = region.to_expr(body)

        return body

    def current_region(self, dtype) -> Region:
        current_scope = self.regions[-1]
        return current_scope[dtype]

    def new_region_and_offset(self, old_storage):
        for dtype_region in reversed(self.regions):
            for dtype in dtype_region:
                region = dtype_region[dtype]
                offset = region.offset_for(old_storage)
                if offset:
                    return region, offset

        raise Exception("could not find offset in any valid region")

    def visit_function(self, fn):
        """Transform the function body to use region allocation scheme."""
        func = fn
        if getattr(func.attrs, "Primitive", 0) == 1:
            return super().visit_function(func)
        else:
            self.enter_scope()
            body = self.visit(func.body)
            body = self.exit_scope(body)
            return Function(
                func.params,
                body,
                func.ret_type,
                func.type_params,
                func.attrs,
            )

    def visit_if(self, ite):
        self.enter_scope()
        true_branch = self.visit(ite.true_branch)
        true_branch = self.exit_scope(true_branch)

        self.enter_scope()
        false_branch = self.visit(ite.false_branch)
        false_branch = self.exit_scope(false_branch)

        return expr.If(ite.cond, true_branch, false_branch)

    def mk_let(self, dynamic_regions):
        """Let bind the dynamic regions"""

        def _mk_let(bindings, body):
            for var, value in reversed(bindings):
                assert var
                assert value is not None
                assert body
                body = expr.Let(var, value, body)
                if var in dynamic_regions:
                    body = self.exit_scope(body)

            return body

        return _mk_let

    def visit_let(self, let):
        dynamic_regions = []

        def _each_binding(lhs, rhs):
            if isinstance(rhs, expr.Call) and rhs.op == op.op.get("memory.alloc_storage"):
                return self.process_alloc_storage(dynamic_regions, lhs, rhs)
            elif isinstance(rhs, expr.Call) and rhs.op == op.op.get("memory.alloc_tensor"):
                return self.process_alloc_tensor(lhs, rhs)
            else:
                return lhs, rhs

        result = iterative_let(let, _each_binding, self.mk_let(dynamic_regions))
        assert result
        return result

    def process_alloc_storage(self, dynamic_regions, lhs, call):
        """Process alloc_storage"""
        size, alignment = call.args
        dtype = call.attrs.dtype
        dev = Device(call.attrs.device_type, call.attrs.device_id)

        if not isinstance(size, expr.Constant):
            self.enter_scope()
            dynamic_regions.append(lhs)
        else:
            # A new scope is created when entering a new region with a
            # different device.
            region = self.current_region(dtype)
            if region.device and region.device.device_type != dev.device_type:
                self.enter_scope()
                dynamic_regions.append(lhs)

        region = self.current_region(dtype)
        region.grow(lhs, size, alignment, dev, dtype)
        return lhs, region.var

    def process_alloc_tensor(self, lhs, call):
        """Process alloc tensor. Region and offset are computed"""
        storage, old_offset, shape = call.args
        region, offset = self.new_region_and_offset(storage)

        assert old_offset.data.numpy().item() == 0, "no offsets should yet be allocated"
        return (
            lhs,
            expr.Call(call.op, [region.var, offset, shape], call.attrs),
        )


class LiftConst(ExprMutator):
    """An internal pass to lift constants to the top level of function."""

    def __init__(self):
        self.i = 0
        self.constants = []
        self.top_level = True
        super().__init__()

    def visit_constant(self, const):
        var = expr.var(f"const{self.i}")
        self.i += 1
        self.constants.append((var, const))
        return var

    def visit_function(self, fn):
        if int(getattr(fn.attrs, "Primitive", 0)) == 1:
            return fn

        outer_constant = self.constants
        self.constants = []
        # Populates self.constants.
        body = self.visit(fn.body)
        body = mk_let(self.constants, body)
        self.constants = outer_constant

        return Function(fn.params, body, fn.ret_type, fn.type_params, fn.attrs)

    def visit_let(self, let):
        bindings = []
        while isinstance(let, expr.Let):
            new_var = self.visit(let.var)
            new_val = self.visit(let.value)
            bindings.append((new_var, new_val))
            let = let.body

        new_body = self.visit(let)
        return mk_let(bindings, new_body)


@function_pass(opt_level=0)
class MemoryPlan:
    """An explicit pass wrapper around StorageCoalesce."""

    def transform_function(self, func, mod, _):
        mod.import_from_std("core.rly")
        sc = StorageCoalesce()
        func = sc.visit(func)
        return func


register_func("relay.transform.MemoryPlan", MemoryPlan)


@function_pass(opt_level=0)
class LiftConstants:
    """An explicit pass wrapper around LiftConst."""

    def transform_function(self, func, mod, _):
        mod.import_from_std("core.rly")
        func = LiftConst().visit(func)
        return func


register_func("relay.transform.LiftConstants", LiftConstants)
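
# Illustrative sketch (not part of the original file): what mk_let above
# builds. Folding [(x, 1), (y, x + x)] over a body y * y yields the nested
# let-chain `let x = 1 in let y = x + x in y * y`. The names and values are
# assumptions for illustration.
def _example_mk_let():
    from tvm import relay

    x = relay.var("x", dtype="int64")
    y = relay.var("y", dtype="int64")
    bindings = [(x, relay.const(1, "int64")), (y, x + x)]
    return mk_let(bindings, y * y)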
https://github.com/zk-ml/tachikoma
python/tvm/relay/transform/mixed_precision.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long,unused-argument
"""Default behavior for ops in mixed_precision pass. Import this file to use."""
from typing import List

from tvm.relay.op import register_mixed_precision_conversion

# MIXED_PRECISION_ALWAYS ops should always be done in lower precision due to the speed and memory
# savings. MIXED_PRECISION_FOLLOW ops can be done in lower precision but don't have speedups to
# justify a cast. MIXED_PRECISION_NEVER colored ops should not be done in lower precision due to
# numerical reasons.
MIXED_PRECISION_ALWAYS = 0
MIXED_PRECISION_FOLLOW = 1
MIXED_PRECISION_NEVER = 2

# Default lists inspired by TF's classifications:
# github.com/tensorflow/tensorflow/blob/v2.5.0/tensorflow/core/grappler/optimizers/auto_mixed_precision_lists.h
# They have a bias toward Nvidia Tensor Cores, so modify the lists per your hardware choice.
DEFAULT_ALWAYS_LIST = [
    "nn.conv1d",
    "nn.conv2d",
    "nn.conv3d",
    "nn.conv1d_transpose",
    "nn.conv2d_transpose",
    "nn.conv3d_transpose",
    "nn.dense",
    "nn.batch_matmul",
]

DEFAULT_FOLLOW_LIST = [
    # These ops add new data or change shape
    "nn.pad",
    "nn.batch_flatten",
    "concatenate",
    "zeros",
    "split",
    "squeeze",
    "transpose",
    "expand_dims",
    "reshape",
    "dyn.reshape",
    "broadcast_to_like",
    "dyn.broadcast_to",
    "strided_slice",
    "dyn.strided_slice",
    "take",
    "argwhere",
    "where",
    "tile",
    "dyn.tile",
    "scatter",
    "full",
    "dyn.full",
    "nn.depth_to_space",
    # Comparison
    "less",
    "greater",
    "less_equal",
    "greater_equal",
    # By definition copy and cast will depend on inputs for output.
    "copy",
    "cast",
    "cast_like",
    # Simple arithmetic
    "add",
    "subtract",
    "multiply",
    "divide",
    "nn.bias_add",
    "nn.batch_norm",
    "sqrt",
    "shape_of",
    # Simple activations
    "max",
    "min",
    "maximum",
    "minimum",
    "argmax",
    "argmin",
    "nn.relu",
    "nn.leaky_relu",
    "nn.prelu",
    "nn.dropout",
    # Complicated activations which saturate in a narrow range
    "sigmoid",
    "tanh",
    "fast_tanh",
    # Some coefficients outside of representable range, but probably ok
    "fast_exp",
    "fast_erf",
    "clip",  # Usually safe, may result in oddity if clip greater than fp16 range
    # Pooling operations
    "nn.max_pool1d",
    "nn.max_pool2d",
    "nn.max_pool3d",
    "nn.avg_pool1d",
    "nn.avg_pool2d",
    "nn.avg_pool3d",
    # "nn.global_max_pool1d",  # does not exist yet
    "nn.global_max_pool2d",
    # "nn.global_max_pool3d",  # does not exist yet
    "nn.adaptive_max_pool1d",
    "nn.adaptive_max_pool2d",
    "nn.adaptive_max_pool3d",
    "image.resize2d",
]

DEFAULT_NEVER_LIST = [
    # In general, if |f(x)| >> |x| for expected inputs then put the op here.
    "exp",
    "power",
    "nn.cross_entropy",
    "nn.cross_entropy_with_logits",
    "nn.softmax",
    "nn.l2_normalize",
    # Error function doesn't seem to be able to be lowered into fp16 version in llvm.
    # Move to follow list when it does.
    "erf",
    # Do not allow arange arguments (begin/end) to be fp16. "end" can be a big fp32 number
    # not representable in fp16.
    "arange",
    # Ops that could involve a large summation are not allowed in fp16.
    "nn.global_avg_pool2d",
    "nn.adaptive_avg_pool1d",
    "nn.adaptive_avg_pool2d",
    "nn.adaptive_avg_pool3d",
    "sum",
    "mean",
    "variance",
    "nn.layer_norm",
]


# Returns a decorator which registers the given function under
# FTVMMixedPrecisionConversionType for every op in list_ops.
def register_func_to_op_list(list_ops: List):
    def decorator(func):
        for op_name in list_ops:
            register_mixed_precision_conversion(op_name, func=func)

    return decorator


def get_generic_out_dtypes(call_node: "relay.Call", mixed_precision_type: str) -> List[str]:
    """A function which returns output dtypes in a way which works for most ops.

    Parameters
    ----------
    call_node: relay.Call
        The call node containing the op.

    mixed_precision_type: str
        The target type to run the operation in.

    Returns
    -------
    output_dtypes : [str, str]
        A list of two strings. The first represents the datatype used for
        accumulation in the operation. The second represents the actual
        output datatype.
    """
    # Assume support accumulation dtypes <---> has out_dtype attr.
    # This is because there is no better way right now to tell which ops support accumulating
    # at different data types.
    # Some discussion about making this better is here:
    # https://discuss.tvm.apache.org/t/rfc-relay-fp32-fp16-model-support/9994/4?u=andrewzhaoluo
    if hasattr(call_node.attrs, "out_dtype"):
        # TODO (AndrewZhaoLuo): evaluate consistent support for mixed_type accumulators
        # return ["float32", mixed_precision_type]
        return [mixed_precision_type, mixed_precision_type]

    # [accumulation_dtype, output_dtype] for the operations
    return [mixed_precision_type, mixed_precision_type]


# Functions for FTVMMixedPrecisionConversionType which
# take in CallNodes and a DType and return a conversion type,
# an accumulation dtype, and an output_dtype.
@register_func_to_op_list(list_ops=DEFAULT_ALWAYS_LIST)
def generic_always_op(call_node: "relay.Call", mixed_precision_type: str) -> List:
    return [MIXED_PRECISION_ALWAYS] + get_generic_out_dtypes(call_node, mixed_precision_type)


@register_func_to_op_list(list_ops=DEFAULT_FOLLOW_LIST)
def generic_follow_op(call_node: "relay.Call", mixed_precision_type: str) -> List:
    return [MIXED_PRECISION_FOLLOW] + get_generic_out_dtypes(call_node, mixed_precision_type)


@register_func_to_op_list(list_ops=DEFAULT_NEVER_LIST)
def generic_never_op(call_node: "relay.Call", mixed_precision_type: str) -> List:
    return [MIXED_PRECISION_NEVER] + get_generic_out_dtypes(call_node, mixed_precision_type)
https://github.com/zk-ml/tachikoma
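A hedged sketch of extending the defaults in mixed_precision.py above: re-registering an op at a level above the default takes precedence over the registration made at import time. Treating nn.softmax as FOLLOW here is an illustrative assumption, not a recommendation, and may not be numerically safe for every model.

from typing import List

from tvm.relay.op import register_mixed_precision_conversion
from tvm.relay.transform.mixed_precision import MIXED_PRECISION_FOLLOW


@register_mixed_precision_conversion("nn.softmax", level=11)
def softmax_follow(call_node: "relay.Call", mixed_precision_type: str) -> List:
    # [conversion category, accumulation dtype, output dtype]
    return [MIXED_PRECISION_FOLLOW, mixed_precision_type, mixed_precision_type]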
python/tvm/relay/transform/recast.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Relay type recasting pass"""
import tvm
from tvm import relay
from tvm.ir import IRModule
from .transform import InferType
from ..analysis import count_layers
from ..expr_functor import ExprMutator, Call


class RecastMutator(ExprMutator):
    """Cast operations to the target type."""

    def __init__(self, dtype, out_dtype, valid_ops, valid_op_count, skip_layers):
        self.dtype = dtype
        self.out_dtype = out_dtype
        self.depth_count = 0
        self.valid_ops = [relay.op.get(op) for op in valid_ops]
        self.valid_op_count = valid_op_count
        self.skip_layers = skip_layers
        # Convert negative indices to positive ones.
        for i, layer in enumerate(skip_layers):
            if layer < 0:
                skip_layers[i] = self.valid_op_count + layer
        super().__init__()

    def visit_call(self, call):
        # Keep track of our current depth and layer count
        # so we can know whether to skip this layer or not.
        current_depth = self.depth_count
        current_layer = self.valid_op_count - current_depth - 1
        if call.op in self.valid_ops:
            self.depth_count += 1
        # Visit current call operation
        new_fn = self.visit(call.op)
        # Visit current arguments
        args = []
        for arg in call.args:
            args.append(self.visit(arg))
        self.depth_count = current_depth

        # Downcast this op if it's the correct type and not skipped.
        if call.op in self.valid_ops and current_layer not in self.skip_layers:
            # Recast inputs to specified type.
            if call.op == relay.op.get("concatenate"):
                if len(call.args) != 1 or not isinstance(call.args[0], relay.expr.Tuple):
                    return Call(new_fn, args, call.attrs)
                tuple_args = [self.visit(arg) for arg in call.args[0].fields]
                new_args = list()
                for arg in tuple_args:
                    new_args.append(relay.cast(arg, dtype=self.dtype))
                new_args = [relay.expr.Tuple(new_args)]
            else:
                args = [self.visit(arg) for arg in call.args]
                new_args = list()
                for arg in args:
                    new_args.append(relay.cast(arg, dtype=self.dtype))

            # If out_dtype is in the attributes, we need to update it.
            orig_dtype = None
            if call.attrs is not None and "out_dtype" in call.attrs.keys():
                new_attr_dict = {}
                for attr in call.attrs.keys():
                    attr_value = call.attrs[attr]
                    if isinstance(attr_value, tvm.ir.container.Array):
                        attr_value = tuple(attr_value)
                    new_attr_dict[str(attr)] = attr_value
                new_attr_dict["out_dtype"] = self.out_dtype
                attr_type = str(call.attrs).split("(")[0]
                new_attrs = tvm.ir.make_node(attr_type, **new_attr_dict)
                if call.attrs["out_dtype"] != "":
                    orig_dtype = call.attrs["out_dtype"]
            else:
                new_attrs = call.attrs

            if orig_dtype is None:
                # Perform type inference to determine the original type.
                new_mod = IRModule.from_expr(call)
                new_mod = InferType()(new_mod)
                checked_arg = new_mod["main"].body
                orig_dtype = checked_arg.checked_type.dtype
            # Recast the output for compatibility with other graph operations.
            return relay.cast(Call(new_fn, new_args, new_attrs), orig_dtype)

        # Otherwise return the unchanged call.
        return Call(new_fn, args, call.attrs)


def recast(expr, dtype, out_dtype, ops=None, skip_layers=None):
    """Convert the types of operations in a graph to a new value.
    Note that this is primarily useful for testing performance of individual
    operations at the new datatype. In a real setting, this pass will
    almost certainly do a poor job converting from one datatype to another
    as it just applies hard casting. For example, when recasting from float
    to integer, many small values will simply be set to 0. Although this will
    allow autotuning and benchmarking to produce proper timings at the new
    data type, the output of the model will of course be heavily impacted.

    Parameters
    ----------
    expr: tvm.relay.Expr, tvm.relay.Function, or tvm.ir.IRModule
        The original function that will have its type changed.

    dtype: str
        The target type to cast to.

    out_dtype: str
        The output type to cast to.

    ops: List[str]
        A list of operations that should have their type changed,
        others will be left as is.

    skip_layers: List[int]
        A list of integers indicating operations that should not have
        their type changed, counted starting with the first valid operation
        encountered. Negative indices are allowed and indicate starting at
        the last layer.

    Returns
    -------
    output_expr : tvm.relay.Expr, tvm.relay.Function, or tvm.ir.IRModule
        The graph after recasting to the specified datatype.
    """
    return_mod = False
    if isinstance(expr, tvm.ir.IRModule):
        expr = expr["main"]
        return_mod = True
    if ops is None:
        ops = ["nn.conv2d"]
    if skip_layers is None:
        skip_layers = []
    layer_depth = count_layers(expr, ops)
    recast_pass = RecastMutator(dtype, out_dtype, ops, layer_depth, skip_layers)
    expr = recast_pass.visit(expr)
    if return_mod:
        return tvm.IRModule.from_expr(expr)
    return expr
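
# Illustrative usage sketch (not part of the original file): recast the conv2d
# layer of a small network to int8 with int32 accumulation. The network is an
# assumed example.
def _example_recast():
    data = relay.var("data", shape=(1, 3, 224, 224), dtype="float32")
    weight = relay.var("weight", shape=(16, 3, 3, 3), dtype="float32")
    out = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1))
    func = relay.Function([data, weight], out)
    # Cast conv2d inputs to int8 and accumulate/output in int32.
    return recast(func, "int8", "int32", ops=["nn.conv2d"])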
https://github.com/zk-ml/tachikoma
python/tvm/relay/transform/transform.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, missing-docstring, unused-import
"""
Relay pass transformation infrastructure.
"""
import functools
import inspect
import types
import warnings

import tvm.ir
from tvm import relay, te
from tvm.runtime import ndarray as _nd

from ..backend.utils import mangle_module_name
from . import _ffi_api


def build_config(opt_level=2, required_pass=None, disabled_pass=None, trace=None):
    """Configure the build behavior by setting config variables. This function
    will be deprecated in TVM v0.7. Instead, we should directly use
    tvm.transform.PassContext.

    Parameters
    ----------
    opt_level: int, optional
        Optimization level. The optimization pass name and level are as the
        following:

        .. code-block:: python

            OPT_PASS_LEVEL = {
                "SimplifyInference": 0,
                "OpFusion": 1,
                "FoldConstant": 2,
                "FoldScaleAxis": 3,
                "AlterOpLayout": 3,
                "CanonicalizeOps": 3,
                "CanonicalizeCast": 3,
                "EliminateCommonSubexpr": 3,
                "CombineParallelConv2D": 4,
                "CombineParallelDense": 4,
                "CombineParallelBatchMatmul": 4,
                "FastMath": 4
            }

    required_pass: set of str, optional
        Optimization passes that are required regardless of optimization level.

    disabled_pass: set of str, optional
        Optimization passes to be disabled during optimization.

    trace: Callable[[IRModule, PassInfo, bool], None]
        A tracing function for debugging or introspection.

    Returns
    -------
    pass_context: PassContext
        The pass context for optimizations.
    """
    warnings.warn(
        "relay.build_config will be deprecated. Please use \
        tvm.transform.PassContext directly",
        DeprecationWarning,
    )
    return tvm.transform.PassContext(opt_level, required_pass, disabled_pass, trace)


@tvm._ffi.register_object("relay.FunctionPass")
class FunctionPass(tvm.ir.transform.Pass):
    """A pass that works on each tvm.relay.Function in a module. A function
    pass class should be created through `function_pass`.
    """


def InferType():
    """Infer the type of an expr.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered type inference pass.
    """
    return _ffi_api.InferType()


def InferTypeLocal(expr):
    """Infer the type of a single expr, reusing type information to do so.

    This populates the checked_type field in expr. We assume existing type
    information in the graph is correct!

    Parameters
    ----------
    expr: relay.Expr
        The expression we want to know the type of.

    Returns
    -------
    type: relay.Type
        The type of the expression.
    """
    return _ffi_api.InferTypeLocal(expr)


def FoldScaleAxis():
    """Fold the scaling of axis into weights of conv2d/dense. This pass will
    invoke both forward and backward scale folding.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass to fold expressions.

    Note
    ----
    Internally, we will call backward_fold_scale_axis before using
    forward_fold_scale_axis, as backward folding targets the common
    conv->bn pattern.
    """
    return _ffi_api.FoldScaleAxis()


def BackwardFoldScaleAxis():
    """Backward fold axis scaling into weights of conv2d/dense.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass to backward fold expressions.

    Note
    ----
    It is recommended to call backward_fold_scale_axis before using
    forward_fold_scale_axis, as backward folding targets the common
    conv->bn pattern.
    """
    return _ffi_api.BackwardFoldScaleAxis()


def RemoveUnusedFunctions(entry_functions=None):
    """Remove unused global relay functions in a relay module.

    Parameters
    ----------
    entry_functions: list[string]
        The set of entry functions to start from.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass to remove unused functions.
    """
    if entry_functions is None:
        entry_functions = ["main"]
    return _ffi_api.RemoveUnusedFunctions(entry_functions)


def ForwardFoldScaleAxis():
    """Fold the scaling of axis into weights of conv2d/dense.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass to forward fold expressions.

    Note
    ----
    It is recommended to call backward_fold_scale_axis before using
    forward_fold_scale_axis, as backward folding targets the common
    conv->bn pattern.
    """
    return _ffi_api.ForwardFoldScaleAxis()


def SimplifyInference():
    """Simplify the data-flow graph for inference phase. A simplified
    expression which is semantically equal to the input expression will be
    returned. Note that batch norms will only be simplified if their result
    is indexed at tuple index 0.

    Returns
    -------
    ret: tvm.transform.Pass
        The registered pass to perform operator simplification.
    """
    return _ffi_api.SimplifyInference()


def FastMath():
    """Converts the expensive non-linear functions to their fast but
    approximate counterparts.

    Returns
    -------
    ret: tvm.transform.Pass
        The registered pass to perform fast math operations.
    """
    return _ffi_api.FastMath()


def CanonicalizeOps():
    """Canonicalize special operators to basic operators. This can simplify
    subsequent analysis, e.g. expanding bias_add to expand_dims and
    broadcast_add.

    Returns
    -------
    ret: tvm.transform.Pass
        The registered pass performing the canonicalization.
    """
    return _ffi_api.CanonicalizeOps()


def DeadCodeElimination(inline_once=False, ignore_impurity=False):
    """Remove expressions that do not have any users (dead code).

    Parameters
    ----------
    inline_once: Optional[Bool]
        Whether to inline a binding that is referenced exactly once.

    ignore_impurity: Optional[Bool]
        Whether to ignore possible side-effects in let-bound expressions.

    Returns
    -------
    ret: tvm.transform.Pass
        The registered pass that eliminates the dead code in a Relay program.
    """
    return _ffi_api.DeadCodeElimination(inline_once, ignore_impurity)


def LazyGradientInit():
    """Reduces memory usage of gradient tensors.

    Returns
    -------
    ret: tvm.transform.Pass
        A pass which delays and/or reduces memory allocation, by lazily
        allocating zero- or one-filled tensors.
    """
    return _ffi_api.LazyGradientInit()


def FoldConstantExpr(expr, mod, fold_qnn=False):
    """Fold the constant expressions in a Relay program.

    Parameters
    ----------
    expr: Expr
        The expression to fold.

    mod: IRModule
        The module the expr lives in (for global calls).

    fold_qnn: bool
        Whether to fold constants for QNN operations.

    Returns
    -------
    new_expr: Expr
        The expr after Constant Folding.
    """
    return _ffi_api.FoldConstantExpr(expr, mod, fold_qnn)


def FoldConstant(fold_qnn=False):
    """Fold the constant expressions in a Relay program.

    For backward compatibility reasons, it skips folding QNN primitives by
    default. Some transformation passes, like FakeQuantizationToInteger,
    require QNN primitives to be kept for constant subgraphs. Uncontrolled
    constant folding of QNN primitives may break the applicability of
    FakeQuantizationToInteger. We suggest using the FoldConstant pass with
    the non-default fold_qnn=True value only when all other QNN-sensitive
    passes have already been applied.

    Parameters
    ----------
    fold_qnn: bool
        Whether to fold constants for QNN operations.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass for constant folding.
    """
    return _ffi_api.FoldConstant(fold_qnn)


def FuseOps(fuse_opt_level=-1):
    """Fuse operators in an expr to a larger operator according to some rules.

    Parameters
    ----------
    fuse_opt_level : int
        The level of fuse optimization. -1 indicates that the level will be
        inferred from pass context.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass for operator fusion.
    """
    return _ffi_api.FuseOps(fuse_opt_level)


def DefuseOps():
    """The inverse operation of FuseOps. It transforms a fused program
    returned by FuseOps into the program before FuseOps
    (i.e., x == DefuseOps(FuseOps(x))).

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass for operator defusion.
    """
    return _ffi_api.DefuseOps()


def CombineParallelConv2D(min_num_branches=3):
    """Combine multiple conv2d operators into one.

    Parameters
    ----------
    min_num_branches : int
        The minimum number of required parallel branches for performing this
        optimization.

    Returns
    -------
    ret: tvm.transform.Pass
        The registered pass that combines parallel conv2d operators.
    """
    return _ffi_api.CombineParallelConv2D(min_num_branches)


def CombineParallelDense(min_num_branches=3, to_batch=True):
    """Combine multiple dense operators into one. For example:

    .. code-block

                        data
                /               \\
            dense (2,2)          dense (2,2)
                |                    |
        elemwise/bcast (2,2)   elemwise/bcast (2,2)

    Would become:

    .. code-block

              data
               |
        batch_matmul+elemwise/bcast (2,2,2)

    or (if to_batch=False)

    .. code-block

              data
               |
        dense+elemwise/bcast (2,2+2)

    Parameters
    ----------
    min_num_branches : int
        The minimum number of required parallel branches for performing this
        optimization.

    to_batch : bool
        If True, combine parallel dense ops into batch_matmul op.
        If False, combine parallel dense ops into dense op.

    Returns
    -------
    ret: tvm.transform.Pass
        The registered pass that combines parallel dense operators.
    """
    return _ffi_api.CombineParallelDense(min_num_branches, to_batch)


def CombineParallelBatchMatmul(min_num_branches=3):
    """Combine multiple batch matmul operators into one. For example:

    .. code-block

                                data (1, 2, 3)
                        /                         \\
        batch_matmul(data, (1, 4, 3))    batch_matmul(data, (1, 5, 3))
                |                                  |
        elemwise/bcast (1, 2, 4)          elemwise/bcast (1, 2, 5)

    Would become:

    .. code-block

               data (1, 2, 3)
                     |
        batch_matmul(data, (1, 4+5, 3))
                     |
         elemwise/bcast (1, 2, 4+5)

    Parameters
    ----------
    min_num_branches : int
        The minimum number of required parallel branches for performing this
        optimization.

    Returns
    -------
    ret: tvm.transform.Pass
        The registered pass that combines parallel batch_matmul operators.
    """
    return _ffi_api.CombineParallelBatchMatmul(min_num_branches)


def BatchingOps():
    """Batching parallel operators into one for Conv2D, Dense and BatchMatmul.

    Returns
    -------
    ret: tvm.transform.Pass
        The sequential pass which applies batching for different operator
        types.
    """
    return tvm.transform.Sequential(
        [CombineParallelConv2D(), CombineParallelDense(), CombineParallelBatchMatmul()]
    )


def AlterOpLayout():
    """Alternate the layouts of operators or replace primitive operators with
    other expressions. This pass can be used for computing convolution in
    custom layouts or other general weight pre-transformation.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass that alters the layout of operators.
    """
    return _ffi_api.AlterOpLayout()


class LayoutConfig(object):
    """A structure for customizing the ConvertLayout pass."""

    current = None

    def __init__(self, skip_layers=None):
        self.skip_counter = 0
        self.skip_layers = skip_layers if skip_layers is not None else []

    def check_skip(self):
        skip = self.skip_counter in self.skip_layers
        self.skip_counter += 1
        return skip

    def reset(self):
        self.skip_counter = 0
        self.skip_layers = []

    def __enter__(self):
        self._old_manager = LayoutConfig.current
        LayoutConfig.current = self
        return self

    def __exit__(self, ptype, value, trace):
        LayoutConfig.current = self._old_manager


def ConvertLayout(desired_layouts):
    """Given a dest layout, this pass transforms the expr such that most of
    the ops' input data layout is changed to the dest layout. In the ideal
    situation, there are only 2 layout transforms, one at the start and one
    at the end.

    This pass is not a part of relay.build and is expected to be called
    between framework-relay parser and relay.build call. This is very helpful
    for hardware backends that support/prefer only one type of data layout.

    RFC - https://discuss.tvm.apache.org/t/layout-conversion-pass/4009

    This pass uses most of the AlterOpLayout and InferCorrectLayout
    infrastructure. We can define new layouts for conv2d ops for now. Most of
    the other operators try to adapt to their input layout using the
    InferCorrectLayout infrastructure.

    Parameters
    ----------
    desired_layouts : map of op_name to list of layouts
        Specify a mapping of operator names to a list of layouts to convert
        to, in the order defined by the operator. An example for nn.conv2d
        could be: {"nn.conv2d": ["NHWC", "OHWI"]}, where the first item in
        the list specifies the data layout and the second specifies the
        kernel layout.

    Returns
    -------
    pass: FunctionPass
        The pass.
    """
    return _ffi_api.ConvertLayout(desired_layouts)
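
# Illustrative usage sketch (not part of the original file): convert a
# module's conv2d ops to NHWC data layout. "default" keeps the operator's
# preferred kernel layout; `mod` is an assumed Relay IRModule.
def _example_convert_layout(mod):
    desired_layouts = {"nn.conv2d": ["NHWC", "default"]}
    with tvm.transform.PassContext(opt_level=3):
        return ConvertLayout(desired_layouts)(mod)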

def Legalize(legalize_map_attr_name="FTVMLegalize"):
    """Legalizes an expression with another expression. This pass can be used
    to replace an expr with another expr for target dependent optimizations.
    For example, one expr, though semantically equivalent to the other, can
    have better performance on a target. This pass can be used to legalize
    the expr in a target-dependent manner.

    Parameters
    ----------
    legalize_map_attr_name : str
        The Op's attr name which corresponds to the legalize rule function.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass that rewrites an expr.
    """
    return _ffi_api.Legalize(legalize_map_attr_name)


def MergeComposite(pattern_table):
    """Merge multiple operators into a single composite relay function.

    Parameters
    ----------
    pattern_table : List[Tuple[str, tvm.relay.dataflow_pattern.DFPattern, Function]]
        A list of (pattern_name, pattern, check) tuples. The order of the
        patterns in the list will determine the order of priority in which
        they are matched. 'check' is a function to check whether an extracted
        pattern matches. It can be implemented by the pattern writer, but if
        not specified it will always return True.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass that merges operators into a single composite
        relay function.
    """
    pattern_names = []
    patterns = []
    checks = []
    for tup in pattern_table:
        if len(tup) == 2:
            pattern_name, pattern = tup
            check = lambda extract: True
        elif len(tup) == 3:
            pattern_name, pattern, check = tup

        pattern_names.append(pattern_name)
        patterns.append(pattern)
        checks.append(check)

    return _ffi_api.MergeComposite(pattern_names, patterns, *checks)


def MergeCompilerRegions():
    """Merge together compiler regions.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass that merges compiler regions.
    """
    return _ffi_api.MergeCompilerRegions()


def ToANormalForm():
    """Turn Graph Normal Form expression into A Normal Form Expression. The
    scope of the root expression is the global scope. The scope of any non
    root expression is the least common ancestor of all its scopes. Values
    are ordered by post-DFS order in each scope.

    Returns
    -------
    ret : Union[tvm.transform.Pass, tvm.relay.Expr]
        The registered pass that transforms an expression into A Normal Form.
    """
    return _ffi_api.ToANormalForm()


def ToANormalFormExpr(e):
    """ToANormalForm, but on expression level.

    Parameters
    ----------
    e : Expr
        The graph expression.

    Returns
    -------
    ret : Expr
        The transformed expression.
    """
    return _ffi_api.ToANormalFormExpr(e)


def ToBasicBlockNormalForm():
    """Turn an expression to Basic Block Normal Form. We define a block as a
    group of expressions implied by the scope structure. Each graph node can
    only belong to a single block. For any value that is being used in
    multiple blocks, it has to be referred to by a Var which is defined in a
    block, whose scope is the least common ancestor of the blocks this value
    is used in.

    Returns
    -------
    ret: tvm.transform.Pass
        The registered pass that transforms an expression into Basic Block
        Normal Form.
    """
    return _ffi_api.ToBasicBlockNormalForm()


def ToCPS(expr, mod=None):
    """
    Turn expression into continuation passing style (CPS). Every intermediate
    compute will be passed to a continuation.

    Returns
    -------
    result: tvm.transform.Pass
        The registered pass that transforms an expression into CPS.
    """
    return _ffi_api.to_cps(expr, mod)


def EtaExpand(expand_constructor=False, expand_global_var=False):
    """Add abstraction over a constructor or global variable bound to a
    function.

    Parameters
    ----------
    expand_constructor: bool
        Whether to expand constructors.

    expand_global_var: bool
        Whether to expand global variables.

    Returns
    -------
    ret: tvm.transform.Pass
        The registered pass that eta expands an expression.
    """
    return _ffi_api.EtaExpand(expand_constructor, expand_global_var)


def ToGraphNormalForm():
    """Turn a Relay program in A Normal Form into Graph Normal Form.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass that transforms an expression into Graph Normal
        Form.
    """
    return _ffi_api.ToGraphNormalForm()


def EliminateCommonSubexpr(fskip=None):
    """Eliminate common subexpressions.

    Parameters
    ----------
    fskip: Callable
        The callback function that decides whether an expression should be
        skipped.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass that eliminates common subexpressions.
    """
    return _ffi_api.EliminateCommonSubexpr(fskip)


def PartialEvaluate():
    """Evaluate the static fragment of the code.

    Note
    ----
    This transformation could be either `Module -> Module` or `Expr -> Expr`.
    It will directly transform the input expression to a new one if the
    target expression is provided. Otherwise, it will rely on the pass
    manager to carry out transformation.

    Returns
    -------
    ret: tvm.transform.Pass
        The registered pass that performs partial evaluation on an expression.
    """
    return _ffi_api.PartialEvaluate()


def CanonicalizeCast():
    """
    Canonicalize cast expressions to make operator fusion more efficient.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass that canonicalizes cast expression.
    """
    return _ffi_api.CanonicalizeCast()


def LambdaLift():
    """
    Lift the closure to global function.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass that lifts the lambda function.
    """
    return _ffi_api.LambdaLift()


def PartitionGraph(mod_name="default", bind_constants=True):
    """Partition a Relay program into regions that can be executed on
    different backends.

    Parameters
    ----------
    mod_name : string
        Controls the prefix of the name of each partitioned subgraph. If
        `mod_name` is None, then the `tvmgen_` prefix is used. Otherwise, the
        `tvmgen_mod_name_` prefix is used.

    bind_constants: bool
        Whether or not to bind constants in partitioned subgraphs. Note that
        the codegen needs to maintain the bound constants; otherwise the
        constants will be maintained by the metadata module. So it is
        recommended for C-source based codegens to set bind_constants=False
        to avoid embedding large constants in a C source file.

    Returns
    -------
    ret: tvm.transform.Pass
        The registered pass that partitions the Relay program.
    """
    mod_name = mangle_module_name(mod_name)
    return _ffi_api.PartitionGraph(mod_name, bind_constants)


def AnnotateTarget(targets, include_non_call_ops=True):
    """Annotate ops in an expression with a provided compiler/target and then
    use it for codegen.

    Parameters
    ----------
    targets : str or List[str]
        The list of target compilers used for codegen.

    include_non_call_ops : boolean
        If True, then non-call ops also will be annotated with targets.
        If False, then non-call ops will not be processed.

    Returns
    -------
    ret : tvm.transform.Pass
        The annotated pass that wraps ops with subgraph_start and
        subgraph_end.
    """
    if isinstance(targets, str):
        targets = [targets]
    return _ffi_api.AnnotateTarget(
        [tvm.runtime.container.String(t) for t in targets], include_non_call_ops
    )


def DynamicToStatic():
    """If possible, convert tvm.relay.dynamic* ops to static versions.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass for dynamic->static conversion.
    """
    return _ffi_api.DynamicToStatic()


def Inline():
    """Perform inlining on the given Relay IR module. The global functions
    that are marked as `inline` should be always inlined. A cost model will
    be needed in the future to decide if it is profitable to inline the
    function.

    Returns
    -------
    ret: tvm.transform.Pass
        The registered pass that performs inlining for a Relay IR module.
    """
    return _ffi_api.Inline()


def gradient(expr, mod=None, mode="higher_order"):
    """
    Transform the input function, returning a function that calculates the
    original result, paired with the gradient of the input.

    Parameters
    ----------
    expr : tvm.relay.Expr
        The input expression, which is a Function or a GlobalVar.

    mod : Optional[tvm.IRModule]

    mode : Optional[String]
        The mode of the automatic differentiation algorithm.
        'first_order' only works on first order code, but will not produce
        references nor closures.
        'higher_order' works on all code using references and closures.

    Returns
    -------
    expr : tvm.relay.Expr
        The transformed expression.
    """
    if mode == "first_order":
        warnings.warn(
            "using transform.gradient for first-order AD is deprecated, please use the"
            "FirstOrderGradient module pass",
            DeprecationWarning,
        )
        if mod is not None:
            raise RuntimeError(
                "to run first-order AD on a module, please use the FirstOrderGradient module pass."
            )
        return FirstOrderGradient()(tvm.IRModule.from_expr(expr))["main"]
    if mode == "higher_order":
        return _ffi_api.gradient(expr, mod)
    raise Exception("unknown mode")
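
# Illustrative usage sketch (not part of the original file): higher-order AD
# on a tiny squaring function; the function itself is an assumed example.
def _example_gradient():
    x = relay.var("x", shape=(3,), dtype="float32")
    mod = tvm.IRModule.from_expr(relay.Function([x], x * x))
    mod = InferType()(mod)
    # Returns a typed function computing the original result paired with a
    # tuple of input gradients.
    return gradient(mod["main"], mod)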
Parameters ---------- pass_func : Optional[Callable[(Function, Module, PassContext) -> Function]] The transformation function or class. opt_level : int The optimization level of this module pass. name : Optional[str] The name of the function pass. The name could be empty. In this case, the name of the optimization function will be used as the pass name. required : Optional[List[str]] The list of passes that the module pass is dependent on. Returns ------- create_function_pass : Union[Callable, FunctionPass] A decorator will be returned if pass_func is not provided, otherwise return the decorated result. The returned decorator has two behaviors depending on the input: A new FunctionPass will be returned when we decorate a pass function. A new FunctionPass class will be returned when we decorate a class type. Examples -------- The following code block decorates a function pass class. .. code-block:: python @relay.transform.function_pass(opt_level=1) class TestReplaceFunc: def __init__(self, new_func): self.new_func = new_func def transform_function(self, func, mod, ctx): # just for demo purposes # transform func to new_func return self.new_func x = relay.var("x", shape=(10, 20)) f1 = relay.Function([x], x) f2 = relay.Function([x], relay.log(x)) # fpass is now a special pass that replaces every # function to f1 fpass = TestReplaceFunc(f1) # now every function in input_mod is replaced by f1 res_mod = fpass(input_mod) The following code creates a function pass by decorating a user defined transform function. .. code-block:: python @relay.transform.function_pass(opt_level=2) def transform(func, mod, ctx): # my transformations here. return func function_pass = transform assert isinstance(function_pass, transform.FunctionPass) assert function_pass.info.opt_level == 2 # Given a module m, the optimization could be invoked as the follwoing: updated_mod = function_pass(m) # Now constant folding should have been applied to every function in # the provided module m. And the updated module will be returned. """ if opt_level is None: raise ValueError("Please provide opt_level for the function pass.") required = required if required else [] if not isinstance(required, (list, tuple)): raise TypeError("Required is expected to be the type of " + "list/tuple.") def create_function_pass(pass_arg): """Internal function that creates a function pass""" fname = name if name else pass_arg.__name__ info = tvm.transform.PassInfo(opt_level, fname, required) if inspect.isclass(pass_arg): return _wrap_class_function_pass(pass_arg, info) if not isinstance(pass_arg, (types.FunctionType, types.LambdaType)): raise TypeError("pass_func must be a callable for Module pass") return _ffi_api.MakeFunctionPass(pass_arg, info) if pass_func: return create_function_pass(pass_func) return create_function_pass @function_pass(opt_level=1) class ChangeBatch: """ Change the batch size. Parameters ---------- data: Dict[relay.Var, int] A dictionary of all the params to change. The keys are all params, and the values are which dimension hold the batch. batch_size: int The batch size to change to. Returns ------- pass: FunctionPass The pass. 
""" def __init__(self, data, batch_size=16): self.data = data self.batch_size = batch_size def transform_function(self, func, mod, ctx): func = relay.Function(func.params, func.body, None, func.type_params, func.attrs) change_batch = self class ChangeBatchMutator(tvm.relay.ExprMutator): def visit_var(self, var): if var in change_batch.data: ty = var.type_annotation new_shape = list(ty.shape) new_shape[change_batch.data[var]] = change_batch.batch_size return relay.Var(var.name_hint, relay.TensorType(new_shape, ty.dtype)) return var return ChangeBatchMutator().visit(func) def DenseToSparse(weight_name, weight_shape): """ Rewrite qualified ```nn.dense operation``` to ```nn.sparse_dense``` This pass is used in ```data_dep_optimization.bsr_dense``` Parameters of this pass is generated by ```analysis.sparse_dense.process_params``` Parameters ---------- weight_name: Array[String] Names of weights which qualified sparse contrains weight_shape: Array[Array[IntImm]] Weights shape in BSR format. Returns ------- ret : tvm.transform.Pass The registered DenseToSparse pass. """ return _ffi_api.DenseToSparse(weight_name, weight_shape) def Conv2dToSparse(weight_name, weight_shape, layout, kernel_size): """ Rewrite qualified ```nn.conv2d operation``` to ```nn.sparse_conv2d``` Parameters ---------- weight_name: Array[String] Names of weights which qualified sparse contrains weight_shape: Array[Array[IntImm]] Weights shape in BSR format. layout : str layout of data Returns ------- ret : tvm.transform.Pass The registered DenseToSparse pass. """ return _ffi_api.Conv2dToSparse(weight_name, weight_shape, layout, kernel_size) def Conv2dToSparse2(layout, kernel_size, blocksize, sparsity_threshold): """ Rewrite freezed ```nn.conv2d``` operation to ```nn.sparse_conv2d``` Parameters ---------- layout : str layout of data kernel_size : int kernel size of conv2d Returns ------- ret : tvm.transform.Pass The registered DenseToSparse pass. """ return _ffi_api.Conv2dToSparse2(layout, kernel_size, *blocksize, sparsity_threshold) def SimplifyFCTranspose(target_weight_name): """ Rewrite ```y = nn.dense(x, transpose(w, [1, 0]))``` to ```y = nn.dense(x, wt)``` This pass is used in ```data_dep_optimization.simplify_fc_transpose``` Parameters ---------- weight_name: Array[String] Names of weights which qualified ```y = nn.dense(x, transpose(w, [1, 0]))``` This parameter is generated by ```analysis.search_fc_transpose``` function Returns ------- ret : tvm.transform.Pass The registered SimplifyFCTranspose pass. """ return _ffi_api.SimplifyFCTranspose(target_weight_name) def SimplifyExpr(): """ Simplify the Relay expression, including merging consecutive reshapes. Returns ------- ret : tvm.transform.Pass The registered SimplifyExpr pass. """ return _ffi_api.SimplifyExpr() def PlanDevices(config): """ Uses existing "on_device" and "device_copy" calls to infer the virtual device on which every Relay sub-expression should run and the result stored. Captures the result of that analysis using new "on_device" and "device_copy" calls. Sub-expressions which are not otherwise constrained are assigned to the default primitive virtual device describe by config. However data and computations which must be hosted on a CPU (such as shapes and shape functions) use the host virtual device of the config. Parameters ---------- config : tvm.CompilationConfig The compilation configuration, specifying available targets and default devices. Returns ------- ret : tvm.transforms.Pass The pass. 
""" return _ffi_api.PlanDevices(config) def ManifestLifetimes(): """ Manifest the lifetimes of variables after allocations have been manifested, by inserting kill operations once variables become dead. """ return _ffi_api.ManifestLifetimes() def FoldExplicitPadding(): """ FoldExplicitPadding finds explict padding before an op that can support implicit padding and fuses them. Returns ------- ret : tvm.transform.Pass The registered ImplicitPadding pass. """ return _ffi_api.FoldExplicitPadding() def AnnotateSpans(): """ Annotate a program with span information by first generating its textual representation and then parsing it back into a Relay AST annotated with span information. Returns ------- ret : tvm.transform.Pass The registered AnnotateSpans pass. """ return _ffi_api.AnnotateSpans() def FakeQuantizationToInteger(hard_fail=False, use_qat=False): # pylint: disable=anomalous-backslash-in-string """ Find regions of the graph of the form .. code-block:: text x w | | dq dq \\ / op1 | op2 | q where ``q == qnn.quantize`` and ``dq = qnn.dequantize`` and rewrite them into integer versions of ``op1`` and ``op2`` Rules for rewriting indivdual ops are in fake_quantization_to_integer.py Parameters ---------- hard_fail : boolean How do deal with errors during graph rewriting. If true, raise an error. If false, skip rewriting the subgraph. use_qat : boolean To perform an additional QAT pass - convert enabled operations with dequantized inputs. Example: in the graph above op2 is not registered with the FakeQuantizationToInteger attribute, op1 operation can still be converted. Converted pattern below: .. code-block:: text x w | | \\ / op1 | dq | op2 | q Returns ------- ret : tvm.transform.Pass The registered FakeQuantizationToInteger pass. """ return _ffi_api.FakeQuantizationToInteger(hard_fail, use_qat) def FlattenAtrousConv(): # pylint: disable=anomalous-backslash-in-string """ The purpose of this pass is to find a sequence of space_to_batch_nd-conv2d-batch_to_space_nd operations: .. code-block:: text x w | | s2b | \\ / conv2d | b2s and convert them into subgraphs with a convolution with the modified "dilation" and recalculated "padding" parameters. Returns ------- ret : tvm.transform.Pass The registered FlattenAtrousConv pass. """ return _ffi_api.FlattenAtrousConv() def ToMixedPrecision(mixed_precision_type="float16", missing_op_mode=1): """ Automatic mixed precision rewriter. Rewrite an FP32 relay graph into a version where as many operations as possible are in the target mixed_precision_type. Parameters ---------- mixed_precision_type: str The target datatype to transform operations in the graph to use. missing_op_mode: int Determines how to handle ops not registered with FTVMMixedPrecisionConversionType 0: Does not allow any missing ops. Will throw errors when encountering any. 1: Allow missing ops but emit warnings. 2: Allow missing ops and silently ignore them. relay.ToMixedPrecision.keep_orig_output_dtype: boolean Defines if outputs should be retained in original data type or convert to mixed_precision_type. By default this parameter is False and transformation modifies the data types of outputs to mixed_precision_type. This parameter is not part of explicit arguments of the transformation, but should be passed through tvm.transform.PassContext. Returns ------- ret : tvm.transform.Pass The registered pass. 
""" if missing_op_mode < 0 or missing_op_mode > 2: raise ValueError("Missing op mode is either 0, 1, or 2") return _ffi_api.ToMixedPrecision(mixed_precision_type, missing_op_mode) def SplitArgs(max_function_args): """Split function with huge number of arguments to smaller pieces. Returns ------- ret : tvm.transform.Pass The registered pass for constant folding. """ return _ffi_api.SplitArgs(max_function_args) def OutlineCompilerFunctionsWithExistingGlobalSymbols(compiler_filter=""): """Outlines all literal functions in direct call positions which have a "Compiler" attribute. The outlined functions are bound to unique global vars according to their existing "global_symbol" attribute. At most one function with the same global symbol is outlined. If compiler_filter is non-empty only functions with that as their attribute value are outlined. This pass may be useful for external codegen using the "RelayToTIR" custom pass mechanism to prepare the IRModule before custom lowering. Parameters ---------- compiler_filter : String If non-empty, the "Compiler" attribute to filter on. Returns ------- ret : tvm.transform.Pass The pass. """ return _ffi_api.OutlineCompilerFunctionsWithExistingGlobalSymbols(compiler_filter) def MarkCompilerFunctionsAsExtern(compiler_filter=""): """Marks all global functions which have a "Compiler" attribute matching compiler_filter as 'extern'. The function's attributes are replaced with a single "Extern" attribute, and all calls to the function are switched to use the 'call_lowered' calling convention. If compiler_filter is non-empty only functions with that as their attribute value are outlined. This pass may be useful for external codegen using the "RelayToTIR" custom pass mechanism to cleanup the IRModule after custom lowering. Parameters ---------- compiler_filter : String If non-empty, the "Compiler" attribute to filter on. Returns ------- ret : tvm.transform.Pass The pass. """ return _ffi_api.MarkCompilerFunctionsAsExtern(compiler_filter) def CapturePostDfsIndexInSpans(): """Captures the post-dfs index and dominator post-dfs index of (most) expression nodes in their span, in the form "index:<post-dfs index>:<dominator post-dfs index>". This is useful for debugging since a) it helps identify pretty-printed sub-expressions within the overall model and b) the indexes are heavily used by Collage for its compact representation of sub-graphs. Note that Op and Constructor nodes are not changed even though they are assigned an post-dfs index. Returns ------- ret : tvm.transform.Pass The pass. """ return _ffi_api.CapturePostDfsIndexInSpans() def InlineCompilerFunctionsBoundTo(global_vars): """Inlines all global functions bound to a global var in global_vars. Both the global "Compiler" attributed function, and any calls to "Composite" functions it its body are inlined. This pass may be useful for external codegen which needs to undo partitioning based on properties of the entire partition. Parameters ---------- global_vars : Array[tvm.relay.GlobalVar] The global vars of all 'Compiler' functions to inline. Returns ------- ret : tvm.transform.Pass The pass. """ return _ffi_api.InlineCompilerFunctionsBoundTo(global_vars) def CollagePartition(config, cost_estimator=None): """Partition the bodies of all functions according to the available targets so as to minimize model latency. See https://github.com/apache/tvm-rfcs/blob/main/rfcs/0062-collage.md. Parameters ---------- config : CompilationConfig The available targets. 
cost_estimator : CostEstimator, optional The custom cost estimator to use for costing each candidate partition. Returns ------- ret : tvm.transform.Pass The pass. """ if cost_estimator is None: cost_estimator = relay.collage.CostEstimator() return _ffi_api.CollagePartition(config, cost_estimator) def DivToMul(): """Transform division by a constant to multiplication by the inverse of the constant""" return _ffi_api.DivToMul()
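# A minimal usage sketch (illustrative only, not part of this module's API):
# building a tiny module and applying two of the passes above. The example
# expression here is hypothetical.
if __name__ == "__main__":
    import tvm
    from tvm import relay

    x = relay.var("x", shape=(1, 8), dtype="float32")
    f = relay.Function([x], relay.divide(x, relay.const(4.0, "float32")))
    mod = tvm.IRModule.from_expr(f)

    # DivToMul turns the division by a constant into a multiplication by its
    # inverse; SimplifyExpr then runs generic expression simplification.
    seq = tvm.transform.Sequential([DivToMul(), SimplifyExpr()])
    with tvm.transform.PassContext(opt_level=3):
        mod = seq(mod)
    print(mod)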
https://github.com/zk-ml/tachikoma
python/tvm/relay/ty.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-import
"""The type nodes of the Relay language."""
from tvm.ir import Type, TypeKind, TypeVar, GlobalTypeVar
from tvm.ir import TypeConstraint, FuncType, TupleType, IncompleteType
from tvm.ir import TypeCall, TypeRelation, TensorType, RelayRefType as RefType

from .base import RelayNode
from . import _ffi_api

Any = _ffi_api.Any


def is_dynamic(tensor_type):
    """Check whether the type has Any or symbolic variables in its shape.

    Parameters
    ----------
    tensor_type : Type
        The type to be inspected

    Returns
    -------
    has_any : bool
        The check result.
    """
    return _ffi_api.IsDynamic(tensor_type)


def ShapeVar(name):
    """A helper which constructs a type var of the shape kind.

    Parameters
    ----------
    name : str
        The name of the shape variable.

    Returns
    -------
    type_var : tvm.relay.TypeVar
        The shape variable.
    """
    return TypeVar(name, kind=TypeKind.ShapeVar)


def scalar_type(dtype):
    """Creates a scalar type.

    This function returns TensorType((), dtype)

    Parameters
    ----------
    dtype : str
        The content data type.

    Returns
    -------
    s_type : tvm.relay.TensorType
        The result type.
    """
    return TensorType((), dtype)
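# A small sketch (illustrative only) of the helpers above: a tensor type with
# an Any dimension is dynamic, while a scalar type is fully static.
if __name__ == "__main__":
    t = TensorType((Any(), 4), "float32")
    assert is_dynamic(t)
    assert not is_dynamic(scalar_type("float32"))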
https://github.com/zk-ml/tachikoma
python/tvm/relay/type_functor.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """The type functor of Relay.""" from .ty import ( TypeVar, IncompleteType, TensorType, FuncType, TupleType, TypeRelation, RefType, GlobalTypeVar, TypeCall, ) from .adt import TypeData class TypeFunctor: """ An abstract visitor defined over Type. Defines the default dispatch over types. """ def __init__(self): # TODO(weberlo): make type vars hashable, so we can memoize pass # pylint: disable=no-else-return def visit(self, typ): """Apply the visitor to a type.""" if isinstance(typ, TypeVar): return self.visit_type_var(typ) elif isinstance(typ, IncompleteType): return self.visit_incomplete_type(typ) elif isinstance(typ, TensorType): return self.visit_tensor_type(typ) elif isinstance(typ, FuncType): return self.visit_func_type(typ) elif isinstance(typ, TupleType): return self.visit_tuple_type(typ) elif isinstance(typ, TypeRelation): return self.visit_type_relation(typ) elif isinstance(typ, RefType): return self.visit_ref_type(typ) elif isinstance(typ, GlobalTypeVar): return self.visit_global_type_var(typ) elif isinstance(typ, TypeCall): return self.visit_type_call(typ) elif isinstance(typ, TypeData): return self.visit_type_data(typ) else: raise Exception("unhandled case: {0}".format(type(typ))) def visit_type_var(self, _): raise NotImplementedError() def visit_incomplete_type(self, _): raise NotImplementedError() def visit_tensor_type(self, _): raise NotImplementedError() def visit_func_type(self, _): raise NotImplementedError() def visit_tuple_type(self, _): raise NotImplementedError() def visit_type_relation(self, _): raise NotImplementedError() def visit_ref_type(self, _): raise NotImplementedError() def visit_global_type_var(self, _): raise NotImplementedError() def visit_type_call(self, _): raise NotImplementedError() def visit_type_data(self, _): raise NotImplementedError() class TypeVisitor(TypeFunctor): """ A visitor over Type. The default behavior recursively traverses the AST. 
""" def visit_type_var(self, tv): pass def visit_incomplete_type(self, it): pass def visit_tensor_type(self, tt): pass def visit_func_type(self, ft): for arg_type in ft.arg_types: self.visit(arg_type) self.visit(ft.ret_type) for type_param in getattr(ft, "type_params", []): self.visit(type_param) for type_constraint in getattr(ft, "type_constraints", []): self.visit(type_constraint) def visit_tuple_type(self, tt): for field in tt.fields: self.visit(field) def visit_type_relation(self, tr): for arg in tr.args: self.visit(arg) def visit_ref_type(self, rt): self.visit(rt.value) def visit_global_type_var(self, gtv): pass def visit_type_call(self, tc): self.visit(tc.func) for arg in tc.args: self.visit(arg) def visit_type_data(self, td): self.visit(td.header) for type_var in td.type_vars: self.visit(type_var) class TypeMutator(TypeFunctor): """ A functional visitor over Type. The default behavior recursively traverses the AST and reconstructs the AST. """ def visit_type_var(self, tv): return TypeVar(tv.name_hint, tv.kind) def visit_incomplete_type(self, it): return IncompleteType(it.kind) def visit_tensor_type(self, tt): return TensorType(tt.shape, tt.dtype) def visit_func_type(self, ft): new_arg_types = [self.visit(arg_type) for arg_type in ft.arg_types] new_ret_type = self.visit(ft.ret_type) new_type_params = [self.visit(type_param) for type_param in getattr(ft, "type_params", [])] new_type_constraints = [ self.visit(type_constraint) for type_constraint in getattr(ft, "type_constraints", []) ] return FuncType(new_arg_types, new_ret_type, new_type_params, new_type_constraints) def visit_tuple_type(self, tt): return TupleType([self.visit(field) for field in tt.fields]) def visit_type_relation(self, tr): return TypeRelation(tr.func, [self.visit(arg) for arg in tr.args], tr.num_inputs, tr.attrs) def visit_ref_type(self, rt): return RefType(self.visit(rt.value)) def visit_global_type_var(self, gtv): return GlobalTypeVar(gtv.name_hint, gtv.kind) def visit_type_call(self, tc): return TypeCall(self.visit(tc.func), [self.visit(arg) for arg in tc.args]) def visit_type_data(self, td): return TypeData( self.visit(td.header), [self.visit(type_var) for type_var in td.type_vars], td.constructors, )
https://github.com/zk-ml/tachikoma
python/tvm/rpc/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Lightweight TVM RPC module.

RPC enables connecting to a remote server, uploading functions, and launching
them there. This is useful for cross-compilation and remote testing: the
compiler stack runs on the local machine, while the RPC server runs on a
remote runtime which does not have a compiler available. A test program
compiles on the local machine, uploads and runs on the remote RPC server,
and fetches the result back to verify correctness.
"""
from .server import Server
from .client import connect, connect_tracker
from .client import RPCSession, LocalSession, PopenSession, TrackerSession
from .minrpc import with_minrpc
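# A minimal end-to-end sketch (illustrative; the address, port, and paths are
# placeholders): compile a function locally, upload it, and load it remotely.
if __name__ == "__main__":
    import tvm
    from tvm import te

    n = te.var("n")
    A = te.placeholder((n,), name="A")
    B = te.compute(A.shape, lambda i: A[i] + 1.0, name="B")
    lib = tvm.build(te.create_schedule(B.op), [A, B], target="llvm", name="add_one")
    lib.export_library("/tmp/add_one.so")  # placeholder path

    remote = connect("127.0.0.1", 9091)  # placeholder server address
    remote.upload("/tmp/add_one.so")
    fremote = remote.load_module("add_one.so")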
https://github.com/zk-ml/tachikoma
python/tvm/rpc/_ffi_api.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """FFI APIs for tvm.rpc""" import tvm._ffi tvm._ffi._init_api("rpc", __name__)
https://github.com/zk-ml/tachikoma
python/tvm/rpc/base.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Base definitions for RPC."""
# pylint: disable=invalid-name

import socket
import time
import json
import errno
import struct
import random
import logging

from .._ffi.base import py_str

# Magic header for RPC data plane
RPC_MAGIC = 0xFF271
# Magic header for RPC tracker (control plane)
RPC_TRACKER_MAGIC = 0x2F271
# success response
RPC_CODE_SUCCESS = RPC_MAGIC + 0
# duplicate key in proxy
RPC_CODE_DUPLICATE = RPC_MAGIC + 1
# cannot find a matching key in the server
RPC_CODE_MISMATCH = RPC_MAGIC + 2

logger = logging.getLogger("RPCServer")


class TrackerCode(object):
    """Enumeration code for the RPC tracker"""

    FAIL = -1
    SUCCESS = 0
    PING = 1
    STOP = 2
    PUT = 3
    REQUEST = 4
    UPDATE_INFO = 5
    SUMMARY = 6
    GET_PENDING_MATCHKEYS = 7


RPC_SESS_MASK = 128

# Use "127.0.0.1" or "::1" if there is a need to force ip4 or ip6
# connection for "localhost".


def get_addr_family(addr):
    res = socket.getaddrinfo(addr[0], addr[1], 0, 0, socket.IPPROTO_TCP)
    return res[0][0]


def recvall(sock, nbytes):
    """Receive all nbytes from the socket.

    Parameters
    ----------
    sock: Socket
        The socket

    nbytes : int
        Number of bytes to be received.
    """
    res = []
    nread = 0
    while nread < nbytes:
        chunk = sock.recv(min(nbytes - nread, 1024))
        if not chunk:
            raise IOError("connection reset")
        nread += len(chunk)
        res.append(chunk)
    return b"".join(res)


def sendjson(sock, data):
    """Send a python value to the remote via json.

    Parameters
    ----------
    sock : Socket
        The socket

    data : object
        Python value to be sent.
    """
    data = json.dumps(data)
    sock.sendall(struct.pack("<i", len(data)))
    sock.sendall(data.encode("utf-8"))


def recvjson(sock):
    """Receive a python value from the remote via json.

    Parameters
    ----------
    sock : Socket
        The socket

    Returns
    -------
    value : object
        The value received.
    """
    size = struct.unpack("<i", recvall(sock, 4))[0]
    data = json.loads(py_str(recvall(sock, size)))
    return data


def random_key(prefix, cmap=None):
    """Generate a random key.

    Parameters
    ----------
    prefix : str
        The string prefix

    cmap : dict
        Conflict map

    Returns
    -------
    key : str
        The generated random key
    """
    if cmap:
        while True:
            key = prefix + str(random.random())
            if key not in cmap:
                return key
    else:
        return prefix + str(random.random())


def connect_with_retry(addr, timeout=60, retry_period=5):
    """Connect to a TCP address with retry.

    This function is only reliable for a short period around a server restart.

    Parameters
    ----------
    addr : tuple
        address tuple

    timeout : float
        Timeout during retry

    retry_period : float
        Number of seconds before we retry again.
""" tstart = time.time() while True: try: sock = socket.socket(get_addr_family(addr), socket.SOCK_STREAM) sock.connect(addr) return sock except socket.error as sock_err: if sock_err.args[0] not in (errno.ECONNREFUSED,): raise sock_err period = time.time() - tstart if period > timeout: raise RuntimeError("Failed to connect to server %s" % str(addr)) logger.warning( "Cannot connect to tracker %s, retry in %g secs...", str(addr), retry_period ) time.sleep(retry_period)
https://github.com/zk-ml/tachikoma
python/tvm/rpc/client.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RPC client tools"""
import os
import socket
import stat
import struct
import time

import tvm._ffi
from tvm._ffi.base import TVMError
from tvm.contrib import utils
from tvm.runtime import ndarray as nd
from tvm._ffi.runtime_ctypes import Device

from . import _ffi_api, base, server


class RPCSession(object):
    """RPC Client session module.

    Do not directly create the object; call connect instead.
    """

    # pylint: disable=invalid-name
    def __init__(self, sess):
        self._sess = sess
        self._tbl_index = _ffi_api.SessTableIndex(sess)
        self._remote_funcs = {}

    def system_lib(self):
        """Get system-wide library module.

        Returns
        -------
        module : runtime.Module
            The system-wide library module.

        See Also
        --------
        tvm.runtime.system_lib
        """
        return self.get_function("runtime.SystemLib")()

    def get_function(self, name):
        """Get function from the session.

        Parameters
        ----------
        name : str
            The name of the function

        Returns
        -------
        f : Function
            The result function.
        """
        return self._sess.get_function(name)

    def device(self, dev_type, dev_id=0):
        """Construct a remote device.

        Parameters
        ----------
        dev_type: int or str
            The device type of the remote device.

        dev_id: int, optional
            The device id of the remote device.

        Returns
        -------
        dev: Device
            The corresponding encoded remote device.
        """
        dev = nd.device(dev_type, dev_id)
        encode = (self._tbl_index + 1) * base.RPC_SESS_MASK
        dev.device_type += encode
        dev._rpc_sess = self
        return dev

    def upload(self, data, target=None):
        """Upload a file to the remote runtime temp folder.

        Parameters
        ----------
        data : str or bytearray
            The local file name or binary blob to upload.

        target : str, optional
            The target path on the remote.
        """
        if isinstance(data, bytearray):
            if not target:
                raise ValueError("target must be present when file is a bytearray")
            blob = data
        else:
            blob = bytearray(open(data, "rb").read())
            if not target:
                target = os.path.basename(data)

        if "upload" not in self._remote_funcs:
            self._remote_funcs["upload"] = self.get_function("tvm.rpc.server.upload")
        self._remote_funcs["upload"](target, blob)

    def download(self, path):
        """Download a file from the remote temp folder.

        Parameters
        ----------
        path : str
            The relative location to the remote temp folder.

        Returns
        -------
        blob : bytearray
            The result blob from the file.
        """
        if "download" not in self._remote_funcs:
            self._remote_funcs["download"] = self.get_function("tvm.rpc.server.download")
        return self._remote_funcs["download"](path)

    def remove(self, path):
        """Remove a file from the remote temp folder.

        Parameters
        ----------
        path: str
            The relative location to the remote temp folder.
        """
        if "remove" not in self._remote_funcs:
            self._remote_funcs["remove"] = self.get_function("tvm.rpc.server.remove")
        self._remote_funcs["remove"](path)

    def load_module(self, path):
        """Load a remote module; the file needs to be uploaded first.

        Parameters
        ----------
        path : str
            The relative location to the remote temp folder.
        Returns
        -------
        m : Module
            The remote module containing the remote function.
        """
        return _ffi_api.LoadRemoteModule(self._sess, path)

    def download_linked_module(self, path):
        """Link a module on the remote and download it.

        Parameters
        ----------
        path : str
            The relative location to the remote temp folder.

        Returns
        -------
        blob : bytearray
            The result blob from the file.

        Note
        ----
        This function can be helpful when a linker
        is not available on the local client.

        Examples
        --------
        .. code-block:: python

            mod = build_module_with_cross_compilation()
            # export the module as tar because a local linker is not available
            mod.export_library("lib.tar")
            remote.upload("lib.tar")
            # invoke the linker on the remote to link the module as a library
            # note that the library can only run on the same env as the remote
            with open("lib.so", "wb") as file:
                file.write(remote.download_linked_module("lib.tar"))
        """
        if "download_linked_module" not in self._remote_funcs:
            self._remote_funcs["download_linked_module"] = self.get_function(
                "tvm.rpc.server.download_linked_module"
            )
        return self._remote_funcs["download_linked_module"](path)

    def cpu(self, dev_id=0):
        """Construct CPU device."""
        return self.device(Device.kDLCPU, dev_id)

    def cuda(self, dev_id=0):
        """Construct CUDA GPU device."""
        return self.device(Device.kDLCUDA, dev_id)

    def cl(self, dev_id=0):
        """Construct OpenCL device."""
        return self.device(Device.kDLOpenCL, dev_id)

    def vulkan(self, dev_id=0):
        """Construct Vulkan device."""
        return self.device(Device.kDLVulkan, dev_id)

    def metal(self, dev_id=0):
        """Construct Metal device."""
        return self.device(Device.kDLMetal, dev_id)

    def rocm(self, dev_id=0):
        """Construct ROCm device."""
        return self.device(Device.kDLROCM, dev_id)

    def ext_dev(self, dev_id=0):
        """Construct extension device."""
        return self.device(Device.kDLExtDev, dev_id)

    def hexagon(self, dev_id=0):
        """Construct Hexagon device."""
        return self.device(Device.kDLHexagon, dev_id)

    def webgpu(self, dev_id=0):
        """Construct WebGPU device."""
        return self.device(Device.kDLWebGPU, dev_id)


class LocalSession(RPCSession):
    """RPCSession interface backed by local environment.

    This class can be used to implement functions that
    need to be run both locally and remotely.
    """

    def __init__(self):
        self._temp = server._server_env([])
        RPCSession.__init__(self, _ffi_api.LocalSession())


@tvm._ffi.register_func("rpc.PopenSession")
def _popen_session(binary):
    temp = utils.tempdir()

    if isinstance(binary, (bytes, bytearray)):
        path_exec = temp.relpath("server.minrpc")
        with open(path_exec, "wb") as outfile:
            outfile.write(binary)
        os.chmod(path_exec, stat.S_IXUSR | stat.S_IRUSR)
        path_exec = os.path.abspath(path_exec)
    else:
        path_exec = os.path.abspath(binary)
        if not os.path.isfile(path_exec):
            raise RuntimeError(f"{path_exec} does not exist.")
        if not os.access(path_exec, os.X_OK):
            raise RuntimeError(f"{path_exec} is not executable.")

    sess = _ffi_api.CreatePipeClient(path_exec)
    return sess


class PopenSession(RPCSession):
    """RPCSession interface backed by popen.

    Parameters
    ----------
    binary : List[Union[str, bytes]]
        The binary to be executed.
    """

    def __init__(self, binary):
        RPCSession.__init__(self, _popen_session(binary))


class TrackerSession(object):
    """Tracker client session.
    Parameters
    ----------
    addr : tuple
        The address tuple
    """

    def __init__(self, addr):
        self._addr = addr
        self._sock = None
        self._connect()

    def __del__(self):
        self.close()

    def _connect(self):
        self._sock = base.connect_with_retry(self._addr)
        self._sock.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC))
        magic = struct.unpack("<i", base.recvall(self._sock, 4))[0]
        if magic != base.RPC_TRACKER_MAGIC:
            raise RuntimeError("%s is not RPC Tracker" % str(self._addr))

    def close(self):
        """Close the tracker connection."""
        if self._sock:
            self._sock.close()
            self._sock = None

    def summary(self):
        """Get the summary dict of the tracker."""
        base.sendjson(self._sock, [base.TrackerCode.SUMMARY])
        value = base.recvjson(self._sock)
        if value[0] != base.TrackerCode.SUCCESS:
            raise RuntimeError("Invalid return value %s" % str(value))
        return value[1]

    def text_summary(self):
        """Get a text summary of the tracker."""
        data = self.summary()
        total_ct = {}

        res = ""
        res += "Server List\n"
        res += "------------------------------\n"
        res += "server-address key\n"
        res += "------------------------------\n"
        sorted_server = sorted(data["server_info"], key=lambda x: x["key"])
        for item in sorted_server:
            addr = item["addr"]
            res += "%21s " % ":".join(map(str, addr))
            res += item["key"] + "\n"
            key = item["key"].split(":")[1]  # 'server:rasp3b' -> 'rasp3b'
            if key not in total_ct:
                total_ct[key] = 0
            total_ct[key] += 1
        res += "------------------------------\n"
        res += "\n"

        # compute max length of device key
        queue_info = data["queue_info"]
        keys = list(queue_info.keys())
        if keys:
            keys.sort()
            max_key_len = max([len(k) for k in keys])
        else:
            max_key_len = 0

        res += "Queue Status\n"
        title = ("%%-%ds" % max_key_len + " total free pending\n") % "key"
        separate_line = "-" * len(title) + "\n"
        res += separate_line + title + separate_line
        for k in keys:
            total = total_ct.get(k, 0)
            free, pending = queue_info[k]["free"], queue_info[k]["pending"]
            if total or pending:
                res += ("%%-%ds" % max_key_len + " %-5d %-4d %-7d\n") % (
                    k,
                    total,
                    free,
                    pending,
                )
        res += separate_line
        return res

    def request(
        self, key, priority=1, session_timeout=0, max_retry=5, session_constructor_args=None
    ):
        """Request a new connection from the tracker.

        Parameters
        ----------
        key : str
            The type key of the device.

        priority : int, optional
            The priority of the request.

        session_timeout : float, optional
            The duration of the session, which allows the server to kill
            the connection when the duration is longer than this value.
            When duration is zero, it means the request must always be kept alive.

        max_retry : int, optional
            Maximum number of times to retry before giving up.

        session_constructor_args : list, optional
            List of additional arguments to be passed to the remote session constructor.
            The first element of the list is always a string specifying the name of
            the session constructor, the following args are the positional args to
            that function.
""" last_err = None for _ in range(max_retry): try: if self._sock is None: self._connect() base.sendjson(self._sock, [base.TrackerCode.REQUEST, key, "", priority]) value = base.recvjson(self._sock) if value[0] != base.TrackerCode.SUCCESS: raise RuntimeError("Invalid return value %s" % str(value)) url, port, matchkey = value[1] return connect( url, port, matchkey, session_timeout, session_constructor_args=session_constructor_args, ) except socket.error as err: self.close() last_err = err except TVMError as err: last_err = err raise RuntimeError( "Cannot request %s after %d retry, last_error:%s" % (key, max_retry, str(last_err)) ) def request_and_run(self, key, func, priority=1, session_timeout=0, max_retry=2): """Request a resource from tracker and run the func. This function safe-guard rare server node dropout during execution. In such case, a new resource will be requested and func will be ran again. Parameters ---------- key : str The type key of the device. func : function of session -> value A stateless function priority : int, optional The priority of the request. session_timeout : float, optional The duration of the session, allows server to kill the connection when duration is longer than this value. When duration is zero, it means the request must always be kept alive. max_retry : int, optional Maximum number of times to retry the function before give up. """ last_err = None for _ in range(max_retry): try: sess = self.request(key, priority=priority, session_timeout=session_timeout) tstart = time.time() return func(sess) except TVMError as err: duration = time.time() - tstart # roughly estimate if the error is due to timeout termination if session_timeout and duration >= session_timeout * 0.95: raise RuntimeError("Session timeout when running %s" % func.__name__) last_err = err raise RuntimeError( "Failed to run on %s after %d retry, last_error:%s" % (key, max_retry, str(last_err)) ) def connect( url, port, key="", session_timeout=0, session_constructor_args=None, enable_logging=False ): """Connect to RPC Server Parameters ---------- url : str The url of the host port : int The port to connect to key : str, optional Additional key to match server session_timeout : float, optional The duration of the session in seconds, allows server to kill the connection when duration is longer than this value. When duration is zero, it means the request must always be kept alive. session_constructor_args: List List of additional arguments to passed as the remote session constructor. The first element of the list is always a string specifying the name of the session constructor, the following args are the positional args to that function. enable_logging: boolean flag to enable/disable logging. Logging is disabled by default. Returns ------- sess : RPCSession The connected session. Examples -------- Normal usage .. code-block:: python client = rpc.connect(server_url, server_port, server_key) Session_constructor can be used to customize the session in the remote The following code connects to a remote internal server via a proxy by constructing another RPCClientSession on the proxy machine and use that as the serving session of the proxy endpoint. .. 
code-block:: python client_via_proxy = rpc.connect( proxy_server_url, proxy_server_port, proxy_server_key, enable_logging session_constructor_args=[ "rpc.Connect", internal_url, internal_port, internal_key, internal_logging]) """ try: if session_timeout: key += " -timeout=%s" % str(session_timeout) session_constructor_args = session_constructor_args if session_constructor_args else [] if not isinstance(session_constructor_args, (list, tuple)): raise TypeError("Expect the session constructor to be a list or tuple") sess = _ffi_api.Connect(url, port, key, enable_logging, *session_constructor_args) except NameError: raise RuntimeError("Please compile with USE_RPC=1") return RPCSession(sess) def connect_tracker(url, port): """Connect to a RPC tracker Parameters ---------- url : str The url of the host port : int The port to connect to Returns ------- sess : TrackerSession The connected tracker session. """ return TrackerSession((url, port))
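# A minimal sketch (illustrative; the tracker address and device key are
# placeholders): requesting a session through a tracker and using the remote.
if __name__ == "__main__":
    tracker = connect_tracker("127.0.0.1", 9190)
    print(tracker.text_summary())
    remote = tracker.request("rasp3b", priority=1, session_timeout=60)
    dev = remote.cpu(0)  # encoded remote device usable with tvm.nd.array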
https://github.com/zk-ml/tachikoma
python/tvm/rpc/minrpc.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities for locating and linking the minrpc server."""
import os

from tvm._ffi import libinfo
from tvm.contrib import cc


def find_minrpc_server_libpath(server="posix_popen_server"):
    """Get the path of the minrpc server library.

    Parameters
    ----------
    server : str
        The kind of built-in minrpc server.

    Returns
    -------
    minrpc_dir : str
        The path to the minrpc source directory.

    path : str
        The path to the min server library.
    """
    curr_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
    source_dir = os.path.abspath(os.path.join(curr_dir, "..", "..", ".."))

    minrpc_dir = os.path.join(source_dir, "src", "runtime", "minrpc")
    path = os.path.join(minrpc_dir, server, ("%s.cc" % server))

    candidates = [path]
    if not os.path.isfile(path):
        raise RuntimeError("Cannot find minserver %s, in candidates %s" % (server, candidates))
    return minrpc_dir, path


def with_minrpc(compile_func, server="posix_popen_server", runtime="libtvm"):
    """Attach the compiler function with minrpc related options.

    Parameters
    ----------
    compile_func : Union[str, Callable[[str, str, Optional[str]], None]]
        The compilation function to decorate.

    server : str
        The server type.

    runtime : str
        The runtime library.

    Returns
    -------
    fcompile : function
        The wrapped compilation function.
    """
    minrpc_dir, server_path = find_minrpc_server_libpath(server)
    runtime_path = libinfo.find_lib_path([runtime, runtime + ".so", runtime + ".dylib"])[0]

    runtime_dir = os.path.abspath(os.path.dirname(runtime_path))
    options = ["-std=c++17"]
    # Make sure the rpath to the libtvm is set so we can do local tests.
    # Note however that this approach won't work on a remote.
    # We always recommend linking statically.
    options += ["-Wl,-rpath=" + runtime_dir]
    options += ["-I" + path for path in libinfo.find_include_path()]
    options += ["-I" + minrpc_dir]
    fcompile = cc.cross_compiler(
        compile_func, options=options, add_files=[server_path, runtime_path]
    )
    fcompile.__name__ = "with_minrpc"
    fcompile.need_system_lib = True
    return fcompile
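# A usage sketch (illustrative only): wrapping a host compiler so that
# export_library links the minrpc server into the produced binary, which
# PopenSession can then execute directly.
if __name__ == "__main__":
    fcompile = with_minrpc(cc.create_executable)
    assert fcompile.need_system_lib  # minrpc binaries embed a system-lib module
    # mod.export_library("server.minrpc", fcompile=fcompile)  # mod built elsewhere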
https://github.com/zk-ml/tachikoma
python/tvm/rpc/proxy.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RPC proxy, which allows both client and server to connect and match connections.

In normal RPC, the client directly connects to the server's IP address.
Sometimes this cannot be done when the server does not have a static address.
RPCProxy allows both the client and the server to connect to the proxy server;
the proxy server will forward the messages between the client and the server.
"""
# pylint: disable=unused-variable, unused-argument
import os
import asyncio
import logging
import socket
import threading
import errno
import struct
import time

try:
    import tornado
    from tornado import gen
    from tornado import websocket
    from tornado import ioloop
    from . import tornado_util
except ImportError as error_msg:
    raise ImportError(
        "RPCProxy module requires tornado package %s. Try 'pip install tornado'." % error_msg
    )

from tvm.contrib.popen_pool import PopenWorker
from . import _ffi_api
from . import base
from .base import TrackerCode
from .server import _server_env
from .._ffi.base import py_str


class ForwardHandler(object):
    """Forward handler to forward the message."""

    def _init_handler(self):
        """Initialize handler."""
        self._init_message = bytes()
        self._init_req_nbytes = 4
        self._magic = None
        self.timeout = None
        self._rpc_key_length = None
        self._done = False
        self._proxy = ProxyServerHandler.current
        assert self._proxy
        self.rpc_key = None
        self.match_key = None
        self.forward_proxy = None
        self.alloc_time = None

    def __del__(self):
        logging.info("Delete %s...", self.name())

    def name(self):
        """Name of this connection."""
        return "RPCConnection"

    def _init_step(self, message):
        if self._magic is None:
            assert len(message) == 4
            self._magic = struct.unpack("<i", message)[0]
            if self._magic != base.RPC_MAGIC:
                logging.info("Invalid RPC magic from %s", self.name())
                self.close()
            self._init_req_nbytes = 4
        elif self._rpc_key_length is None:
            assert len(message) == 4
            self._rpc_key_length = struct.unpack("<i", message)[0]
            self._init_req_nbytes = self._rpc_key_length
        elif self.rpc_key is None:
            assert len(message) == self._rpc_key_length
            self.rpc_key = py_str(message)
            # match key is used to do the matching
            self.match_key = self.rpc_key[7:].split()[0]
            self.on_start()
        else:
            assert False

    def on_start(self):
        """Event when the initialization is completed."""
        self._proxy.handler_ready(self)

    def on_data(self, message):
        """on data"""
        assert isinstance(message, bytes)
        if self.forward_proxy:
            self.forward_proxy.send_data(message)
        else:
            while message and self._init_req_nbytes > len(self._init_message):
                nbytes = self._init_req_nbytes - len(self._init_message)
                self._init_message += message[:nbytes]
                message = message[nbytes:]
                if self._init_req_nbytes == len(self._init_message):
                    temp = self._init_message
                    self._init_req_nbytes = 0
                    self._init_message = bytes()
                    self._init_step(temp)
            if message:
                logging.info("Invalid 
RPC protocol, too many bytes %s", self.name()) self.close() def on_error(self, err): logging.info("%s: Error in RPC %s", self.name(), err) self.close_pair() def close_pair(self): if self.forward_proxy: self.forward_proxy.signal_close() self.forward_proxy = None self.close() def on_close_event(self): """on close event""" assert not self._done logging.info("RPCProxy:on_close_event %s ...", self.name()) if self.match_key: key = self.match_key if self._proxy._client_pool.get(key, None) == self: self._proxy._client_pool.pop(key) if self._proxy._server_pool.get(key, None) == self: self._proxy._server_pool.pop(key) self._done = True self.forward_proxy = None class TCPHandler(tornado_util.TCPHandler, ForwardHandler): """Event driven TCP handler.""" def __init__(self, sock, addr): super(TCPHandler, self).__init__(sock) self._init_handler() self.addr = addr def name(self): return "TCPSocketProxy:%s:%s" % (str(self.addr[0]), self.rpc_key) def send_data(self, message, binary=True): self.write_message(message, True) def on_message(self, message): self.on_data(message) def on_close(self): logging.info("RPCProxy: on_close %s ...", self.name()) self._close_process = True if self.forward_proxy: self.forward_proxy.signal_close() self.forward_proxy = None self.on_close_event() class WebSocketHandler(websocket.WebSocketHandler, ForwardHandler): """Handler for websockets.""" def __init__(self, *args, **kwargs): super(WebSocketHandler, self).__init__(*args, **kwargs) self._init_handler() def name(self): return "WebSocketProxy:%s" % (self.rpc_key) def on_message(self, message): self.on_data(message) def data_received(self, _): raise NotImplementedError() def send_data(self, message): try: self.write_message(message, True) except websocket.WebSocketClosedError as err: self.on_error(err) def on_close(self): logging.info("RPCProxy: on_close %s ...", self.name()) if self.forward_proxy: self.forward_proxy.signal_close() self.forward_proxy = None self.on_close_event() def signal_close(self): self.close() class RequestHandler(tornado.web.RequestHandler): """Handles html request.""" def __init__(self, *args, **kwargs): file_path = kwargs.pop("file_path") if file_path.endswith("html"): self.page = open(file_path).read() web_port = kwargs.pop("rpc_web_port", None) if web_port: self.page = self.page.replace( "ws://localhost:9190/ws", "ws://localhost:%d/ws" % web_port ) else: self.page = open(file_path, "rb").read() super(RequestHandler, self).__init__(*args, **kwargs) def data_received(self, _): pass def get(self, *args, **kwargs): self.write(self.page) class ProxyServerHandler(object): """Internal proxy server handler class.""" current = None def __init__( self, sock, listen_port, web_port, timeout_client, timeout_server, tracker_addr, index_page=None, resource_files=None, ): assert ProxyServerHandler.current is None ProxyServerHandler.current = self if web_port: handlers = [ (r"/ws", WebSocketHandler), ] if index_page: handlers.append( (r"/", RequestHandler, {"file_path": index_page, "rpc_web_port": web_port}) ) logging.info("Serving RPC index html page at http://localhost:%d", web_port) resource_files = resource_files if resource_files else [] for fname in resource_files: basename = os.path.basename(fname) pair = (r"/%s" % basename, RequestHandler, {"file_path": fname}) handlers.append(pair) logging.info(pair) self.app = tornado.web.Application(handlers) self.app.listen(web_port) self.sock = sock self.sock.setblocking(0) self.loop = ioloop.IOLoop.current() def event_handler(_, events): self._on_event(events) 
self.loop.add_handler(self.sock.fileno(), event_handler, self.loop.READ) self._client_pool = {} self._server_pool = {} self.timeout_alloc = 5 self.timeout_client = timeout_client self.timeout_server = timeout_server # tracker information self._listen_port = listen_port self._tracker_addr = tracker_addr self._tracker_conn = None self._tracker_pending_puts = [] self._key_set = set() self.update_tracker_period = 2 if tracker_addr: logging.info("Tracker address:%s", str(tracker_addr)) def _callback(): self._update_tracker(True) self.loop.call_later(self.update_tracker_period, _callback) logging.info("RPCProxy: Websock port bind to %d", web_port) def _on_event(self, _): while True: try: conn, addr = self.sock.accept() TCPHandler(conn, addr) except socket.error as err: if err.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK): break def _pair_up(self, lhs, rhs): lhs.forward_proxy = rhs rhs.forward_proxy = lhs lhs.send_data(struct.pack("<i", base.RPC_CODE_SUCCESS)) lhs.send_data(struct.pack("<i", len(rhs.rpc_key))) lhs.send_data(rhs.rpc_key.encode("utf-8")) rhs.send_data(struct.pack("<i", base.RPC_CODE_SUCCESS)) rhs.send_data(struct.pack("<i", len(lhs.rpc_key))) rhs.send_data(lhs.rpc_key.encode("utf-8")) logging.info("Pairup connect %s and %s", lhs.name(), rhs.name()) def _regenerate_server_keys(self, keys): """Regenerate keys for server pool""" keyset = set(self._server_pool.keys()) new_keys = [] # re-generate the server match key, so old information is invalidated. for key in keys: rpc_key, _ = key.split(":") handle = self._server_pool[key] del self._server_pool[key] new_key = base.random_key(rpc_key + ":", keyset) self._server_pool[new_key] = handle keyset.add(new_key) new_keys.append(new_key) return new_keys def _update_tracker(self, period_update=False): """Update information on tracker.""" try: if self._tracker_conn is None: self._tracker_conn = socket.socket( base.get_addr_family(self._tracker_addr), socket.SOCK_STREAM ) self._tracker_conn.connect(self._tracker_addr) self._tracker_conn.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC)) magic = struct.unpack("<i", base.recvall(self._tracker_conn, 4))[0] if magic != base.RPC_TRACKER_MAGIC: self.loop.stop() raise RuntimeError("%s is not RPC Tracker" % str(self._tracker_addr)) # just connect to tracker, need to update all keys self._tracker_pending_puts = self._server_pool.keys() if self._tracker_conn and period_update: # periodically update tracker information # regenerate key if the key is not in tracker anymore # and there is no in-coming connection after timeout_alloc base.sendjson(self._tracker_conn, [TrackerCode.GET_PENDING_MATCHKEYS]) pending_keys = set(base.recvjson(self._tracker_conn)) update_keys = [] for k, v in self._server_pool.items(): if k not in pending_keys: if v.alloc_time is None: v.alloc_time = time.time() elif time.time() - v.alloc_time > self.timeout_alloc: update_keys.append(k) v.alloc_time = None if update_keys: logging.info( "RPCProxy: No incoming conn on %s, regenerate keys...", str(update_keys) ) new_keys = self._regenerate_server_keys(update_keys) self._tracker_pending_puts += new_keys need_update_info = False # report new connections for key in self._tracker_pending_puts: rpc_key = key.split(":")[0] base.sendjson( self._tracker_conn, [TrackerCode.PUT, rpc_key, (self._listen_port, key), None] ) assert base.recvjson(self._tracker_conn) == TrackerCode.SUCCESS if rpc_key not in self._key_set: self._key_set.add(rpc_key) need_update_info = True if need_update_info: keylist = "[" + ",".join(self._key_set) + "]" cinfo = {"key": 
"server:proxy" + keylist, "addr": [None, self._listen_port]} base.sendjson(self._tracker_conn, [TrackerCode.UPDATE_INFO, cinfo]) assert base.recvjson(self._tracker_conn) == TrackerCode.SUCCESS self._tracker_pending_puts = [] except (socket.error, IOError) as err: logging.info( "Lost tracker connection: %s, try reconnect in %g sec", str(err), self.update_tracker_period, ) self._tracker_conn.close() self._tracker_conn = None self._regenerate_server_keys(self._server_pool.keys()) if period_update: def _callback(): self._update_tracker(True) self.loop.call_later(self.update_tracker_period, _callback) def _handler_ready_tracker_mode(self, handler): """tracker mode to handle handler ready.""" if handler.rpc_key.startswith("server:"): key = base.random_key(handler.match_key + ":", self._server_pool) handler.match_key = key self._server_pool[key] = handler self._tracker_pending_puts.append(key) self._update_tracker() else: if handler.match_key in self._server_pool: self._pair_up(self._server_pool.pop(handler.match_key), handler) else: handler.send_data(struct.pack("<i", base.RPC_CODE_MISMATCH)) handler.signal_close() def _handler_ready_proxy_mode(self, handler): """Normal proxy mode when handler is ready.""" if handler.rpc_key.startswith("server:"): pool_src, pool_dst = self._client_pool, self._server_pool timeout = self.timeout_server else: pool_src, pool_dst = self._server_pool, self._client_pool timeout = self.timeout_client key = handler.match_key if key in pool_src: self._pair_up(pool_src.pop(key), handler) return if key not in pool_dst: pool_dst[key] = handler def cleanup(): """Cleanup client connection if timeout""" if pool_dst.get(key, None) == handler: logging.info( "Timeout client connection %s, cannot find match key=%s", handler.name(), key, ) pool_dst.pop(key) handler.send_data(struct.pack("<i", base.RPC_CODE_MISMATCH)) handler.signal_close() self.loop.call_later(timeout, cleanup) else: logging.info("Duplicate connection with same key=%s", key) handler.send_data(struct.pack("<i", base.RPC_CODE_DUPLICATE)) handler.signal_close() def handler_ready(self, handler): """Report handler to be ready.""" logging.info("Handler ready %s", handler.name()) if self._tracker_addr: self._handler_ready_tracker_mode(handler) else: self._handler_ready_proxy_mode(handler) def run(self): """Run the proxy server""" ioloop.IOLoop.current().start() def _proxy_server( listen_sock, listen_port, web_port, timeout_client, timeout_server, tracker_addr, index_page, resource_files, ): asyncio.set_event_loop(asyncio.new_event_loop()) handler = ProxyServerHandler( listen_sock, listen_port, web_port, timeout_client, timeout_server, tracker_addr, index_page, resource_files, ) handler.run() class PopenProxyServerState(object): """Internal PopenProxy State for Popen""" current = None def __init__( self, host, port=9091, port_end=9199, web_port=0, timeout_client=600, timeout_server=600, tracker_addr=None, index_page=None, resource_files=None, ): sock = socket.socket(base.get_addr_family((host, port)), socket.SOCK_STREAM) self.port = None for my_port in range(port, port_end): try: sock.bind((host, my_port)) self.port = my_port break except socket.error as sock_err: if sock_err.errno in [errno.EADDRINUSE]: continue raise sock_err if not self.port: raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end)) logging.info("RPCProxy: client port bind to %s:%d", host, self.port) sock.listen(1) self.thread = threading.Thread( target=_proxy_server, args=( sock, self.port, web_port, timeout_client, timeout_server, 
                tracker_addr,
                index_page,
                resource_files,
            ),
        )
        # start the server in a different thread
        # so we can return the port directly
        self.thread.start()


def _popen_start_proxy_server(
    host,
    port=9091,
    port_end=9199,
    web_port=0,
    timeout_client=600,
    timeout_server=600,
    tracker_addr=None,
    index_page=None,
    resource_files=None,
):
    # This is a function that will be sent to the
    # Popen worker to run on a separate process.
    # Create and start the server in a different thread
    state = PopenProxyServerState(
        host,
        port,
        port_end,
        web_port,
        timeout_client,
        timeout_server,
        tracker_addr,
        index_page,
        resource_files,
    )
    PopenProxyServerState.current = state
    # returns the port so that the main can get the port number.
    return state.port


class Proxy(object):
    """Start RPC proxy server on a separate process.

    Python implementation based on PopenWorker.

    Parameters
    ----------
    host : str
        The host url of the server.

    port : int
        The TCP port to be bound to.

    port_end : int, optional
        The end TCP port to search.

    web_port : int, optional
        The http/websocket port of the server.

    timeout_client : float, optional
        Timeout of client until it sees a matching connection.

    timeout_server : float, optional
        Timeout of server until it sees a matching connection.

    tracker_addr: Tuple (str, int) , optional
        The address of the RPC Tracker in tuple (host, port) format.
        If is not None, the server will register itself to the tracker.

    index_page : str, optional
        Path to an index page that can be used to display at proxy index.

    resource_files : str, optional
        Path to local resources that can be included in the http request.
    """

    def __init__(
        self,
        host,
        port=9091,
        port_end=9199,
        web_port=0,
        timeout_client=600,
        timeout_server=600,
        tracker_addr=None,
        index_page=None,
        resource_files=None,
    ):
        self.proc = PopenWorker()
        # send the function
        self.proc.send(
            _popen_start_proxy_server,
            [
                host,
                port,
                port_end,
                web_port,
                timeout_client,
                timeout_server,
                tracker_addr,
                index_page,
                resource_files,
            ],
        )
        # receive the port
        self.port = self.proc.recv()
        self.host = host

    def terminate(self):
        """Terminate the server process"""
        if self.proc:
            logging.info("Terminating Proxy Server...")
            self.proc.kill()
            self.proc = None

    def __del__(self):
        self.terminate()


def websocket_proxy_server(url, key=""):
    """Create an RPC server that uses a websocket that connects to a proxy.

    Parameters
    ----------
    url : str
        The url to be connected.

    key : str
        The key to identify the server.
""" def create_on_message(conn): def _fsend(data): data = bytes(data) conn.write_message(data, binary=True) return len(data) on_message = _ffi_api.CreateEventDrivenServer(_fsend, "WebSocketProxyServer", "%toinit") return on_message @gen.coroutine def _connect(key): conn = yield websocket.websocket_connect(url) on_message = create_on_message(conn) temp = _server_env(None) # Start connecton conn.write_message(struct.pack("<i", base.RPC_MAGIC), binary=True) key = "server:" + key conn.write_message(struct.pack("<i", len(key)), binary=True) conn.write_message(key.encode("utf-8"), binary=True) msg = yield conn.read_message() assert len(msg) >= 4 magic = struct.unpack("<i", msg[:4])[0] if magic == base.RPC_CODE_DUPLICATE: raise RuntimeError("key: %s has already been used in proxy" % key) if magic == base.RPC_CODE_MISMATCH: logging.info("RPCProxy do not have matching client key %s", key) elif magic != base.RPC_CODE_SUCCESS: raise RuntimeError("%s is not RPC Proxy" % url) msg = msg[4:] logging.info("Connection established with remote") if msg: on_message(bytearray(msg), 3) while True: try: msg = yield conn.read_message() if msg is None: break on_message(bytearray(msg), 3) except websocket.WebSocketClosedError as err: break logging.info("WebSocketProxyServer closed...") temp.remove() ioloop.IOLoop.current().stop() ioloop.IOLoop.current().spawn_callback(_connect, key) ioloop.IOLoop.current().start()
https://github.com/zk-ml/tachikoma
python/tvm/rpc/server.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RPC server implementation.

Note
----
Server is TCP based with the following protocol:
- Initial handshake to the peer
  - [RPC_MAGIC, keysize(int32), key-bytes]
- The key is in format
  - {server|client}:device-type[:random-key] [-timeout=timeout]
"""
# pylint: disable=invalid-name
import ctypes
import socket
import select
import struct
import logging
import threading
import multiprocessing
import time
import errno

import tvm._ffi
from tvm._ffi.base import py_str
from tvm._ffi.libinfo import find_lib_path
from tvm.runtime.module import load_module as _load_module
from tvm.contrib import utils
from tvm.contrib.popen_pool import PopenWorker
from . import _ffi_api
from . import base

# pylint: disable=unused-import
from . import testing
from .base import TrackerCode

logger = logging.getLogger("RPCServer")
console_handler = logging.StreamHandler()
console_handler.setFormatter(
    logging.Formatter(
        fmt="%(asctime)s.%(msecs)03d %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
    )
)
logger.addHandler(console_handler)
logger.setLevel(logging.INFO)
logger.propagate = False


def _server_env(load_library, work_path=None):
    """Server environment function that returns the temp dir."""
    if work_path:
        temp = work_path
    else:
        temp = utils.tempdir()

    # pylint: disable=unused-variable
    @tvm._ffi.register_func("tvm.rpc.server.workpath", override=True)
    def get_workpath(path):
        return temp.relpath(path)

    @tvm._ffi.register_func("tvm.rpc.server.load_module", override=True)
    def load_module(file_name):
        """Load module from remote side."""
        path = temp.relpath(file_name)
        m = _load_module(path)
        logger.info("load_module %s", path)
        return m

    @tvm._ffi.register_func("tvm.rpc.server.download_linked_module", override=True)
    def download_linked_module(file_name):
        """Link the module on the remote side and return its binary."""
        # pylint: disable=import-outside-toplevel
        path = temp.relpath(file_name)

        if path.endswith(".o"):
            # Extra dependencies during runtime.
            from tvm.contrib import cc as _cc

            _cc.create_shared(path + ".so", path)
            path += ".so"
        elif path.endswith(".tar"):
            # Extra dependencies during runtime. 
from tvm.contrib import cc as _cc, tar as _tar tar_temp = utils.tempdir(custom_path=path.replace(".tar", "")) _tar.untar(path, tar_temp.temp_dir) files = [tar_temp.relpath(x) for x in tar_temp.listdir()] _cc.create_shared(path + ".so", files) path += ".so" elif path.endswith(".dylib") or path.endswith(".so"): pass else: raise RuntimeError("Do not know how to link %s" % file_name) logger.info("Send linked module %s to client", path) return bytearray(open(path, "rb").read()) libs = [] load_library = load_library.split(":") if load_library else [] for file_name in load_library: file_name = find_lib_path(file_name)[0] libs.append(ctypes.CDLL(file_name, ctypes.RTLD_GLOBAL)) logger.info("Load additional library %s", file_name) temp.libs = libs return temp def _serve_loop(sock, addr, load_library, work_path=None): """Server loop""" sockfd = sock.fileno() temp = _server_env(load_library, work_path) _ffi_api.ServerLoop(sockfd) if not work_path: temp.remove() logger.info("Finish serving %s", addr) def _parse_server_opt(opts): # parse client options ret = {} for kv in opts: if kv.startswith("-timeout="): ret["timeout"] = float(kv[9:]) return ret def _listen_loop(sock, port, rpc_key, tracker_addr, load_library, custom_addr): """Listening loop of the server.""" def _accept_conn(listen_sock, tracker_conn, ping_period=2): """Accept connection from the other places. Parameters ---------- listen_sock: Socket The socket used by listening process. tracker_conn : connection to tracker Tracker connection ping_period : float, optional ping tracker every k seconds if no connection is accepted. """ old_keyset = set() # Report resource to tracker if tracker_conn: matchkey = base.random_key(rpc_key + ":") base.sendjson(tracker_conn, [TrackerCode.PUT, rpc_key, (port, matchkey), custom_addr]) assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS else: matchkey = rpc_key unmatch_period_count = 0 unmatch_timeout = 4 # Wait until we get a valid connection while True: if tracker_conn: trigger = select.select([listen_sock], [], [], ping_period) if not listen_sock in trigger[0]: base.sendjson(tracker_conn, [TrackerCode.GET_PENDING_MATCHKEYS]) pending_keys = base.recvjson(tracker_conn) old_keyset.add(matchkey) # if match key not in pending key set # it means the key is acquired by a client but not used. 
if matchkey not in pending_keys: unmatch_period_count += 1 else: unmatch_period_count = 0 # regenerate match key if key is acquired but not used for a while if unmatch_period_count * ping_period > unmatch_timeout + ping_period: logger.info("no incoming connections, regenerate key ...") matchkey = base.random_key(rpc_key + ":", old_keyset) base.sendjson( tracker_conn, [TrackerCode.PUT, rpc_key, (port, matchkey), custom_addr] ) assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS unmatch_period_count = 0 continue conn, addr = listen_sock.accept() magic = struct.unpack("<i", base.recvall(conn, 4))[0] if magic != base.RPC_MAGIC: conn.close() continue keylen = struct.unpack("<i", base.recvall(conn, 4))[0] key = py_str(base.recvall(conn, keylen)) arr = key.split() expect_header = "client:" + matchkey server_key = "server:" + rpc_key if arr[0] != expect_header: conn.sendall(struct.pack("<i", base.RPC_CODE_MISMATCH)) conn.close() logger.warning("mismatch key from %s", addr) continue conn.sendall(struct.pack("<i", base.RPC_CODE_SUCCESS)) conn.sendall(struct.pack("<i", len(server_key))) conn.sendall(server_key.encode("utf-8")) return conn, addr, _parse_server_opt(arr[1:]) # Server logic tracker_conn = None while True: try: # step 1: setup tracker and report to tracker if tracker_addr and tracker_conn is None: tracker_conn = base.connect_with_retry(tracker_addr) tracker_conn.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC)) magic = struct.unpack("<i", base.recvall(tracker_conn, 4))[0] if magic != base.RPC_TRACKER_MAGIC: raise RuntimeError("%s is not RPC Tracker" % str(tracker_addr)) # report status of current queue cinfo = {"key": "server:" + rpc_key, "addr": (custom_addr, port)} base.sendjson(tracker_conn, [TrackerCode.UPDATE_INFO, cinfo]) assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS # step 2: wait for in-coming connections conn, addr, opts = _accept_conn(sock, tracker_conn) except (socket.error, IOError): # retry when tracker is dropped if tracker_conn: tracker_conn.close() tracker_conn = None continue except RuntimeError as exc: raise exc # step 3: serving work_path = utils.tempdir() logger.info("connection from %s", addr) server_proc = multiprocessing.Process( target=_serve_loop, args=(conn, addr, load_library, work_path) ) server_proc.start() # close from our side. 
        conn.close()
        # wait until the server process finishes or times out
        server_proc.join(opts.get("timeout", None))

        if server_proc.is_alive():
            logger.info("Timeout in RPC session, kill...")
            # pylint: disable=import-outside-toplevel
            import psutil

            parent = psutil.Process(server_proc.pid)
            # terminate worker children
            for child in parent.children(recursive=True):
                child.terminate()
            # terminate the worker
            server_proc.terminate()
        work_path.remove()


def _connect_proxy_loop(addr, key, load_library):
    key = "server:" + key
    retry_count = 0
    max_retry = 5
    retry_period = 5
    while True:
        try:
            sock = socket.socket(base.get_addr_family(addr), socket.SOCK_STREAM)
            sock.connect(addr)
            sock.sendall(struct.pack("<i", base.RPC_MAGIC))
            sock.sendall(struct.pack("<i", len(key)))
            sock.sendall(key.encode("utf-8"))
            magic = struct.unpack("<i", base.recvall(sock, 4))[0]
            if magic == base.RPC_CODE_DUPLICATE:
                raise RuntimeError("key: %s has already been used in proxy" % key)
            if magic == base.RPC_CODE_MISMATCH:
                logger.warning("RPCProxy does not have a matching client key %s", key)
            elif magic != base.RPC_CODE_SUCCESS:
                raise RuntimeError("%s is not RPC Proxy" % str(addr))
            keylen = struct.unpack("<i", base.recvall(sock, 4))[0]
            remote_key = py_str(base.recvall(sock, keylen))
            opts = _parse_server_opt(remote_key.split()[1:])
            logger.info("connected to %s", str(addr))
            process = multiprocessing.Process(target=_serve_loop, args=(sock, addr, load_library))
            process.start()
            sock.close()
            process.join(opts.get("timeout", None))
            if process.is_alive():
                logger.info("Timeout in RPC session, kill...")
                process.terminate()
            retry_count = 0
        except (socket.error, IOError) as err:
            retry_count += 1
            logger.warning("Error encountered %s, retry in %g sec", str(err), retry_period)
            if retry_count > max_retry:
                raise RuntimeError("Maximum retry error: last error: %s" % str(err))
            time.sleep(retry_period)


class PopenRPCServerState(object):
    """Internal PopenRPCServer State"""

    current = None

    def __init__(
        self,
        host,
        port=9091,
        port_end=9199,
        is_proxy=False,
        tracker_addr=None,
        key="",
        load_library=None,
        custom_addr=None,
        silent=False,
    ):
        # start update
        self.host = host
        self.port = port
        self.libs = []
        self.custom_addr = custom_addr

        if silent:
            logger.setLevel(logging.ERROR)

        if not is_proxy:
            sock = socket.socket(base.get_addr_family((host, port)), socket.SOCK_STREAM)
            self.port = None
            for my_port in range(port, port_end):
                try:
                    sock.bind((host, my_port))
                    self.port = my_port
                    break
                except socket.error as sock_err:
                    if sock_err.errno in [errno.EADDRINUSE]:
                        continue
                    raise sock_err
            if not self.port:
                raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
            logger.info("bind to %s:%d", host, self.port)
            sock.listen(1)
            self.sock = sock
            self.thread = threading.Thread(
                target=_listen_loop,
                args=(self.sock, self.port, key, tracker_addr, load_library, self.custom_addr),
            )
            self.thread.start()
        else:
            self.thread = threading.Thread(
                target=_connect_proxy_loop, args=((host, port), key, load_library)
            )
            self.thread.start()


def _popen_start_rpc_server(
    host,
    port=9091,
    port_end=9199,
    is_proxy=False,
    tracker_addr=None,
    key="",
    load_library=None,
    custom_addr=None,
    silent=False,
    no_fork=False,
    server_init_callback=None,
):
    if no_fork:
        multiprocessing.set_start_method("spawn")

    if server_init_callback:
        server_init_callback()

    # This is a function that will be sent to the
    # Popen worker to run on a separate process.
    # Create and start the server in a different thread
    state = PopenRPCServerState(
        host, port, port_end, is_proxy, tracker_addr, key, load_library, custom_addr, silent
    )
    PopenRPCServerState.current = state
    # returns the port so that the main can get the port number.
    return state.port


class Server(object):
    """Start RPC server on a separate process.

    This is a simple python implementation based on multi-processing.
    It is also possible to implement a similar C based server with
    TVM runtime which does not depend on the python.

    Parameters
    ----------
    host : str
        The host url of the server.

    port : int
        The port to bind to.

    port_end : int, optional
        The end port to search.

    is_proxy : bool, optional
        Whether the address specified is a proxy.
        If this is true, the host and port actually correspond to the
        address of the proxy server.

    tracker_addr: Tuple (str, int) , optional
        The address of the RPC Tracker in tuple (host, port) format.
        If not None, the server will register itself to the tracker.

    key : str, optional
        The key used to identify the device type in tracker.

    load_library : str, optional
        List of additional libraries to be loaded during execution.

    custom_addr: str, optional
        Custom IP Address to Report to RPC Tracker

    silent: bool, optional
        Whether to run this server in silent mode.

    no_fork: bool, optional
        Whether to forbid fork in multiprocessing.

    server_init_callback: Callable, optional
        Additional initialization function when starting the server.

    Note
    ----
    The RPC server only sees functions in the tvm namespace.
    To bring additional custom functions to the server env, you can use
    server_init_callback.

    .. code:: python

        def server_init_callback():
            import tvm
            # must import mypackage here
            import mypackage

            tvm.register_func("function", mypackage.func)

        server = rpc.Server(host, server_init_callback=server_init_callback)
    """

    def __init__(
        self,
        host="0.0.0.0",
        port=9091,
        port_end=9199,
        is_proxy=False,
        tracker_addr=None,
        key="",
        load_library=None,
        custom_addr=None,
        silent=False,
        no_fork=False,
        server_init_callback=None,
    ):
        try:
            if _ffi_api.ServerLoop is None:
                raise RuntimeError("Please compile with USE_RPC=1")
        except NameError:
            raise RuntimeError("Please compile with USE_RPC=1")
        self.proc = PopenWorker()
        # send the function
        self.proc.send(
            _popen_start_rpc_server,
            [
                host,
                port,
                port_end,
                is_proxy,
                tracker_addr,
                key,
                load_library,
                custom_addr,
                silent,
                no_fork,
                server_init_callback,
            ],
        )
        # receive the port
        self.port = self.proc.recv()
        self.host = host

    def terminate(self):
        """Terminate the server process"""
        if self.proc:
            self.proc.kill()
            self.proc = None

    def __del__(self):
        self.terminate()
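# --- Illustrative usage (editor's addition): a minimal sketch, assuming a
# local standalone server; the host and port values are example assumptions.
# Server searches [port, port_end) for a free port and reports the bound one
# via `self.port`, which is what the client should connect to.
if __name__ == "__main__":
    from tvm import rpc

    _server = rpc.Server(host="127.0.0.1", port=9091)
    _remote = rpc.connect("127.0.0.1", _server.port)
    print("connected to RPC server on port", _server.port)
    _server.terminate()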
https://github.com/zk-ml/tachikoma
python/tvm/rpc/server_ios_launcher.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Python wrapper for running an RPC Server through iOS RPC
on the iOS simulator using the simctl command line tool.
"""
# pylint: disable=invalid-name
import os
import json
import time
import threading
import subprocess
from enum import Enum
from typing import Dict, List, AnyStr


class OSName(Enum):
    """The names of the operating systems available on the simulator."""

    iOS = "iOS"
    tvOS = "tvOS"
    watchOS = "watchOS"


class IOSDevice(Enum):
    """The names of available iOS devices."""

    iPhone = "iPhone"
    iPod = "iPod"
    iPad = "iPad"


class RPCServerMode(Enum):
    """Server modes available in the iOS RPC application."""

    standalone = "standalone"
    proxy = "proxy"
    tracker = "tracker"


def get_list_of_available_simulators() -> Dict[AnyStr, List]:
    """
    List of simulators available on the system. Simulators are presented as a dictionary.
    The dictionary key is the name of the operating system of the simulator.
    The dictionary value is a list of all simulators with a given operating system.
    """

    with subprocess.Popen(
        "xcrun simctl list devices available --json",
        shell=True,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
    ) as proc:
        out, _ = proc.communicate()
        available_simulators = json.loads(out)["devices"]
        available_simulators = {
            key: value for key, value in available_simulators.items() if value != []
        }
    return available_simulators


def grep_by_system(available_devices: Dict[AnyStr, List], system_name: OSName) -> List[Dict]:
    """Search for simulators that use the target operating system."""

    def find_index_of_substr(search_field: List[AnyStr], target: AnyStr) -> int:
        for i, item in enumerate(search_field):
            if target in item:
                return i
        raise ValueError("Search field doesn't contain the target")

    keys = list(available_devices.keys())

    return available_devices[keys[find_index_of_substr(keys, system_name.value)]]


def grep_by_device(available_devices: List[Dict], device_name: IOSDevice) -> List[Dict]:
    """Search for simulators that emulate a given device."""

    return [item for item in available_devices if device_name.value in item["name"]]


def get_device_uid(target_device: Dict) -> AnyStr:
    """Get a unique device ID."""

    return target_device["udid"]


def check_call_with_runtime_error(cmd: AnyStr, error_message: AnyStr) -> None:
    """Calling the function `subprocess.check_call` and catching its possible thrown exception."""

    try:
        subprocess.check_call(cmd.split(" "))
    except subprocess.CalledProcessError as called_process_error:
        raise called_process_error from RuntimeError(error_message)


def boot_device(udid: AnyStr) -> None:
    """Boot the device by its unique ID."""

    cmd = f"xcrun simctl boot {udid}"
    error_message = f"Failed to boot device with unique id: {udid}"
    check_call_with_runtime_error(cmd, error_message)
    if not is_booted(udid):
        raise RuntimeError(error_message)
def shutdown_device(udid: AnyStr) -> None:
    """Shutdown the device by its unique ID."""

    cmd = f"xcrun simctl shutdown {udid}"
    error_message = f"Failed to shut down device with unique id: {udid}"
    check_call_with_runtime_error(cmd, error_message)
    if not is_turned_off(udid):
        raise RuntimeError(error_message)


def deploy_bundle_to_simulator(udid: AnyStr, bundle_path: AnyStr) -> None:
    """Deploy iOS RPC bundle <bundle_path> to simulator with its unique ID <udid>."""

    check_call_with_runtime_error(
        cmd=f"xcrun simctl install {udid} {bundle_path}",
        error_message=f"Failed to deploy bundle <{bundle_path}> to device with unique id: {udid}",
    )


def delete_bundle_from_simulator(udid: AnyStr, bundle_id: AnyStr) -> None:
    """Delete iOS RPC bundle <bundle_id> from simulator with its unique ID <udid>."""

    check_call_with_runtime_error(
        cmd=f"xcrun simctl uninstall {udid} {bundle_id}",
        error_message=f"Failed to uninstall bundle <{bundle_id}> "
        f"from device with unique id: {udid}",
    )


def launch_ios_rpc(
    udid: AnyStr, bundle_id: AnyStr, host_url: AnyStr, host_port: int, key: AnyStr, mode: AnyStr
):  # pylint: disable=too-many-arguments, consider-using-with
    """
    Launch iOS RPC application on simulator with no UI interconnection.

    udid : str
        Unique device ID.

    bundle_id : str
        iOS RPC bundle ID.

    host_url : str
        The tracker/proxy address.

    host_port : int
        The tracker/proxy port.

    key : str
        The key used to identify the device type in tracker.

    mode : str
        Server mode. See RPCServerMode.
    """
    cmd = (
        f"xcrun simctl launch --console {udid} {bundle_id}"
        f" --immediate_connect"
        f" --host_url={host_url}"
        f" --host_port={host_port}"
        f" --key={key}"
        f" --server_mode={mode}"
        f" --verbose"
    )
    proc = subprocess.Popen(
        cmd.split(" "),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        bufsize=1,
        universal_newlines=True,
    )
    return proc


def terminate_ios_rpc(udid: AnyStr, bundle_id: AnyStr) -> None:
    """Terminate iOS RPC application."""

    check_call_with_runtime_error(
        cmd=f"xcrun simctl terminate {udid} {bundle_id}",
        error_message=f"Failed to terminate bundle <{bundle_id}> "
        f"from device with unique id: {udid}",
    )


def is_booted(udid: AnyStr) -> bool:
    """Check that the device has booted."""

    device = find_device(udid)
    return device["state"] == "Booted"


def is_turned_off(udid: AnyStr) -> bool:
    """Check that the device has turned off."""

    device = find_device(udid)
    return device["state"] == "Shutdown"


def check_booted_device(devices: List[Dict]) -> Dict:
    """Check if there is already a booted device. If so, return this device."""

    for device in devices:
        if device["state"] == "Booted":
            return device
    return {}


def find_device(udid: AnyStr) -> Dict:
    """Find device by its unique ID."""

    return_value = {}
    available_devices = get_list_of_available_simulators()
    for devices in available_devices.values():
        for device in devices:
            if device["udid"] == udid:
                return_value = device
    return return_value


class ServerIOSLauncher:
    """
    Python wrapper for launching iOS RPC on the simulator.

    mode : str
        Server mode. See RPCServerMode.

    host : str
        The tracker/proxy address.

    port : int
        The tracker/proxy port.

    key : str
        The key used to identify the device type in tracker.
    """

    booted_devices = []
    bundle_id = os.environ.get("BUNDLE_ID")
    bundle_path = os.environ.get("BUNDLE_PATH")

    class ConsoleMarkers(Enum):
        """
        Marker-messages that iOS RPC Server should print to the console output
        when its states change (see apps/ios_rpc/tvmrpc/RPCServer.mm).

        STOPPED : str
            iOS RPC Server process was stopped.

        CALLSTACK : str
            Call stack if RPC Server was stopped with an error.
        CONNECTED : str
            RPC Server reports that it successfully connected.

        SERVER_IP : str
            IP on which RPC Server started (for standalone mode).

        SERVER_PORT : str
            Port on which RPC Server started (for standalone mode).
        """

        STOPPED = "PROCESS_STOPPED"
        CALLSTACK = "First throw call stack"
        CONNECTED = "[IOS-RPC] STATE: 2"
        SERVER_IP = "[IOS-RPC] IP: "
        SERVER_PORT = "[IOS-RPC] PORT: "

    def __init__(self, mode, host, port, key):
        if not ServerIOSLauncher.is_compatible_environment():
            raise RuntimeError(
                "Can't create ServerIOSLauncher instance."
                " No environment variables set for iOS RPC Server."
            )

        self.host = host
        self.port = port

        self.external_booted_device = None
        if not ServerIOSLauncher.booted_devices:
            self._boot_or_find_booted_device()

        self.udid = get_device_uid(
            self.external_booted_device
            if self.external_booted_device is not None
            else ServerIOSLauncher.booted_devices[-1]
        )

        self.bundle_was_deployed = False
        deploy_bundle_to_simulator(self.udid, self.bundle_path)
        self.bundle_was_deployed = True

        self.server_was_started = False
        self.launch_process = launch_ios_rpc(self.udid, self.bundle_id, host, port, key, mode)
        self._wait_launch_complete(
            waiting_time=60,
            hz=10,
            should_print_host_and_port=mode == RPCServerMode.standalone.value,
        )
        self.server_was_started = True

    def terminate(self):
        """Terminate iOS RPC server."""

        if self.bundle_was_deployed and self.server_was_started:
            try:
                terminate_ios_rpc(self.udid, self.bundle_id)
                self.launch_process.terminate()
                self.server_was_started = False
            except RuntimeError as e:
                print(e)
        if self.bundle_was_deployed:
            try:
                delete_bundle_from_simulator(self.udid, self.bundle_id)
                self.bundle_was_deployed = False
            except RuntimeError as e:
                print(e)

    def __del__(self):
        self.terminate()

    @staticmethod
    def is_compatible_environment():
        """Check that the current environment has the required variables."""

        return bool(os.environ.get("BUNDLE_ID")) and bool(os.environ.get("BUNDLE_PATH"))

    @staticmethod
    def shutdown_booted_devices():
        """Shutdown simulators that have been booted using this class."""

        for device_meta in ServerIOSLauncher.booted_devices:
            try:
                shutdown_device(get_device_uid(device_meta))
            except RuntimeError as e:
                print(e)
        ServerIOSLauncher.booted_devices = []

    def _boot_or_find_booted_device(self):
        """
        Boot the required simulator if there is no suitable booted simulator
        among the available simulators. If there is a suitable booted simulator,
        then take it as the simulator to which the iOS RPC application will be deployed.
        """

        target_system = OSName.iOS
        target_device_type = IOSDevice.iPhone
        available_devices = get_list_of_available_simulators()
        if not available_devices:
            raise ValueError("No devices available in this environment")
        target_devices = grep_by_system(available_devices, target_system)
        if not target_devices:
            raise ValueError(f"No available simulators for target system: {target_system.value}")
        target_devices = grep_by_device(target_devices, target_device_type)
        if not target_devices:
            raise ValueError(
                f"No available simulators for target device type: {target_device_type.value}"
            )

        maybe_booted = check_booted_device(target_devices)
        if maybe_booted:
            self.external_booted_device = maybe_booted
        else:
            take_latest_model = True
            target_device = target_devices[-1 if take_latest_model else 0]
            boot_device(get_device_uid(target_device))
            ServerIOSLauncher.booted_devices.append(target_device)

    def _wait_launch_complete(self, waiting_time, hz, should_print_host_and_port=False):
        # pylint: disable=too-many-locals
        """
        Wait for the iOS RPC server to start.
        waiting_time : int
            The maximum waiting time during which it is necessary
            to receive a message from RPC Server.

        hz : int
            The frequency of checking (in hertz) messages from RPC Server.
            Checks for messages from the server will occur every 1 / hz second.

        should_print_host_and_port : bool
            A flag that indicates that RPC Server should print the host and port
            on which it was started.
            Used for standalone mode.
        """

        class Switch:
            """A simple helper class for boolean switching."""

            def __init__(self):
                self._on = False

            def toggle(self):
                """Toggle flag."""
                self._on = not self._on

            @property
            def on(self):
                """Flag of this switch."""
                return self._on

        def watchdog():
            for _ in range(waiting_time * hz):
                time.sleep(1.0 / hz)
                if switch_have_data.on:
                    break
            if not switch_have_data.on:
                self.launch_process.terminate()
                switch_process_was_terminated.toggle()

        switch_have_data = Switch()
        switch_process_was_terminated = Switch()
        watchdog_thread = threading.Thread(target=watchdog)

        host, port = None, None
        watchdog_thread.start()
        for line in self.launch_process.stdout:
            if not switch_have_data.on:
                switch_have_data.toggle()

            found = str(line).find(ServerIOSLauncher.ConsoleMarkers.STOPPED.value)
            if found != -1:
                raise RuntimeError("[ERROR] Crash during RPC Server launch... ")

            found = str(line).find(ServerIOSLauncher.ConsoleMarkers.CALLSTACK.value)
            if found != -1:
                raise RuntimeError("[ERROR] Crash during RPC Server launch... ")

            found = str(line).find(ServerIOSLauncher.ConsoleMarkers.SERVER_IP.value)
            if found != -1:
                ip = str(line)[
                    found + len(ServerIOSLauncher.ConsoleMarkers.SERVER_IP.value) :
                ].rstrip("\n")
                host = ip

            found = str(line).find(ServerIOSLauncher.ConsoleMarkers.SERVER_PORT.value)
            if found != -1:
                port = str(line)[
                    found + len(ServerIOSLauncher.ConsoleMarkers.SERVER_PORT.value) :
                ].rstrip("\n")
                port = int(port)

            if str(line).find(ServerIOSLauncher.ConsoleMarkers.CONNECTED.value) != -1:
                # rpc server reports that it successfully connected
                break
        watchdog_thread.join()

        if switch_process_was_terminated.on:
            raise TimeoutError("Can't get a response from the iOS Server.")

        if should_print_host_and_port:
            if host is None or port is None:
                raise RuntimeError("No messages with actual host and port.")
            self.port = port


class ServerIOSContextManager:
    """
    Context manager for ServerIOSLauncher.
    To work with ServerIOSLauncher, it is preferable to use this class
    so that the terminate method is called in any case.
    """

    def __init__(self, mode, host, port, key):
        self.__mode = mode
        self.__host = host
        self.__port = port
        self.__key = key
        self.__ios_rpc_server_launcher = None

    def __enter__(self):
        self.__ios_rpc_server_launcher = ServerIOSLauncher(
            self.__mode, self.__host, self.__port, self.__key
        )
        return self.__ios_rpc_server_launcher

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.__ios_rpc_server_launcher is not None:
            self.__ios_rpc_server_launcher.terminate()
            self.__ios_rpc_server_launcher = None
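# --- Illustrative usage (editor's addition): a minimal sketch that assumes the
# BUNDLE_ID and BUNDLE_PATH environment variables are set and that a tracker
# is already listening on the (assumed) host/port below.
# The context manager guarantees terminate() runs even if the body raises.
if __name__ == "__main__":
    with ServerIOSContextManager(
        mode=RPCServerMode.tracker.value,
        host="127.0.0.1",
        port=9190,
        key="iphone",
    ) as ios_server:
        print("iOS RPC server is up on", ios_server.host, ios_server.port)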
https://github.com/zk-ml/tachikoma
python/tvm/rpc/testing.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unnecessary-comprehension
""" Testing functions for the RPC server."""
import numpy as np
import tvm


# RPC test functions to be registered for unit-tests purposes
@tvm.register_func("rpc.test.addone")
def _addone(x):
    return x + 1


@tvm.register_func("rpc.test.strcat")
def _strcat(name, x):
    return "%s:%d" % (name, x)


@tvm.register_func("rpc.test.except")
def _remotethrow(name):
    raise ValueError("%s" % name)


@tvm.register_func("rpc.test.runtime_str_concat")
def _runtime_str_concat(x, y):
    return x + y


@tvm.register_func("rpc.test.remote_array_func")
def _remote_array_func(y):
    x = np.ones((3, 4))
    np.testing.assert_equal(y.numpy(), x)


@tvm.register_func("rpc.test.add_to_lhs")
def _add_to_lhs(x):
    return lambda y: x + y


@tvm.register_func("rpc.test.remote_return_nd")
def _my_module(name):
    # Use closure to check the ref counter correctness
    nd = tvm.nd.array(np.zeros(10).astype("float32"))

    if name == "get_arr":
        return lambda: nd
    if name == "ref_count":
        return lambda: tvm.testing.object_use_count(nd)
    if name == "get_elem":
        return lambda idx: nd.numpy()[idx]
    if name == "get_arr_elem":
        return lambda arr, idx: arr.numpy()[idx]
    raise RuntimeError("unknown name")
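# --- Illustrative usage (editor's addition): a minimal sketch; the localhost
# address is an example assumption. The functions above are registered in
# whichever process imports this module, so the server process imports it too
# via the documented server_init_callback hook.
if __name__ == "__main__":
    from tvm import rpc

    def _init():
        import tvm.rpc.testing  # registers the rpc.test.* functions server-side

    _server = rpc.Server(host="127.0.0.1", server_init_callback=_init)
    _remote = rpc.connect("127.0.0.1", _server.port)
    assert _remote.get_function("rpc.test.addone")(10) == 11
    assert _remote.get_function("rpc.test.strcat")("name", 1) == "name:1"
    _server.terminate()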
https://github.com/zk-ml/tachikoma
python/tvm/rpc/tornado_util.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities used in tornado."""

import socket
import errno
from tornado import ioloop


class TCPHandler(object):
    """TCP socket handler backed by the tornado event loop.

    Parameters
    ----------
    sock : Socket
        The TCP socket, will set it to non-blocking mode.
    """

    def __init__(self, sock):
        self._sock = sock
        self._ioloop = ioloop.IOLoop.current()
        self._sock.setblocking(0)
        self._pending_write = []
        self._signal_close = False

        def _event_handler(_, events):
            self._event_handler(events)

        self._ioloop.add_handler(
            self._sock.fileno(), _event_handler, self._ioloop.READ | self._ioloop.ERROR
        )

    def signal_close(self):
        """Signal the handler to close.

        The handler will be closed after the existing
        pending messages are sent to the peer.
        """
        if not self._pending_write:
            self.close()
        else:
            self._signal_close = True

    def close(self):
        """Close the socket"""
        if self._sock is not None:
            try:
                self._ioloop.remove_handler(self._sock.fileno())
                self._sock.close()
            except socket.error:
                pass
            self._sock = None
            self.on_close()

    def write_message(self, message, binary=True):
        assert binary
        if self._sock is None:
            raise IOError("socket is already closed")
        self._pending_write.append(message)
        self._update_write()

    def _event_handler(self, events):
        """Central event handler"""
        if (events & self._ioloop.ERROR) or (events & self._ioloop.READ):
            if self._update_read() and (events & self._ioloop.WRITE):
                self._update_write()
        elif events & self._ioloop.WRITE:
            self._update_write()

    def _update_write(self):
        """Update the state on write"""
        while self._pending_write:
            try:
                msg = self._pending_write[0]
                if self._sock is None:
                    return
                nsend = self._sock.send(msg)
                if nsend != len(msg):
                    self._pending_write[0] = msg[nsend:]
                else:
                    self._pending_write.pop(0)
            except socket.error as err:
                if err.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
                    break
                self.on_error(err)

        if self._pending_write:
            self._ioloop.update_handler(
                self._sock.fileno(), self._ioloop.READ | self._ioloop.ERROR | self._ioloop.WRITE
            )
        else:
            if self._signal_close:
                self.close()
            else:
                self._ioloop.update_handler(
                    self._sock.fileno(), self._ioloop.READ | self._ioloop.ERROR
                )

    def _update_read(self):
        """Update state when there is a read event"""
        try:
            msg = bytes(self._sock.recv(4096))
            if msg:
                self.on_message(msg)
                return True
            # normal close, remote is closed
            self.close()
        except socket.error as err:
            if err.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
                pass
            else:
                self.on_error(err)
        return False
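# --- Illustrative subclass (editor's addition): a minimal sketch.
# TCPHandler leaves on_message/on_close/on_error to its users; this echo
# handler simply writes every received chunk back to the peer.
class _ExampleEchoHandler(TCPHandler):
    """Echo back whatever the peer sends (illustrative only)."""

    def on_message(self, message):
        # `message` is a bytes chunk read from the socket.
        self.write_message(message, binary=True)

    def on_close(self):
        pass

    def on_error(self, err):
        # Drop the connection once pending writes are flushed.
        self.signal_close()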
https://github.com/zk-ml/tachikoma
python/tvm/rpc/tracker.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RPC Tracker, tracks and distributes the TVM RPC resources.

This folder implements the tracker server logic.

Note
----
The tracker is a TCP-based REST API with the following protocol:
- Initial handshake to the peer
  - RPC_TRACKER_MAGIC
- Normal message: [size(int32), json-data]
- Each message is initiated by the client, and the tracker replies with a json.

List of available APIs:

- PING: check if tracker is alive
  - input: [TrackerCode.PING]
  - return: TrackerCode.SUCCESS
- PUT: report resource to tracker
  - input: [TrackerCode.PUT, [port, match-key]]
  - return: TrackerCode.SUCCESS
  - note: match-key is a randomly generated key used to identify the resource
    during the connection.
- REQUEST: request a new resource from tracker
  - input: [TrackerCode.REQUEST, [key, user, priority]]
  - return: [TrackerCode.SUCCESS, [url, port, match-key]]
"""
# pylint: disable=invalid-name

import asyncio
import heapq
import logging
import socket
import threading
import errno
import struct
import json
from tvm.contrib.popen_pool import PopenWorker

try:
    from tornado import ioloop
    from . import tornado_util
except ImportError as error_msg:
    raise ImportError(
        "RPCTracker module requires tornado package %s. Try 'pip install tornado'." % error_msg
    )

from .._ffi.base import py_str
from . import base
from .base import RPC_TRACKER_MAGIC, TrackerCode

logger = logging.getLogger("RPCTracker")
console_handler = logging.StreamHandler()
console_handler.setFormatter(
    logging.Formatter(
        fmt="%(asctime)s.%(msecs)03d %(levelname)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
    )
)
logger.addHandler(console_handler)
logger.setLevel(logging.INFO)
logger.propagate = False


class Scheduler(object):
    """Abstract interface of scheduler."""

    def put(self, value):
        """Push a resource into the scheduler.

        This function can trigger callbacks in the scheduler.

        Parameters
        ----------
        value : object
            The resource to be put in the scheduler.
        """
        raise NotImplementedError()

    def request(self, user, priority, callback):
        """Request a resource.

        Parameters
        ----------
        user : str
            The user who is requesting the resource.

        priority : int
            The job priority

        callback : function: value->bool
            Callback function to receive a resource when it is ready;
            it returns True if the resource is consumed.
""" raise NotImplementedError() def remove(self, value): """Remove a resource in the scheduler Parameters ---------- value: object The resource to remove """ def summary(self): """Get summary information of the scheduler.""" raise NotImplementedError() class PriorityScheduler(Scheduler): """Priority based scheduler, FIFO based on request order""" def __init__(self, key): self._key = key self._request_cnt = 0 self._lock = threading.Lock() self._values = [] self._requests = [] def _schedule(self): while self._requests and self._values: value = self._values.pop(0) item = heapq.heappop(self._requests) callback = item[-1] if callback(value[1:]): value[0].pending_matchkeys.remove(value[-1]) else: self._values.append(value) def put(self, value): self._values.append(value) self._schedule() def request(self, user, priority, callback): with self._lock: heapq.heappush(self._requests, (-priority, self._request_cnt, callback)) self._request_cnt += 1 self._schedule() def remove(self, value): if value in self._values: self._values.remove(value) self._schedule() def summary(self): """Get summary information of the scheduler.""" return {"free": len(self._values), "pending": len(self._requests)} class TCPEventHandler(tornado_util.TCPHandler): """Base asynchronize message handler. The tracker and client follows a simple message protocol. The message is in form [nbytes(int32)] [json-str]. All the information is packed in json-str """ def __init__(self, tracker, sock, addr): super(TCPEventHandler, self).__init__(sock) self._data = bytearray() self._tracker = tracker self._msg_size = 0 self._addr = addr self._init_req_nbytes = 4 self._info = {} # list of pending match keys that has not been used. self.pending_matchkeys = set() self._tracker._connections.add(self) self.put_values = [] def name(self): """name of connection""" return "TCPSocket: %s" % str(self._addr) def summary(self): """Summary of this connection""" return self._info def _init_conn(self, message): """Initialize the connection""" if len(message) != 4: logger.warning("Invalid connection from %s", self.name()) self.close() magic = struct.unpack("<i", message)[0] if magic != RPC_TRACKER_MAGIC: logger.warning("Invalid magic from %s", self.name()) self.close() self.write_message(struct.pack("<i", RPC_TRACKER_MAGIC), binary=True) self._init_req_nbytes = 0 def on_message(self, message): """Callback when a message is received. 
Parameters ---------- message : bytearray The bytes received """ assert isinstance(message, bytes) if self._init_req_nbytes: self._init_conn(message) return self._data += message while True: if self._msg_size == 0: if len(self._data) >= 4: self._msg_size = struct.unpack("<i", self._data[:4])[0] else: return if self._msg_size != 0 and len(self._data) >= self._msg_size + 4: msg = py_str(bytes(self._data[4 : 4 + self._msg_size])) del self._data[: 4 + self._msg_size] self._msg_size = 0 # pylint: disable=broad-except self.call_handler(json.loads(msg)) else: return def ret_value(self, data): """return value to the output""" data = json.dumps(data) self.write_message(struct.pack("<i", len(data)), binary=True) self.write_message(data.encode("utf-8"), binary=True) def call_handler(self, args): """Event handler when json request arrives.""" code = args[0] if code == TrackerCode.PUT: key = args[1] port, matchkey = args[2] self.pending_matchkeys.add(matchkey) # got custom address (from rpc server) if len(args) >= 4 and args[3] is not None: value = (self, args[3], port, matchkey) else: value = (self, self._addr[0], port, matchkey) self._tracker.put(key, value) self.put_values.append(value) self.ret_value(TrackerCode.SUCCESS) elif code == TrackerCode.REQUEST: key = args[1] user = args[2] priority = args[3] def _cb(value): # if the connection is already closed if not self._sock: return False try: self.ret_value([TrackerCode.SUCCESS, value]) except (socket.error, IOError): return False return True self._tracker.request(key, user, priority, _cb) elif code == TrackerCode.PING: self.ret_value(TrackerCode.SUCCESS) elif code == TrackerCode.GET_PENDING_MATCHKEYS: self.ret_value(list(self.pending_matchkeys)) elif code == TrackerCode.STOP: # safe stop tracker if self._tracker._stop_key == args[1]: self.ret_value(TrackerCode.SUCCESS) self._tracker.stop() else: self.ret_value(TrackerCode.FAIL) elif code == TrackerCode.UPDATE_INFO: info = args[1] assert isinstance(info, dict) if info["addr"][0] is None: info["addr"][0] = self._addr[0] self._info.update(info) self.ret_value(TrackerCode.SUCCESS) elif code == TrackerCode.SUMMARY: status = self._tracker.summary() self.ret_value([TrackerCode.SUCCESS, status]) else: logger.warning("Unknown tracker code %d", code) self.close() def on_close(self): self._tracker.close(self) def on_error(self, err): logger.warning("%s: Error in RPC Tracker: %s", self.name(), err) self.close() class TrackerServerHandler(object): """Tracker that tracks the resources.""" def __init__(self, sock, stop_key): self._scheduler_map = {} self._sock = sock self._sock.setblocking(0) self._ioloop = ioloop.IOLoop.current() self._stop_key = stop_key self._connections = set() def _event_handler(_, events): self._on_event(events) self._ioloop.add_handler(self._sock.fileno(), _event_handler, self._ioloop.READ) def _on_event(self, _): while True: try: conn, addr = self._sock.accept() TCPEventHandler(self, conn, addr) except socket.error as err: if err.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK): break def create_scheduler(self, key): """Create a new scheduler.""" return PriorityScheduler(key) def put(self, key, value): """Report a new resource to the tracker.""" if key not in self._scheduler_map: self._scheduler_map[key] = self.create_scheduler(key) self._scheduler_map[key].put(value) def request(self, key, user, priority, callback): """Request a new resource.""" if key not in self._scheduler_map: self._scheduler_map[key] = self.create_scheduler(key) self._scheduler_map[key].request(user, priority, callback) 
    def close(self, conn):
        self._connections.remove(conn)
        if "key" in conn._info:
            for value in conn.put_values:
                _, _, _, key = value
                rpc_key = key.split(":")[0]
                self._scheduler_map[rpc_key].remove(value)

    def stop(self):
        """Safely stop tracker."""
        for conn in list(self._connections):
            conn.close()
        self._sock.close()
        self._ioloop.stop()

    def summary(self):
        """Return a dict summarizing current status."""
        qinfo = {}
        for k, v in self._scheduler_map.items():
            qinfo[k] = v.summary()
        cinfo = []
        # ignore client connections without key
        for conn in self._connections:
            res = conn.summary()
            if res.get("key", "").startswith("server"):
                cinfo.append(res)
        return {"queue_info": qinfo, "server_info": cinfo}

    def run(self):
        """Run the tracker server"""
        self._ioloop.start()


def _tracker_server(listen_sock, stop_key):
    asyncio.set_event_loop(asyncio.new_event_loop())
    handler = TrackerServerHandler(listen_sock, stop_key)
    handler.run()


class PopenTrackerServerState(object):
    """Internal PopenTrackerServer State"""

    current = None

    def __init__(self, host, port=9190, port_end=9199, silent=False):
        if silent:
            logger.setLevel(logging.WARN)

        sock = socket.socket(base.get_addr_family((host, port)), socket.SOCK_STREAM)
        self.port = None
        self.stop_key = base.random_key("tracker")
        for my_port in range(port, port_end):
            try:
                sock.bind((host, my_port))
                self.port = my_port
                break
            except socket.error as sock_err:
                if sock_err.errno in [errno.EADDRINUSE]:
                    continue
                raise sock_err
        if not self.port:
            raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
        logger.info("bind to %s:%d", host, self.port)
        sock.listen(1)
        self.thread = threading.Thread(target=_tracker_server, args=(sock, self.stop_key))
        self.thread.start()
        self.host = host


def _popen_start_tracker_server(host, port=9190, port_end=9199, silent=False):
    # This is a function that will be sent to the
    # Popen worker to run on a separate process.

    # Create and start the server in a different thread
    state = PopenTrackerServerState(host, port, port_end, silent)
    PopenTrackerServerState.current = state
    # returns the port so that the main can get the port number.
    return (state.port, state.stop_key)


class Tracker(object):
    """Start RPC tracker on a separate process.

    Python implementation based on PopenWorker.

    Parameters
    ----------
    host : str
        The host url of the server.

    port : int
        The TCP port to bind to.

    port_end : int, optional
        The end TCP port to search.

    silent: bool, optional
        Whether to run in silent mode.
    """

    def __init__(self, host="0.0.0.0", port=9190, port_end=9199, silent=False):
        if silent:
            logger.setLevel(logging.WARN)
        self.proc = PopenWorker()
        # send the function
        self.proc.send(
            _popen_start_tracker_server,
            [
                host,
                port,
                port_end,
                silent,
            ],
        )
        # receive the port
        self.port, self.stop_key = self.proc.recv()
        self.host = host

    def _stop_tracker(self):
        sock = socket.socket(base.get_addr_family((self.host, self.port)), socket.SOCK_STREAM)
        sock.connect(("127.0.0.1", self.port))
        sock.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC))
        magic = struct.unpack("<i", base.recvall(sock, 4))[0]
        assert magic == base.RPC_TRACKER_MAGIC
        base.sendjson(sock, [TrackerCode.STOP, self.stop_key])
        assert base.recvjson(sock) == TrackerCode.SUCCESS
        sock.close()

    def terminate(self):
        """Terminate the server process"""
        if self.proc:
            if self.proc.is_alive():
                self._stop_tracker()
                self.proc.join(0.1)
            if self.proc.is_alive():
                logger.info("Terminating Tracker Server...")
                self.proc.kill()
            self.proc = None

    def __del__(self):
        try:
            self.terminate()
        except TypeError:
            pass
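# --- Illustrative usage (editor's addition): a minimal sketch; the host,
# port, and device key below are example assumptions.
# Start a tracker, register a server under a key, then query the queue
# summary through the client-side tracker session.
if __name__ == "__main__":
    from tvm import rpc

    _tracker = Tracker(host="127.0.0.1", port=9190)
    _server = rpc.Server(
        host="127.0.0.1", key="example", tracker_addr=("127.0.0.1", _tracker.port)
    )
    _session = rpc.connect_tracker("127.0.0.1", _tracker.port)
    print(_session.text_summary())
    _server.terminate()
    _tracker.terminate()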
https://github.com/zk-ml/tachikoma
python/tvm/runtime/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """TVM runtime namespace.""" # class exposures from .packed_func import PackedFunc from .object import Object from .object_path import ObjectPath, ObjectPathPair from .object_generic import ObjectGeneric, ObjectTypes from .ndarray import NDArray, DataType, DataTypeCode, Device from .module import Module, num_threads from .profiling import Report # function exposures from .object_generic import convert_to_object, convert, const from .ndarray import device, cpu, cuda, gpu, opencl, cl, vulkan, metal, mtl from .ndarray import vpi, rocm, ext_dev from .module import load_module, enabled, system_lib, load_static_library from .container import String, ShapeTuple from .params import save_param_dict, load_param_dict from . import executor
https://github.com/zk-ml/tachikoma
python/tvm/runtime/_ffi_api.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """FFI APIs for tvm.runtime""" import tvm._ffi # Exports functions registered via TVM_REGISTER_GLOBAL with the "runtime" prefix. # e.g. TVM_REGISTER_GLOBAL("runtime.ModuleLoadFromFile") tvm._ffi._init_api("runtime", __name__)
https://github.com/zk-ml/tachikoma
python/tvm/runtime/_ffi_node_api.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""FFI for tvm.node"""
import tvm._ffi

# The implementations below are default ones when the corresponding
# functions are not available in the runtime only mode.
# They will be overridden via _init_api to the ones registered
# via TVM_REGISTER_GLOBAL in the compiler mode.
def AsRepr(obj):
    return obj.type_key() + "(" + str(obj.handle.value) + ")"


def NodeListAttrNames(obj):
    return lambda x: 0


def NodeGetAttr(obj, name):
    raise AttributeError()


def SaveJSON(obj):
    raise RuntimeError("Do not support object serialization in runtime only mode")


def LoadJSON(json_str):
    raise RuntimeError("Do not support object serialization in runtime only mode")


# Exports functions registered via TVM_REGISTER_GLOBAL with the "node" prefix.
# e.g. TVM_REGISTER_GLOBAL("node.AsRepr")
tvm._ffi._init_api("node", __name__)
https://github.com/zk-ml/tachikoma
python/tvm/runtime/container.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Runtime container structures."""
import tvm._ffi

from .object import Object, PyNativeObject
from .object_generic import ObjectTypes
from . import _ffi_api


def getitem_helper(obj, elem_getter, length, idx):
    """Helper function to implement a pythonic getitem function.

    Parameters
    ----------
    obj: object
        The original object

    elem_getter : function
        A simple function that takes index and return a single element.

    length : int
        The size of the array

    idx : int or slice
        The argument passed to getitem

    Returns
    -------
    result : object
        The result of getitem
    """
    if isinstance(idx, slice):
        start = idx.start if idx.start is not None else 0
        stop = idx.stop if idx.stop is not None else length
        step = idx.step if idx.step is not None else 1
        if start < 0:
            start += length
        if stop < 0:
            stop += length
        return [elem_getter(obj, i) for i in range(start, stop, step)]

    if idx < -length or idx >= length:
        raise IndexError("Index out of range. size: {}, got index {}".format(length, idx))
    if idx < 0:
        idx += length
    return elem_getter(obj, idx)


@tvm._ffi.register_object("runtime.ADT")
class ADT(Object):
    """Algebraic data type (ADT) object.

    Parameters
    ----------
    tag : int
        The tag of ADT.

    fields : list[Object] or tuple[Object]
        The source tuple.
    """

    def __init__(self, tag, fields):
        for f in fields:
            assert isinstance(
                f, ObjectTypes
            ), "Expect object or tvm NDArray type, but received : {0}".format(type(f))
        self.__init_handle_by_constructor__(_ffi_api.ADT, tag, *fields)

    @property
    def tag(self):
        return _ffi_api.GetADTTag(self)

    def __getitem__(self, idx):
        return getitem_helper(self, _ffi_api.GetADTFields, len(self), idx)

    def __len__(self):
        return _ffi_api.GetADTSize(self)


def tuple_object(fields=None):
    """Create an ADT object from a source tuple.

    Parameters
    ----------
    fields : list[Object] or tuple[Object]
        The source tuple.

    Returns
    -------
    ret : ADT
        The created object.
    """
    fields = fields if fields else []
    for f in fields:
        assert isinstance(
            f, ObjectTypes
        ), "Expect object or tvm NDArray type, but received : {0}".format(type(f))
    return _ffi_api.Tuple(*fields)


@tvm._ffi.register_object("runtime.String")
class String(str, PyNativeObject):
    """TVM runtime.String object, represented as a python str.

    Parameters
    ----------
    content : str
        The content string used to construct the object.
""" __slots__ = ["__tvm_object__"] def __new__(cls, content): """Construct from string content.""" val = str.__new__(cls, content) val.__init_tvm_object_by_constructor__(_ffi_api.String, content) return val # pylint: disable=no-self-argument def __from_tvm_object__(cls, obj): """Construct from a given tvm object.""" content = _ffi_api.GetFFIString(obj) val = str.__new__(cls, content) val.__tvm_object__ = obj return val @tvm._ffi.register_object("runtime.ShapeTuple") class ShapeTuple(Object): """TVM runtime ShapeTuple object. Parameters ---------- shape : list[int] The shape list used to construct the object. """ def __init__(self, shape): assert isinstance(shape, (list, tuple)), "Expect list of tuple, but received : {0}".format( type(shape) ) for x in shape: assert isinstance(x, int), "Expect int type, but received : {0}".format(type(x)) self.__init_handle_by_constructor__(_ffi_api.ShapeTuple, *shape) def __len__(self): return _ffi_api.GetShapeTupleSize(self) def __getitem__(self, idx): return getitem_helper(self, _ffi_api.GetShapeTupleElem, len(self), idx) def __eq__(self, other): if self.same_as(other): return True if len(self) != len(other): return False for a, b in zip(self, other): if a != b: return False return True
https://github.com/zk-ml/tachikoma
python/tvm/runtime/executor/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """This module contains Python wrappers for the TVM C++ Executor implementations. NOTE: at present, only AOT Executor is contained here. The others are: - GraphExecutor, in python/tvm/contrib/graph_executor.py - VM Executor, in python/tvm/runtime/vm.py TODO(areusch): Consolidate these into this module. """ from .aot_executor import AotModule
https://github.com/zk-ml/tachikoma
python/tvm/runtime/executor/aot_executor.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A Python wrapper for the Module-based Model Runtime Interface for Ahead-of-Time compilation."""

import numpy as np


class AotModule(object):
    """Wraps the AOT executor runtime.Module.

    This is a thin wrapper of the underlying TVM module.
    You can also directly call set_input, run, and get_output
    of the underlying module functions.

    Parameters
    ----------
    module : tvm.runtime.Module
        The internal tvm module that holds the implemented model functions.

    Attributes
    ----------
    module : tvm.runtime.Module
        The internal tvm module that holds the implemented model functions.

    Examples
    --------

    .. code-block:: python

        import tvm
        from tvm import relay
        from tvm.contrib import graph_executor

        # build the library
        lib = relay.build(...)
        lib.export_library("compiled_lib.so")
        # load it back as a runtime
        lib: tvm.runtime.Module = tvm.runtime.load_module("compiled_lib.so")
        # Call the library factory function for default and create
        # a new runtime.Module, wrap with aot module.
        gmod = tvm.runtime.executor.AotModule(lib["default"](dev))
        # use the aot module.
        gmod.set_input("x", data)
        gmod.run()
    """

    def __init__(self, module):
        self.module = module
        self._set_input = module["set_input"]
        self._run = module["run"]
        self._get_output = module["get_output"]
        self._get_input = module["get_input"]
        self._get_num_outputs = module["get_num_outputs"]
        self._get_input_index = module["get_input_index"]
        self._get_num_inputs = module["get_num_inputs"]

    def set_input(self, key=None, value=None, **params):
        """Set inputs to the module via kwargs

        Parameters
        ----------
        key : int or str
            The input key

        value : the input value
            The input value

        params : dict of str to NDArray
            Additional arguments
        """
        if key is not None:
            v = self._get_input(key)
            if v is None:
                raise RuntimeError("Could not find '%s' in model's inputs" % key)
            v.copyfrom(value)

        if params:
            # upload big arrays first to avoid memory issue in rpc mode
            keys = list(params.keys())
            keys.sort(key=lambda x: -np.prod(params[x].shape))
            for k in keys:
                # TODO(zhiics) Skip the weights for submodule in a better way.
                # We should use MetadataModule for initialization and remove
                # params from set_input
                val = self._get_input(k)
                if val:
                    self._get_input(k).copyfrom(params[k])

    def run(self, **input_dict):
        """Run forward execution of the model

        Parameters
        ----------
        input_dict : dict of str to NDArray
            List of input values to be fed to the model.
        """
        if input_dict:
            self.set_input(**input_dict)
        self._run()

    def get_num_outputs(self):
        """Get the number of outputs from the model

        Returns
        -------
        count : int
            The number of outputs.
        """
        return self._get_num_outputs()

    def get_num_inputs(self):
        """Get the number of inputs to the model

        Returns
        -------
        count : int
            The number of inputs.
""" return self._get_num_inputs() def get_input(self, index, out=None): """Get index-th input to out Parameters ---------- index : int The input index out : NDArray The output array container """ if out: self._get_input(index).copyto(out) return out return self._get_input(index) def get_input_index(self, name): """Get inputs index via input name. Parameters ---------- name : str The input key name Returns ------- index: int The input index. -1 will be returned if the given input name is not found. """ return self._get_input_index(name) def get_output(self, index, out=None): """Get index-th output to out Parameters ---------- index : int The output index out : NDArray The output array container """ if out: self._get_output(index, out) return out return self._get_output(index)
https://github.com/zk-ml/tachikoma
python/tvm/runtime/module.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, unused-import, import-outside-toplevel, inconsistent-return-statements """Runtime Module namespace.""" import os import ctypes import struct from typing import Sequence import numpy as np import tvm._ffi from tvm._ffi.base import _LIB, check_call, c_str, string_types, _RUNTIME_ONLY from tvm._ffi.libinfo import find_include_path from .packed_func import PackedFunc, PackedFuncHandle, _set_class_module from . import _ffi_api class BenchmarkResult: """Runtimes from benchmarking""" def __init__(self, results: Sequence[float]): """Construct a new BenchmarkResult from a sequence of runtimes. Parameters ---------- results : Sequence[float] Raw times from benchmarking Attributes ---------- min : float Minimum runtime in seconds of all results. mean : float Mean runtime in seconds of all results. If py:meth:`Module.time_evaluator` or `benchmark` is called with `number` > 0, then each result is already the mean of a `number` of runtimes, so this becomes the mean of means. median : float Median runtime in seconds of all results. If py:meth:`Module.time_evaluator` is called with `number` > 0, then each result is already the mean of a `number` of runtimes, so this becomes the median of means. max : float Maximum runtime in seconds of all results. If py:meth:`Module.time_evaluator` is called with `number` > 0, then each result is already the mean of a `number` of runtimes, so this becomes the maximum of those means. std : float Standard deviation in seconds of runtimes. If py:meth:`Module.time_evaluator` is called with `number` > 0, then each result is already the mean of a `number` of runtimes, so this becomes the standard deviation of means. results : Sequence[float] The collected runtimes (in seconds). This may be a series of mean runtimes if py:meth:`Module.time_evaluator` or `benchmark` was run with `number` > 1. 
""" self.results = results self.mean = np.mean(self.results) self.std = np.std(self.results) self.median = np.median(self.results) self.min = np.min(self.results) self.max = np.max(self.results) def __repr__(self): return "BenchmarkResult(min={}, mean={}, median={}, max={}, std={}, results={})".format( self.min, self.mean, self.median, self.max, self.std, self.results ) def __str__(self): return """Execution time summary: {:^12} {:^12} {:^12} {:^12} {:^12} {:^12.4f} {:^12.4f} {:^12.4f} {:^12.4f} {:^12.4f} """.format( "mean (ms)", "median (ms)", "max (ms)", "min (ms)", "std (ms)", self.mean * 1000, self.median * 1000, self.max * 1000, self.min * 1000, self.std * 1000, ) class Module(object): """Runtime Module.""" __slots__ = ["handle", "_entry", "entry_name"] def __init__(self, handle): self.handle = handle self._entry = None self.entry_name = "__tvm_main__" def __del__(self): if _LIB: check_call(_LIB.TVMModFree(self.handle)) def __hash__(self): return ctypes.cast(self.handle, ctypes.c_void_p).value @property def entry_func(self): """Get the entry function Returns ------- f : tvm.runtime.PackedFunc The entry function if exist """ if self._entry: return self._entry self._entry = self.get_function(self.entry_name) return self._entry def implements_function(self, name, query_imports=False): """Returns True if the module has a definition for the global function with name. Note that has_function(name) does not imply get_function(name) is non-null since the module may be, eg, a CSourceModule which cannot supply a packed-func implementation of the function without further compilation. However, get_function(name) non null should always imply has_function(name). Parameters ---------- name : str The name of the function query_imports : bool Whether to also query modules imported by this module. Returns ------- b : Bool True if module (or one of its imports) has a definition for name. """ return _ffi_api.ModuleImplementsFunction(self, name, query_imports) def get_function(self, name, query_imports=False): """Get function from the module. Parameters ---------- name : str The name of the function query_imports : bool Whether also query modules imported by this module. Returns ------- f : tvm.runtime.PackedFunc The result function. """ ret_handle = PackedFuncHandle() check_call( _LIB.TVMModGetFunction( self.handle, c_str(name), ctypes.c_int(query_imports), ctypes.byref(ret_handle) ) ) if not ret_handle.value: raise AttributeError("Module has no function '%s'" % name) return PackedFunc(ret_handle, False) def import_module(self, module): """Add module to the import list of current one. Parameters ---------- module : tvm.runtime.Module The other module. """ check_call(_LIB.TVMModImport(self.handle, module.handle)) def __getitem__(self, name): if not isinstance(name, string_types): raise ValueError("Can only take string as function name") return self.get_function(name) def __eq__(self, other): return self.handle.value == other.handle.value def __call__(self, *args): if self._entry: return self._entry(*args) # pylint: disable=not-callable return self.entry_func(*args) def __repr__(self): return "Module(%s, %x)" % (self.type_key, self.handle.value) @property def type_key(self): """Get type key of the module.""" return _ffi_api.ModuleGetTypeKey(self) @property def format(self): """Get the format of the module.""" return _ffi_api.ModuleGetFormat(self) def get_source(self, fmt=""): """Get source code from module, if available. Parameters ---------- fmt : str, optional The specified format. 
Returns ------- source : str The result source code. """ return _ffi_api.ModuleGetSource(self, fmt) @property def imported_modules(self): """Get imported modules Returns ---------- modules : list of Module The module """ nmod = _ffi_api.ModuleImportsSize(self) return [_ffi_api.ModuleGetImport(self, i) for i in range(nmod)] @property def is_dso_exportable(self): """Returns true if module is 'DSO exportable', ie can be included in result of export_library by the external compiler directly. Returns ------- b : Bool True if the module is DSO exportable. """ return _ffi_api.ModuleIsDSOExportable(self) def save(self, file_name, fmt=""): """Save the module to file. This do not save the dependent device modules. See also export_shared Parameters ---------- file_name : str The name of the file. fmt : str The format of the file. See Also -------- runtime.Module.export_library : export the module to shared library. """ _ffi_api.ModuleSaveToFile(self, file_name, fmt) def time_evaluator( self, func_name, dev, number=10, repeat=1, min_repeat_ms=0, limit_zero_time_iterations=100, cooldown_interval_ms=0, repeats_to_cooldown=1, f_preproc="", ): """Get an evaluator that measures time cost of running function. Parameters ---------- func_name: str The name of the function in the module. dev: Device The device we should run this function on. number: int The number of times to run this function for taking average. We call these runs as one `repeat` of measurement. repeat: int, optional The number of times to repeat the measurement. In total, the function will be invoked (1 + number x repeat) times, where the first one is warm up and will be discarded. The returned result contains `repeat` costs, each of which is an average of `number` costs. min_repeat_ms: int, optional The minimum duration of one `repeat` in milliseconds. By default, one `repeat` contains `number` runs. If this parameter is set, the parameters `number` will be dynamically adjusted to meet the minimum duration requirement of one `repeat`. i.e., When the run time of one `repeat` falls below this time, the `number` parameter will be automatically increased. limit_zero_time_iterations: int, optional The maximum number of repeats when measured time is equal to 0. It helps to avoid hanging during measurements. cooldown_interval_ms: int, optional The cooldown interval in milliseconds between the number of repeats defined by `repeats_to_cooldown`. repeats_to_cooldown: int, optional The number of repeats before the cooldown is activated. f_preproc: str, optional The preprocess function name we want to execute before executing the time evaluator. Note ---- The function will be invoked (1 + number x repeat) times, with the first call discarded in case there is lazy initialization. Returns ------- ftimer : function The function that takes same argument as func and returns a BenchmarkResult. The ProfileResult reports `repeat` time costs in seconds. """ try: feval = _ffi_api.RPCTimeEvaluator( self, func_name, dev.device_type, dev.device_id, number, repeat, min_repeat_ms, limit_zero_time_iterations, cooldown_interval_ms, repeats_to_cooldown, f_preproc, ) def evaluator(*args): """Internal wrapped evaluator.""" # Wrap feval so we can add more stats in future. 
blob = feval(*args) fmt = "@" + ("d" * repeat) results = struct.unpack(fmt, blob) return BenchmarkResult(results) return evaluator except NameError: raise NameError("time_evaluator is only supported when RPC is enabled") def _collect_from_import_tree(self, filter_func): """Helper function to collect modules from the tree matching a filter_func, then return it. Parameters ---------- filter_func : Callable[[Module], bool] A function which is invoked for each Module discovered in the import tree (including self). Returns ------- list[Module] : A list of matching Module. """ visited, stack, dso_modules = set(), [], [] # append root module visited.add(self) stack.append(self) while stack: module = stack.pop() if filter_func(module): dso_modules.append(module) for m in module.imported_modules: if m not in visited: visited.add(m) stack.append(m) return dso_modules def _collect_dso_modules(self): return self._collect_from_import_tree(lambda m: m.is_dso_exportable) def export_library(self, file_name, fcompile=None, addons=None, workspace_dir=None, **kwargs): """ Export the module and all imported modules into a single device library. This function only works on host LLVM modules, other runtime::Module subclasses will work with this API but they must support implement the save and load mechanisms of modules completely including saving from streams and files. This will pack your non-shared library module into a single shared library which can later be loaded by TVM. Parameters ---------- file_name : str The name of the shared library. fcompile : function(target, file_list, kwargs), optional The compilation function to use create the final library object during export. For example, when fcompile=_cc.create_shared, or when it is not supplied but module is "llvm," this is used to link all produced artifacts into a final dynamic library. This behavior is controlled by the type of object exported. If fcompile has attribute object_format, will compile host library to that format. Otherwise, will use default format "o". workspace_dir : str, optional The path of the directory used to create the intermediate artifacts when exporting the module. If this is not provided a temporary dir will be created. kwargs : dict, optional Additional arguments passed to fcompile Returns ------- result of fcompile() : unknown, optional If the compilation function returns an artifact it would be returned via export_library, if any. """ # NOTE: this function depends on contrib library features # which are only available in when TVM function is available. if _RUNTIME_ONLY: raise RuntimeError("Cannot call export_library in runtime only mode") # Extra dependencies during runtime. from pathlib import Path from tvm.contrib import cc as _cc, tar as _tar, utils as _utils if isinstance(file_name, Path): file_name = str(file_name) if self.type_key == "stackvm": if not file_name.endswith(".stackvm"): raise ValueError( "Module[%s]: can only be saved as stackvm format." "did you build with LLVM enabled?" % self.type_key ) self.save(file_name) return modules = self._collect_dso_modules() if workspace_dir is None: temp = _utils.tempdir() workspace_dir = temp.temp_dir files = addons if addons else [] is_system_lib = False has_c_module = False llvm_target_string = None for index, module in enumerate(modules): if fcompile is not None and hasattr(fcompile, "object_format"): if module.type_key == "c": assert module.format in [ "c", "cc", "cpp", "cu", ], "The module.format needs to be either c, cc, cpp or cu." 
object_format = module.format has_c_module = True else: object_format = fcompile.object_format else: if module.type_key == "c": if len(module.format) > 0: assert module.format in [ "c", "cc", "cpp", "cu", ], "The module.format needs to be either c, cc, cpp, or cu." object_format = module.format else: object_format = "c" if "cc" in kwargs: if kwargs["cc"] == "nvcc": object_format = "cu" has_c_module = True else: assert module.type_key == "llvm" or module.type_key == "static_library" object_format = "o" path_obj = os.path.join(workspace_dir, f"lib{index}.{object_format}") module.save(path_obj) files.append(path_obj) is_system_lib = ( module.type_key == "llvm" and module.get_function("__tvm_is_system_module")() ) llvm_target_string = ( module.type_key == "llvm" and module.get_function("_get_target_string")() ) if not fcompile: if file_name.endswith(".tar"): fcompile = _tar.tar else: fcompile = _cc.create_shared if llvm_target_string is None and hasattr(fcompile, "get_target_triple"): triple = fcompile.get_target_triple() assert triple, "Target triple should not be empty" llvm_target_string = "llvm -mtriple " + triple if getattr(fcompile, "need_system_lib", False) and not is_system_lib: raise ValueError("%s need --system-lib option" % str(fcompile)) if self.imported_modules: if enabled("llvm") and llvm_target_string: path_obj = os.path.join(workspace_dir, f"devc.{object_format}") m = _ffi_api.ModulePackImportsToLLVM(self, is_system_lib, llvm_target_string) m.save(path_obj) files.append(path_obj) else: path_cc = os.path.join(workspace_dir, "devc.c") with open(path_cc, "w") as f: f.write(_ffi_api.ModulePackImportsToC(self, is_system_lib)) files.append(path_cc) # The imports could contain a c module but the object format could be tar # Thus, it would not recognize the following include paths as options # which are there assuming a c compiler is the fcompile. if has_c_module and not file_name.endswith(".tar"): options = [] if "options" in kwargs: opts = kwargs["options"] options = opts if isinstance(opts, (list, tuple)) else [opts] opts = options + ["-I" + path for path in find_include_path()] kwargs.update({"options": opts}) return fcompile(file_name, files, **kwargs) def system_lib(): """Get system-wide library module singleton. System lib is a global module that contains self register functions in startup. Unlike normal dso modules which need to be loaded explicitly. It is useful in environments where dynamic loading api like dlopen is banned. To build system lib function, simply specify target option ```llvm --system-lib``` The system lib will be available as long as the result code is linked by the program. The system lib is intended to be linked and loaded during the entire life-cyle of the program. If you want dynamic loading features, use dso modules instead. Returns ------- module : runtime.Module The system-wide library module. """ return _ffi_api.SystemLib() def load_module(path, fmt=""): """Load module from file. Parameters ---------- path : str The path to the module file. fmt : str, optional The format of the file, if not specified it will be inferred from suffix of the file. Returns ------- module : runtime.Module The loaded module Note ---- This function will automatically call cc.create_shared if the path is in format .o or .tar """ if os.path.isfile(path): path = os.path.realpath(path) else: raise ValueError("cannot find file %s" % path) # High level handling for .o and .tar file. # We support this to be consistent with RPC module load. 
if path.endswith(".o"): # Extra dependencies during runtime. from tvm.contrib import cc as _cc _cc.create_shared(path + ".so", path) path += ".so" elif path.endswith(".tar"): # Extra dependencies during runtime. from tvm.contrib import cc as _cc, utils as _utils, tar as _tar tar_temp = _utils.tempdir(custom_path=path.replace(".tar", "")) _tar.untar(path, tar_temp.temp_dir) files = [tar_temp.relpath(x) for x in tar_temp.listdir()] _cc.create_shared(path + ".so", files) path += ".so" # Redirect to the load API return _ffi_api.ModuleLoadFromFile(path, fmt) def load_static_library(path, func_names): """Load the .o library at path which implements functions with func_names. Unlike the generic load_module the result will remain as a static_library and will not be relinked on-the-fly into a .so library.""" return _ffi_api.ModuleLoadStaticLibrary(path, func_names) def enabled(target): """Whether module runtime is enabled for target Parameters ---------- target : str The target device type. Returns ------- enabled : bool Whether runtime is enabled. Examples -------- The following code checks if gpu is enabled. >>> tvm.runtime.enabled("gpu") """ return _ffi_api.RuntimeEnabled(target) def num_threads() -> int: """Get the number of threads in use by the TVM runtime. Returns ------- int Number of threads in use. """ return _ffi_api.NumThreads() _set_class_module(Module)
https://github.com/zk-ml/tachikoma
python/tvm/runtime/name_transforms.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Name transformation functions shared in Backend and Runtime
"""

from . import _ffi_api


def sanitize_name(original_name: str):
    """Sanitize name for output into compiler artifacts

    Parameters
    ----------
    original_name : str
        Original name to sanitize
    """
    return _ffi_api.SanitizeName(original_name)
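# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the original
# module). The exact output is defined by the C++ SanitizeName implementation;
# the expected value in the comment below is an assumption.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    print(sanitize_name("relu_0:0"))  # e.g. "relu_0_0"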
https://github.com/zk-ml/tachikoma
python/tvm/runtime/ndarray.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, unused-import, redefined-outer-name """Runtime NDArray API""" import ctypes import warnings import numpy as np import tvm._ffi from tvm._ffi.base import _LIB, check_call, c_array, string_types, _FFI_MODE from tvm._ffi.runtime_ctypes import DataType, Device, TVMArray, TVMArrayHandle from tvm._ffi.runtime_ctypes import DataTypeCode, tvm_shape_index_t from . import _ffi_api try: # pylint: disable=wrong-import-position if _FFI_MODE == "ctypes": raise ImportError() from tvm._ffi._cy3.core import _set_class_ndarray, _make_array, _from_dlpack from tvm._ffi._cy3.core import NDArrayBase except (RuntimeError, ImportError) as error: # pylint: disable=wrong-import-position if _FFI_MODE == "cython": raise error from tvm._ffi._ctypes.ndarray import _set_class_ndarray, _make_array, _from_dlpack from tvm._ffi._ctypes.ndarray import NDArrayBase @tvm._ffi.register_object("runtime.NDArray") class NDArray(NDArrayBase): """Lightweight NDArray class of TVM runtime. Strictly this is only an Array Container (a buffer object) No arthimetic operations are defined. All operations are performed by TVM functions. The goal is not to re-build yet another array library. Instead, this is a minimal data structure to demonstrate how can we use TVM in existing project which might have their own array containers. """ @property def dtype(self): """Type of this array""" return str(self.handle.contents.dtype) @property def device(self): """Device of this array""" return self.handle.contents.device def __dlpack__(self, stream=None): # pylint: disable=unused-argument """Export the array for consumption by from_dlpack() as a DLPack capsule. Parameters ---------- stream : int, optional A Python integer representing a pointer to a stream. Stream is provided by the consumer to the producer to instruct the producer to ensure that operations can safely be performed on the array. Returns ------- capsule : PyCapsule A DLPack capsule for the array, containing a DLPackManagedTensor. """ return self.to_dlpack() def __dlpack_device__(self): """Return a tuple of device_type, device_id in DLPack convention""" return (self.handle.contents.device.device_type, self.handle.contents.device.device_id) def __hash__(self): return ctypes.cast(self.handle, ctypes.c_void_p).value def __eq__(self, other): return self.same_as(other) def __ne__(self, other): return not self.__eq__(other) def same_as(self, other): """Check object identity equality Parameters ---------- other : object The other object to compare to Returns ------- same : bool Whether other is same as self. 
""" if not isinstance(other, NDArrayBase): return False return self.__hash__() == other.__hash__() def __setitem__(self, in_slice, value): """Set ndarray value""" if ( not isinstance(in_slice, slice) or in_slice.start is not None or in_slice.stop is not None ): raise ValueError("Array only support set from numpy array") if isinstance(value, NDArrayBase): if value.handle is not self.handle: value.copyto(self) elif isinstance(value, (np.ndarray, np.generic)): self.copyfrom(value) else: raise TypeError("type %s not supported" % str(type(value))) def copyfrom(self, source_array): """Perform a synchronous copy from the array. Parameters ---------- source_array : array_like The data source we should like to copy from. Returns ------- arr : NDArray Reference to self. """ if isinstance(source_array, NDArrayBase): source_array.copyto(self) return self if not isinstance(source_array, np.ndarray): try: source_array = np.array(source_array, dtype=self.dtype) except: raise TypeError( "array must be an array_like data," + "type %s is not supported" % str(type(source_array)) ) t = DataType(self.dtype) shape, dtype = self.shape, self.dtype if t.lanes > 1: shape = shape + (t.lanes,) t.lanes = 1 dtype = str(t) if source_array.shape != shape: raise ValueError( "array shape do not match the shape of NDArray {0} vs {1}".format( source_array.shape, shape ) ) numpy_str_map = DataType.NUMPY2STR np_dtype_str = ( numpy_str_map[source_array.dtype] if source_array.dtype in numpy_str_map else str(source_array.dtype) ) if (not source_array.flags["C_CONTIGUOUS"]) or ( dtype == "bfloat16" or dtype != np_dtype_str ): source_array = np.ascontiguousarray( source_array, dtype="uint16" if dtype == "bfloat16" else dtype ) assert source_array.flags["C_CONTIGUOUS"] data = source_array.ctypes.data_as(ctypes.c_void_p) nbytes = ctypes.c_size_t(source_array.size * source_array.dtype.itemsize) check_call(_LIB.TVMArrayCopyFromBytes(self.handle, data, nbytes)) return self def __repr__(self): res = "<tvm.nd.NDArray shape={0}, {1}>\n".format(self.shape, self.device) res += self.numpy().__repr__() return res def __str__(self): return str(self.numpy()) def asnumpy(self): """Convert this array to numpy array. This API will be deprecated in TVM v0.8 release. Please use `numpy` instead.""" warnings.warn( "NDArray.asnumpy() will be deprecated in TVM v0.8 release. " "Please use NDArray.numpy() instead.", DeprecationWarning, ) return self.numpy() def numpy(self): """Convert this array to numpy array Returns ------- np_arr : numpy.ndarray The corresponding numpy array. 
""" t = DataType(self.dtype) shape, dtype = self.shape, self.dtype old_dtype = dtype if t.lanes > 1: shape = shape + (t.lanes,) t.lanes = 1 dtype = str(t) if dtype == "int4": dtype = "int8" if dtype == "bfloat16": dtype = "uint16" np_arr = np.empty(shape, dtype=dtype) assert np_arr.flags["C_CONTIGUOUS"] data = np_arr.ctypes.data_as(ctypes.c_void_p) nbytes = ctypes.c_size_t(np_arr.size * np_arr.dtype.itemsize) check_call(_LIB.TVMArrayCopyToBytes(self.handle, data, nbytes)) if old_dtype == "int4": length = np_arr.size np_arr_ret = np.empty((length,), dtype="int8") np_arr = np_arr.reshape((length,)) old_index = np.bitwise_and(np_arr, 0x0F) even_index = np.bitwise_and(np_arr >> 4, 0x0F) np_arr_ret[1::2] = old_index[0 : length // 2] np_arr_ret[0::2] = even_index[0 : length // 2] return np_arr_ret.reshape(shape) return np_arr def copyto(self, target, mem_scope=None): """Copy array to target Parameters ---------- target : NDArray The target array to be copied, must have same shape as this array. mem_scope : Optional[str] The memory scope of the array. """ if isinstance(target, NDArrayBase): return self._copyto(target) if isinstance(target, Device): res = empty(self.shape, self.dtype, target, mem_scope) return self._copyto(res) raise ValueError("Unsupported target type %s" % str(type(target))) def _create_view(self, shape): """Create a view into an existing array. The view shares the same allocation and datatype as the existing array, but can have a different array shape. This is useful for runtimes that support non-flat memory, where both the physical shape of an allocation and the logical shape of the tensor it represents may need to be independently specified. Warning: This function should not be used outside of low-level manipulations, as it breaks non-aliasing assumptions made by TVM. This function may also be removed/replaced in the future. Parameters ---------- shape: Union[tvm.runtime.ShapeTuple, Sequence[typing.SupportsInt]] The shape of the view. """ if not isinstance(shape, tvm.runtime.ShapeTuple): shape = tvm.runtime.ShapeTuple([int(dim) for dim in shape]) return _ffi_api.TVMArrayCreateView(self, shape) def device(dev_type, dev_id=0): """Construct a TVM device with given device type and id. Parameters ---------- dev_type: int or str The device type mask or name of the device. dev_id : int, optional The integer device id Returns ------- dev: tvm.runtime.Device The corresponding device. Examples -------- Device can be used to create reflection of device by string representation of the device type. .. code-block:: python assert tvm.device("cpu", 1) == tvm.cpu(1) assert tvm.device("cuda", 0) == tvm.cuda(0) """ if isinstance(dev_type, string_types): dev_type = dev_type.split()[0] if dev_type not in Device.STR2MASK: raise ValueError("Unknown device type %s" % dev_type) dev_type = Device.STR2MASK[dev_type] return Device(dev_type, dev_id) def numpyasarray(np_data): """Return a TVMArray representation of a numpy array.""" data = np_data assert data.flags["C_CONTIGUOUS"] arr = TVMArray() shape = c_array(tvm_shape_index_t, data.shape) arr.data = data.ctypes.data_as(ctypes.c_void_p) arr.shape = shape arr.strides = None arr.dtype = DataType(np.dtype(data.dtype).name) arr.ndim = data.ndim # CPU device arr.device = device(Device.kDLCPU, 0) return arr, shape def empty(shape, dtype="float32", device=device(Device.kDLCPU, 0), mem_scope=None): """Create an empty array given shape and device Parameters ---------- shape : Union[tvm.runtime.ShapeTuple, Sequence[typing.SupportsInt]] The shape of the array. 
dtype : type or str The data type of the array. device : Device The device of the array. mem_scope : Optional[str] The memory scope of the array. Returns ------- arr : tvm.nd.NDArray The array tvm supported. """ if not isinstance(shape, tvm.runtime.ShapeTuple): shape = tvm.runtime.ShapeTuple([int(dim) for dim in shape]) dtype = DataType(dtype) arr = _ffi_api.TVMArrayAllocWithScope(shape, dtype, device, mem_scope) return arr def from_dlpack(dltensor): """Produces an array from an object with __dlpack__ method or a DLPack tensor w/o memory copy. Retreives the underlying DLPack tensor's pointer to create an array from the data. Removes the original DLPack tensor's destructor as now the array is responsible for destruction. Parameters ---------- dltensor : object with __dlpack__ attribute or a DLPack capsule Returns ------- arr: tvm.nd.NDArray The array view of the tensor data. """ t = type(dltensor) if t.__module__ == "builtins" and t.__name__ == "PyCapsule": return _from_dlpack(dltensor) if hasattr(dltensor, "__dlpack__"): dlpack_caps = dltensor.__dlpack__() return _from_dlpack(dlpack_caps) raise AttributeError("Required attribute __dlpack__ not found") def cpu(dev_id=0): """Construct a CPU device Parameters ---------- dev_id : int, optional The integer device id Returns ------- dev : Device The created device """ return Device(Device.kDLCPU, dev_id) def cuda(dev_id=0): """Construct a CUDA GPU device Parameters ---------- dev_id : int, optional The integer device id Returns ------- dev : Device The created device """ return Device(Device.kDLCUDA, dev_id) def gpu(dev_id=0): """Construct a CUDA GPU device deprecated:: 0.9.0 Use :py:func:`tvm.cuda` instead. Parameters ---------- dev_id : int, optional The integer device id Returns ------- dev : Device The created device """ warnings.warn( "Please use tvm.cuda() instead of tvm.gpu(). tvm.gpu() is going to be deprecated in 0.9.0", ) return Device(Device.kDLCUDA, dev_id) def rocm(dev_id=0): """Construct a ROCM device Parameters ---------- dev_id : int, optional The integer device id Returns ------- dev : Device The created device """ return Device(Device.kDLROCM, dev_id) def opencl(dev_id=0): """Construct a OpenCL device Parameters ---------- dev_id : int, optional The integer device id Returns ------- dev : Device The created device """ return Device(Device.kDLOpenCL, dev_id) def metal(dev_id=0): """Construct a metal device Parameters ---------- dev_id : int, optional The integer device id Returns ------- dev : Device The created device """ return Device(Device.kDLMetal, dev_id) def vpi(dev_id=0): """Construct a VPI simulated device Parameters ---------- dev_id : int, optional The integer device id Returns ------- dev : Device The created device """ return Device(Device.kDLVPI, dev_id) def vulkan(dev_id=0): """Construct a Vulkan device Parameters ---------- dev_id : int, optional The integer device id Returns ------- dev : Device The created device """ return Device(Device.kDLVulkan, dev_id) def ext_dev(dev_id=0): """Construct a extension device Parameters ---------- dev_id : int, optional The integer device id Returns ------- dev : Device The created device Note ---- This API is reserved for quick testing of new device by plugin device API as ext_dev. 
""" return Device(Device.kDLExtDev, dev_id) def hexagon(dev_id=0): """Construct a Hexagon device Parameters ---------- dev_id : int, optional The integer device id Returns ------- dev : Device The created device """ return Device(Device.kDLHexagon, dev_id) def webgpu(dev_id=0): """Construct a webgpu device. Parameters ---------- dev_id : int, optional The integer device id Returns ------- dev : Device The created device """ return Device(Device.kDLWebGPU, dev_id) cl = opencl mtl = metal def array(arr, device=cpu(0), mem_scope=None): """Create an array from source arr. Parameters ---------- arr : numpy.ndarray The array to be copied from device : Device, optional The device device to create the array mem_scope : Optional[str] The memory scope of the array Returns ------- ret : NDArray The created array """ if isinstance(arr, tvm.ir.container.Array): raise AttributeError("arr is an instance of", type(arr)) if not isinstance(arr, (np.ndarray, NDArray)): arr = np.array(arr) return empty(arr.shape, arr.dtype, device, mem_scope).copyfrom(arr) # Register back to FFI _set_class_ndarray(NDArray)
https://github.com/zk-ml/tachikoma
python/tvm/runtime/object.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-import
"""Runtime Object API"""
import ctypes

from tvm._ffi.base import _FFI_MODE, _RUNTIME_ONLY, check_call, _LIB, c_str
from tvm._ffi.runtime_ctypes import ObjectRValueRef
from . import _ffi_api, _ffi_node_api

try:
    # pylint: disable=wrong-import-position,unused-import
    if _FFI_MODE == "ctypes":
        raise ImportError()
    from tvm._ffi._cy3.core import _set_class_object, _set_class_object_generic
    from tvm._ffi._cy3.core import ObjectBase, PyNativeObject
except (RuntimeError, ImportError) as error:
    # pylint: disable=wrong-import-position,unused-import
    if _FFI_MODE == "cython":
        raise error
    from tvm._ffi._ctypes.packed_func import _set_class_object, _set_class_object_generic
    from tvm._ffi._ctypes.object import ObjectBase, PyNativeObject


def _new_object(cls):
    """Helper function for pickle"""
    return cls.__new__(cls)


class Object(ObjectBase):
    """Base class for all tvm's runtime objects."""

    __slots__ = []

    def __repr__(self):
        return _ffi_node_api.AsRepr(self)

    def __dir__(self):
        class_names = dir(self.__class__)
        fnames = _ffi_node_api.NodeListAttrNames(self)
        size = fnames(-1)
        return sorted([fnames(i) for i in range(size)] + class_names)

    def __getattr__(self, name):
        # specially check handle since
        # this is required for PackedFunc calls
        if name == "handle":
            raise AttributeError("handle is not set")
        try:
            return _ffi_node_api.NodeGetAttr(self, name)
        except AttributeError:
            raise AttributeError("%s has no attribute %s" % (str(type(self)), name)) from None

    def __hash__(self):
        return _ffi_api.ObjectPtrHash(self)

    def __eq__(self, other):
        return self.same_as(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __reduce__(self):
        cls = type(self)
        return (_new_object, (cls,), self.__getstate__())

    def __getstate__(self):
        handle = self.handle
        if handle is not None:
            return {"handle": _ffi_node_api.SaveJSON(self)}
        return {"handle": None}

    def __setstate__(self, state):
        # pylint: disable=assigning-non-slot, assignment-from-no-return
        handle = state["handle"]
        self.handle = None
        if handle is not None:
            self.__init_handle_by_constructor__(_ffi_node_api.LoadJSON, handle)

    def _move(self):
        """Create an RValue reference to the object and mark the object as moved.

        This is an advanced developer API that can be useful when passing a
        unique reference to an Object that you no longer need to a function.

        A unique reference can trigger a copy-on-write optimization that avoids
        a copy when we transform an object.

        Note
        ----
        All references to the object become invalid after it is moved.
        Be very careful when using this feature.

        Examples
        --------
        .. code-block:: python

           x = tvm.tir.Var("x", "int32")
           x0 = x
           some_packed_func(x._move())
           # both x0 and x will point to None after the function call.
        Returns
        -------
        rvalue : The rvalue reference.
        """
        return ObjectRValueRef(self)


_set_class_object(Object)
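# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the original
# module). Shows attribute reflection and the pickle round-trip implemented
# by __reduce__/__getstate__/__setstate__ above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import pickle

    import tvm

    x = tvm.tir.Var("x", "int32")
    print(x.name, x.dtype)             # attributes resolved via NodeGetAttr
    y = pickle.loads(pickle.dumps(x))  # serialized through SaveJSON/LoadJSON
    print(y)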
https://github.com/zk-ml/tachikoma
python/tvm/runtime/object_generic.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common implementation of object generic related logic"""
# pylint: disable=unused-import, invalid-name
from numbers import Number, Integral
from tvm._ffi.base import string_types
from tvm._ffi.runtime_ctypes import ObjectRValueRef

from . import _ffi_node_api, _ffi_api
from .object import ObjectBase, PyNativeObject, _set_class_object_generic
from .ndarray import NDArrayBase
from .packed_func import PackedFuncBase, convert_to_tvm_func
from .module import Module


class ObjectGeneric(object):
    """Base class for all classes that can be converted to object."""

    def asobject(self):
        """Convert value to object"""
        raise NotImplementedError()


ObjectTypes = (ObjectBase, NDArrayBase, Module, ObjectRValueRef, PyNativeObject)


def convert_to_object(value, span=None):
    """Convert a Python value to corresponding object type.

    Parameters
    ----------
    value : str
        The value to be inspected.

    span : Optional[Span]
        The location of this itervar in the source code.

    Returns
    -------
    obj : Object
        The corresponding object value.
    """
    if isinstance(value, ObjectTypes):
        return value
    if isinstance(value, bool):
        return const(value, "uint1x1", span=span)
    if isinstance(value, Number):
        return const(value, span=span)
    if isinstance(value, string_types):
        return _ffi_api.String(value)
    if isinstance(value, (list, tuple)):
        value = [convert_to_object(x) for x in value]
        return _ffi_api.Array(*value)
    if isinstance(value, dict):
        vlist = []
        for item in value.items():
            if (
                not isinstance(item[0], ObjectTypes)
                and not isinstance(item[0], string_types)
                and not isinstance(item[0], Number)
            ):
                raise ValueError("key of map must already be a container type")
            vlist.append(convert_to_object(item[0]))
            vlist.append(convert_to_object(item[1]))
        return _ffi_api.Map(*vlist)
    if isinstance(value, ObjectGeneric):
        return value.asobject()
    if value is None:
        return None

    raise ValueError("don't know how to convert type %s to object" % type(value))


def convert(value, span=None):
    """Convert value to TVM object or function.

    Parameters
    ----------
    value : python value

    span : Optional[Span]
        The location of this statement in the source code.

    Returns
    -------
    tvm_val : Object or Function
        Converted value in TVM
    """
    if isinstance(value, (PackedFuncBase, ObjectBase)):
        return value

    if callable(value):
        return convert_to_tvm_func(value)

    return convert_to_object(value, span=span)


def _scalar_type_inference(value):
    if hasattr(value, "dtype"):
        dtype = str(value.dtype)
    elif isinstance(value, bool):
        dtype = "bool"
    elif isinstance(value, float):
        # We intentionally prefer to convert the float to float32 since it's more common in DL.
        if -3.40282347e38 <= value <= 3.40282347e38:
            dtype = "float32"
        else:
            dtype = "float64"
    elif isinstance(value, int):
        # We intentionally prefer to convert the python int to int32 since it's more common in DL.
        if -2147483648 <= value <= 2147483647:
            dtype = "int32"
        else:
            dtype = "int64"
    else:
        raise NotImplementedError(
            "Cannot automatically infer the type. value={}".format(value)
        )
    return dtype


def const(value, dtype=None, span=None):
    """Construct a constant.

    Parameters
    ----------
    value : number
        The content of the constant number.

    dtype : str or None, optional
        The data type.

    span : Optional[Span]
        The location of the constant value in the source.

    Returns
    -------
    const_val: tvm.Expr
        The result expression.
    """
    if dtype is None:
        dtype = _scalar_type_inference(value)
    if dtype == "uint64" and value >= (1 << 63):
        return _ffi_node_api.LargeUIntImm(dtype, value & ((1 << 32) - 1), value >> 32, span)
    return _ffi_node_api._const(value, dtype, span)


_set_class_object_generic(ObjectGeneric, convert_to_object)
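# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the original
# module). convert() maps plain Python values onto TVM runtime objects.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    arr = convert([1, 2, 3])       # runtime Array of IntImm constants
    mapping = convert({"a": 1.0})  # runtime Map with a String key
    text = convert("hello")        # runtime String
    print(type(arr), type(mapping), type(text))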
https://github.com/zk-ml/tachikoma
python/tvm/runtime/object_path.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ ObjectPath class that represents a path from a root object to one of its descendants via attribute access, array indexing etc. """ import tvm._ffi from tvm.runtime import Object from . import _ffi_node_api __all__ = ( "ObjectPath", "RootPath", "AttributeAccessPath", "UnknownAttributeAccessPath", "ArrayIndexPath", "MissingArrayElementPath", "MapValuePath", "MissingMapEntryPath", "ObjectPathPair", ) @tvm._ffi.register_object("ObjectPath") class ObjectPath(Object): """ Path to an object from some root object. """ def __init__(self) -> None: super().__init__() raise ValueError( "ObjectPath can't be initialized directly. " "Use ObjectPath.root() to create a path to the root object" ) @staticmethod def root() -> "ObjectPath": return _ffi_node_api.ObjectPathRoot() def __eq__(self, other): return _ffi_node_api.ObjectPathEqual(self, other) def __ne__(self, other): return not _ffi_node_api.ObjectPathEqual(self, other) @property def parent(self) -> "ObjectPath": return _ffi_node_api.ObjectPathGetParent(self) def __len__(self) -> int: return _ffi_node_api.ObjectPathLength(self) def get_prefix(self, length) -> "ObjectPath": return _ffi_node_api.ObjectPathGetPrefix(self, length) def is_prefix_of(self, other) -> "ObjectPath": return _ffi_node_api.ObjectPathIsPrefixOf(self, other) def attr(self, attr_key) -> "ObjectPath": return _ffi_node_api.ObjectPathAttr(self, attr_key) def array_index(self, index) -> "ObjectPath": return _ffi_node_api.ObjectPathArrayIndex(self, index) def missing_array_element(self, index) -> "ObjectPath": return _ffi_node_api.ObjectPathMissingArrayElement(self, index) def map_value(self, key) -> "ObjectPath": return _ffi_node_api.ObjectPathMapValue(self, tvm.runtime.convert(key)) def missing_map_entry(self) -> "ObjectPath": return _ffi_node_api.ObjectPathMissingMapEntry(self) @tvm._ffi.register_object("RootPath") class RootPath(ObjectPath): pass @tvm._ffi.register_object("AttributeAccessPath") class AttributeAccessPath(ObjectPath): pass @tvm._ffi.register_object("UnknownAttributeAccessPath") class UnknownAttributeAccessPath(ObjectPath): pass @tvm._ffi.register_object("ArrayIndexPath") class ArrayIndexPath(ObjectPath): pass @tvm._ffi.register_object("MissingArrayElementPath") class MissingArrayElementPath(ObjectPath): pass @tvm._ffi.register_object("MapValuePath") class MapValuePath(ObjectPath): pass @tvm._ffi.register_object("MissingMapEntryPath") class MissingMapEntryPath(ObjectPath): pass @tvm._ffi.register_object("ObjectPathPair") class ObjectPathPair(Object): """ Pair of ObjectPaths, one for each object being tested for structural equality. 
""" @property def lhs_path(self) -> ObjectPath: return _ffi_node_api.ObjectPathPairLhsPath(self) @property def rhs_path(self) -> ObjectPath: return _ffi_node_api.ObjectPathPairRhsPath(self)
https://github.com/zk-ml/tachikoma
python/tvm/runtime/packed_func.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-import
"""Packed Function namespace."""
import ctypes
from tvm._ffi.base import _LIB, check_call, c_str, string_types, _FFI_MODE

try:
    # pylint: disable=wrong-import-position
    if _FFI_MODE == "ctypes":
        raise ImportError()
    from tvm._ffi._cy3.core import _set_class_packed_func, _set_class_module
    from tvm._ffi._cy3.core import PackedFuncBase
    from tvm._ffi._cy3.core import convert_to_tvm_func
except (RuntimeError, ImportError) as error:
    # pylint: disable=wrong-import-position
    if _FFI_MODE == "cython":
        raise error
    from tvm._ffi._ctypes.packed_func import _set_class_packed_func, _set_class_module
    from tvm._ffi._ctypes.packed_func import PackedFuncBase
    from tvm._ffi._ctypes.packed_func import convert_to_tvm_func

PackedFuncHandle = ctypes.c_void_p


class PackedFunc(PackedFuncBase):
    """The PackedFunc object used in TVM.

    PackedFunc plays a key role in bridging the frontend and backend in TVM.
    It provides a type-erased interface: you can call a PackedFunc with
    positional arguments. Compiled modules return PackedFuncs, and the TVM
    backend also registers and exposes its API as PackedFuncs.

    The following is a list of common usage scenarios of tvm.runtime.PackedFunc:

    - Automatic exposure of C++ API into python
    - To call PackedFunc from python side
    - To call python callbacks to inspect results in generated code
    - Bring python hook into C++ backend

    See Also
    --------
    tvm.register_func: How to register global function.
    tvm.get_global_func: How to get global function.
    """


_set_class_packed_func(PackedFunc)
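# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the original
# module). Registers a Python callback as a global PackedFunc and calls it
# back through the type-erased interface. The function name is arbitrary.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import tvm

    @tvm.register_func("demo.add_one", override=True)
    def add_one(x):
        return x + 1

    f = tvm.get_global_func("demo.add_one")
    assert f(10) == 11  # f is a tvm.runtime.PackedFunc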
https://github.com/zk-ml/tachikoma
python/tvm/runtime/params.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Helper utility to save and load parameter dicts."""
from . import _ffi_api, ndarray


def save_param_dict(params):
    """Save parameter dictionary to binary bytes.

    The result binary bytes can be loaded by the
    GraphModule with API "load_params".

    Parameters
    ----------
    params : dict of str to NDArray
        The parameter dictionary.

    Returns
    -------
    param_bytes: bytearray
        Serialized parameters.

    Examples
    --------
    .. code-block:: python

       # set up the parameter dict
       params = {"param0": arr0, "param1": arr1}
       # save the parameters as byte array
       param_bytes = tvm.runtime.save_param_dict(params)
       # We can serialize the param_bytes and load it back later.
       # Pass in byte array to module to directly set parameters
       tvm.runtime.load_param_dict(param_bytes)
    """
    transformed = {k: ndarray.array(v) for (k, v) in params.items()}
    return _ffi_api.SaveParams(transformed)


def load_param_dict(param_bytes):
    """Load parameter dictionary from binary bytes.

    Parameters
    ----------
    param_bytes: bytearray
        Serialized parameters.

    Returns
    -------
    params : dict of str to NDArray
        The parameter dictionary.
    """
    if isinstance(param_bytes, (bytes, str)):
        param_bytes = bytearray(param_bytes)
    return _ffi_api.LoadParams(param_bytes)
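# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the original
# module). Round-trips a parameter dict through the binary serialization.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    params = {"w": ndarray.array(np.zeros((2, 2), "float32"))}
    blob = save_param_dict(params)
    loaded = load_param_dict(blob)
    assert (loaded["w"].numpy() == 0).all()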
https://github.com/zk-ml/tachikoma
python/tvm/runtime/profiler_vm.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable, invalid-name, redefined-builtin
"""
The Relay Virtual Machine profiler.

Provides extra APIs for profiling vm execution.
"""
import warnings
from tvm.runtime import _ffi_api
from tvm.rpc import base as rpc_base
from . import vm
from .profiling import Report


def enabled():
    """Whether the vm profiler is enabled."""
    return hasattr(_ffi_api, "_VirtualMachineDebug")


class VirtualMachineProfiler(vm.VirtualMachine):
    """Relay profiling VM runtime."""

    def __init__(self, exe, device, memory_cfg=None):
        super(VirtualMachineProfiler, self).__init__(exe, device, memory_cfg)

        # Make sure the constructor of the VM module is on the proper device.
        # Remote devices have device_type of their actual device_type + RPC_SESS_MASK.
        if device.device_type >= rpc_base.RPC_SESS_MASK:
            self.module = device._rpc_sess.get_function("runtime._VirtualMachineDebug")(exe)
        else:
            self.module = _ffi_api._VirtualMachineDebug(exe.module)

        self._init = self.module["init"]
        self._invoke = self.module["invoke"]
        self._profile = self.module["profile"]
        self._profile_rpc = self.module["profile_rpc"]
        self._set_input = self.module["set_input"]
        self._setup_device(device, memory_cfg)

    def get_stat(self, sort_by_time=True):  # pylint: disable=unused-argument
        """Get the statistics of executed ops.

        REMOVED, use the profile method instead.
        """
        warnings.warn("get_stat has been removed, use profile instead")
        return ""

    def profile(self, *args, func_name="main", collectors=None, **kwargs):
        """Profile a function call.

        Parameters
        ----------
        func_name : str
            The name of the function.

        collectors : Optional[Sequence[MetricCollector]]
            Extra metrics to collect. If profiling over RPC, collectors must be `None`.

        args : list[tvm.runtime.NDArray] or list[np.ndarray]
            The arguments to the function.

        kwargs : dict of str to tvm.runtime.NDArray or np.ndarray
            Named arguments to the function.

        Returns
        -------
        timing_results : str
            Overall and per-op timing results formatted in a table.
        """
        if args or kwargs:
            self.set_input(func_name, *args, **kwargs)

        if self.module.type_key == "rpc":
            # We cannot serialize MetricCollectors over RPC.
            assert collectors is None, "Profiling with collectors is not supported over RPC"
            return Report.from_json(self._profile_rpc(func_name))

        return self._profile(func_name, collectors)
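# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the original
# module). A minimal end-to-end run, assuming TVM was built with Relay and
# the LLVM backend enabled.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    import tvm
    from tvm import relay

    x = relay.var("x", shape=(4, 4), dtype="float32")
    mod = tvm.IRModule.from_expr(relay.Function([x], x + x))
    exe = relay.vm.compile(mod, target="llvm")

    prof_vm = VirtualMachineProfiler(exe, tvm.cpu())
    report = prof_vm.profile(np.ones((4, 4), "float32"), func_name="main")
    print(report.table())  # per-op and overall timings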
https://github.com/zk-ml/tachikoma
python/tvm/runtime/profiling/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Registration of profiling objects in python."""

from typing import Dict, Sequence, Optional
from ... import _ffi
from . import _ffi_api
from .. import Object, Device


@_ffi.register_object("runtime.profiling.Report")
class Report(Object):
    """A container for information gathered during a profiling run.

    Attributes
    ----------
    calls : Array[Dict[str, Object]]
        Per-call profiling metrics (function name, runtime, device, ...).

    device_metrics : Dict[Device, Dict[str, Object]]
        Per-device metrics collected over the entire run.
    """

    def __init__(
        self,
        calls: Sequence[Dict[str, Object]],
        device_metrics: Dict[str, Dict[str, Object]],
        configuration: Dict[str, Object],
    ):
        """Construct a profiling report from a list of metrics and per-device metrics.

        Parameters
        ----------
        calls : Sequence[Dict[str, Object]]
            Per function call metrics.

        device_metrics : Dict[str, Dict[str, Object]]
            Per device metrics.

        configuration : Dict[str, Object]
            Configuration of TVM for this profiling run. Includes number of
            threads, executor.
        """
        self.__init_handle_by_constructor__(_ffi_api.Report, calls, device_metrics, configuration)

    def csv(self):
        """Convert this profiling report into CSV format.

        This only includes calls and not overall metrics.

        Returns
        -------
        csv : str
            `calls` in CSV format.
        """
        return _ffi_api.AsCSV(self)

    def table(self, sort=True, aggregate=True, col_sums=True):
        """Generate a human-readable table

        Parameters
        ----------
        sort : bool
            If aggregate is true, whether to sort call frames by descending
            duration. If aggregate is False, whether to sort frames by order
            of appearance in the program.

        aggregate : bool
            Whether to join multiple calls to the same op into a single line.

        col_sums : bool
            Whether to include the sum of each column.

        Returns
        -------
        table : str
            A human-readable table
        """
        return _ffi_api.AsTable(self, sort, aggregate, col_sums)

    def json(self):
        """Convert this profiling report into JSON format.

        Example output:

        .. code-block:

            {
              "calls": [
                {
                  "Duration (us)": {
                    "microseconds": 12.3
                  },
                  "Name": "fused_dense",
                  "Count": {
                    "count": 1
                  },
                  "Percent": {
                    "percent": 10.3
                  }
                }
              ],
              "device_metrics": {
                "cpu": {
                  "Duration (us)": {
                    "microseconds": 334.2
                  },
                  "Percent": {
                    "percent": 100
                  }
                }
              }
            }

        Returns
        -------
        json : str
            Formatted JSON
        """
        return _ffi_api.AsJSON(self)

    @classmethod
    def from_json(cls, s):
        """Deserialize a report from JSON.

        Parameters
        ----------
        s : str
            Report serialized via :py:meth:`json`.

        Returns
        -------
        report : Report
            The deserialized report.
""" return _ffi_api.FromJSON(s) @_ffi.register_object("runtime.profiling.Count") class Count(Object): """A integer count of something""" def __init__(self, count: int): self.__init_handle_by_constructor__(_ffi_api.Count, count) @_ffi.register_object("runtime.profiling.Duration") class Duration(Object): """A duration of something""" def __init__(self, duration: float): self.__init_handle_by_constructor__(_ffi_api.Duration, duration) @_ffi.register_object("runtime.profiling.Percent") class Percent(Object): """A Percent of something""" def __init__(self, percent: float): self.__init_handle_by_constructor__(_ffi_api.Percent, percent) @_ffi.register_object("runtime.profiling.Ratio") class Ratio(Object): """A Ratio of two things""" def __init__(self, ratio: float): self.__init_handle_by_constructor__(_ffi_api.Ratio, ratio) @_ffi.register_object("runtime.profiling.MetricCollector") class MetricCollector(Object): """Interface for user defined profiling metric collection.""" @_ffi.register_object("runtime.profiling.DeviceWrapper") class DeviceWrapper(Object): """Wraps a tvm.runtime.Device""" def __init__(self, dev: Device): self.__init_handle_by_constructor__(_ffi_api.DeviceWrapper, dev) def profile_function(mod, dev, collectors, func_name=None, warmup_iters=10): """Collect performance information of a function execution. Usually used with a compiled PrimFunc. This information can include performance counters like cache hits and FLOPs that are useful in debugging performance issues of individual PrimFuncs. Different metrics can be collected depending on which MetricCollector is used. Example ------- .. code-block: python f = tvm.build(my_func, target="llvm", name="my_func") prof = tvm.runtime.profiling.profile_function( f, tvm.cpu(), [tvm.runtime.profiling.PAPIMetricCollector({tvm.cpu(): ["PAPI_FP_OPS"]}), ) counters = prof(*args) print(counters) Parameters ---------- mod: Module Module containing the function to profile. dev: Device Device to run the function on. collectors: List[MetricCollector] :py:class:`MetricCollector`s which will collect performance information. func_name: Optional[str] Name of the function in `mod` to profile. Defaults to the `entry_name` of `mod`. warmup_iters: int Number of iterations to run the function before collecting performance information. Recommended to set this larger than 0 for consistent cache effects. Defaults to 10. Returns ------- prof: PackedFunc[args, Dict[str, ObjectRef]] PackedFunc which takes the same arguments as the `mod[func_name]` and returns performance metrics as a `Dict[str, ObjectRef]` where values can be `CountNode`, `DurationNode`, `PercentNode`. """ if func_name is None: func_name = mod.entry_name return _ffi_api.ProfileFunction( mod, func_name, dev.device_type, dev.device_id, warmup_iters, collectors ) # We only enable this class when TVM is build with PAPI support if _ffi.get_global_func("runtime.profiling.PAPIMetricCollector", allow_missing=True) is not None: @_ffi.register_object("runtime.profiling.PAPIMetricCollector") class PAPIMetricCollector(MetricCollector): """Collects performance counter information using the Performance Application Programming Interface (PAPI). """ def __init__(self, metric_names: Optional[Dict[Device, Sequence[str]]] = None): """ Parameters ---------- metric_names : Optional[Dict[Device, Sequence[str]]] List of per-device metrics to collect. You can find a list of valid metrics by runing `papi_native_avail` from the command line. 
""" metric_names = {} if metric_names is None else metric_names wrapped = dict() for dev, names in metric_names.items(): wrapped[DeviceWrapper(dev)] = names self.__init_handle_by_constructor__(_ffi_api.PAPIMetricCollector, wrapped)
https://github.com/zk-ml/tachikoma
python/tvm/runtime/profiling/_ffi_api.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI for profiling"""
from ... import _ffi

_ffi._init_api("runtime.profiling", __name__)
https://github.com/zk-ml/tachikoma
python/tvm/runtime/vm.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable, invalid-name, redefined-builtin """ The Relay Virtual Machine runtime. Implements a Python interface to executing the compiled VM object. """ import numpy as np import tvm from tvm.runtime import Module from tvm._ffi.runtime_ctypes import TVMByteArray from tvm._ffi import base as _base from .object import Object from . import _ffi_api, container from ..rpc.base import RPC_SESS_MASK def _convert(arg, cargs): def _gettype(arg): if isinstance(arg, np.float16): return "float16" elif isinstance(arg, (_base.integer_types, bool)): return "int32" else: return "float32" if isinstance(arg, Object): cargs.append(arg) elif arg is None: cargs.append(tvm.nd.array([], device=tvm.cpu(0))) elif isinstance(arg, np.ndarray): nd_arr = tvm.nd.array(arg, device=tvm.cpu(0)) cargs.append(nd_arr) elif isinstance(arg, tvm.runtime.NDArray): cargs.append(arg) elif isinstance(arg, (tuple, list)): field_args = [] for field in arg: _convert(field, field_args) cargs.append(container.tuple_object(field_args)) elif isinstance(arg, (_base.numeric_types, bool)): dtype = _gettype(arg) value = tvm.nd.array(np.array(arg, dtype=dtype), device=tvm.cpu(0)) cargs.append(value) elif isinstance(arg, str): cargs.append(arg) else: raise TypeError("Unsupported type: %s" % (type(arg))) def convert(args): cargs = [] for arg in args: _convert(arg, cargs) return cargs class Executable(object): """Relay VM executable""" def __init__(self, mod): self.mod = mod self._function_params = {} self._save = self.mod["save"] self._get_lib = self.mod["get_lib"] self._get_bytecode = self.mod["get_bytecode"] self._get_constants = self.mod["get_constants"] self._get_virtual_devices = self.mod["get_virtual_devices"] self._get_primitives = self.mod["get_primitives"] self._get_stats = self.mod["get_stats"] self._get_function_arity = self.mod["get_function_arity"] self._get_function_param_name = self.mod["get_function_param_name"] self._move_late_bound_consts = self.mod["move_late_bound_consts"] self._get_late_bound_consts = self.mod["get_late_bound_consts"] self._load_late_bound_consts = self.mod["load_late_bound_consts"] self._load_late_bound_consts_from_map = self.mod["load_late_bound_consts_from_map"] def save(self): """Save the Relay VM Executable. Returns ------- code : bytearray The binary blob representing a serialized Relay VM executable. It can then be saved to disk and later deserialized into a new Executable. lib : :py:class:`~tvm.runtime.Module` The runtime module that contains the generated code. It is basically a library that is composed of hardware dependent code. Notes ----- The returned code is organized with the following sections in order. - Global section. 
This section contains the globals used by the virtual machine. - Constant section. This section is used to store the constant pool of a virtual machine. - Primitive name section. This section is introduced to accommodate the list of primitive operator names that will be invoked by the virtual machine. - Code section. The VM functions, including bytecode, reside in this section. Examples -------- .. code-block:: python import numpy as np import tvm from tvm import te from tvm import relay # define a simple network. x = relay.var('x', shape=(10, 10)) f = relay.Function([x], x + x) mod = tvm.IRModule({"main": f}) # create a Relay VM. dev = tvm.cpu() target = "llvm" executable = relay.vm.compile(mod, target) code, lib = executable.save() # save and load the code and lib file. tmp = tvm.contrib.utils.tempdir() path_lib = tmp.relpath("lib.so") lib.export_library(path_lib) with open(tmp.relpath("code.ro"), "wb") as fo: fo.write(code) loaded_lib = tvm.runtime.load_module(path_lib) loaded_code = bytearray(open(tmp.relpath("code.ro"), "rb").read()) # deserialize. des_exec = tvm.runtime.vm.Executable.load_exec(loaded_code, loaded_lib) # execute the deserialized executable. x_data = np.random.rand(10, 10).astype('float32') des_vm = tvm.runtime.vm.VirtualMachine(des_exec, dev) res = des_vm.run(x_data) print(res.numpy()) """ return self._save(), self._get_lib() @staticmethod def load_exec(bytecode, lib): """Construct an executable from saved artifacts. Parameters ---------- bytecode : bytearray The binary blob representing the Relay VM bytecode. lib : :py:class:`~tvm.runtime.Module` The runtime module that contains the generated code. Returns ------- exec: Executable An executable constructed using the provided artifacts. """ if isinstance(bytecode, (bytes, str)): bytecode = bytearray(bytecode) elif not isinstance(bytecode, (bytearray, TVMByteArray)): raise TypeError( "bytecode is expected to be the type of bytearray " + "or TVMByteArray, but received {}".format(type(bytecode)) ) if lib is not None and not isinstance(lib, tvm.runtime.Module): raise TypeError( "lib is expected to be the type of tvm.runtime.Module" + ", but received {}".format(type(lib)) ) return Executable(_ffi_api.Load_Executable(bytecode, lib)) @property def lib(self): """Get the library that contains hardware dependent code. Returns ------- ret : :py:class:`~tvm.runtime.Module` The runtime module that contains hardware dependent code. """ return self._get_lib() @property def stats(self): """Get the statistics of the Relay VM executable. Returns ------- ret : String The statistic information of the VM executable. """ return self._get_stats() @property def primitive_ops(self): """Get the names of the primitive ops contained in the executable. Returns ------- ret : List[String] The list of primitive ops. """ ret = [] num_primitives = _ffi_api.GetNumOfPrimitives(self.module) for i in range(num_primitives): ret.append(_ffi_api.GetPrimitiveFields(self.module, i)) return ret @property def bytecode(self): """Get the bytecode of the Relay VM executable. Returns ------- ret : String The bytecode of the executable. Notes ----- The bytecode is in the following format: func_name reg_file_size num_instructions param1 param2 ... paramM instruction1 instruction2 ... instructionN Each instruction is printed in the following format: hash opcode field1 ... fieldX # The text format. The part starting from # is only used for visualization and debugging.
The real serialized code doesn't contain it, so the deserializer doesn't need to deal with it either. """ return self._get_bytecode() @property def constants(self): """Returns a human-readable description of all the constants in the executable. Useful for debugging and diffing generated executables in unit tests.""" return self._get_constants() @property def virtual_devices(self): """Returns a human-readable description of all the (virtual) devices in the executable.""" return self._get_virtual_devices() @property def primitives(self): """Returns a human-readable description of all the primitives (i.e. PackedFuncs) in the executable""" return self._get_primitives() @property def globals(self): """Get the globals used by the Relay VM executable. Returns ------- ret : List[String] The globals contained in the executable. """ ret = [] num_globals = _ffi_api.GetNumOfGlobals(self.module) for i in range(num_globals): ret.append(_ffi_api.GetGlobalFields(self.module, i)) return ret @property def module(self): """Return the runtime module contained in a virtual machine executable.""" return self.mod def get_function_params(self, func_name): """Get VM Function parameters""" if func_name in self._function_params: return self._function_params[func_name] arity = self._get_function_arity(func_name) assert arity >= 0 params = [] for i in range(arity): p = self._get_function_param_name(func_name, i) assert p params.append(p) self._function_params[func_name] = params return params def move_late_bound_consts(self, path, byte_limit): """Move all constants of byte size greater than or equal to byte_limit to the file at path""" return self._move_late_bound_consts(path, byte_limit) def get_late_bound_consts(self, byte_limit): """Return all constants of byte size greater than or equal to byte_limit""" return self._get_late_bound_consts(byte_limit) def load_late_bound_consts(self, path): """Re-load constants previously saved to the file at path""" return self._load_late_bound_consts(path) def load_late_bound_consts_from_map(self, map): """Re-load constants supplied in map""" return self._load_late_bound_consts_from_map(map) class VirtualMachine(object): """Relay VM runtime. Parameters ---------- exe : Executable The VM executable. device : tvm.runtime.Device or List[tvm.runtime.Device] The device(s) on which the model will run. Currently at most one device per device type is supported. memory_cfg : str or Dict[tvm.runtime.Device, str], optional Configure the type of memory allocator. The allocator type can be ["naive", "pooled"]. If memory_cfg is None, all devices will use the pooled allocator by default. If memory_cfg is a string, all devices will use the specified allocator type. If memory_cfg is a dict, each device uses the allocator type specified in the dict, or the pooled allocator if not specified in the dict. """ NAIVE_ALLOCATOR = 1 POOLED_ALLOCATOR = 2 def __init__(self, exe, device, memory_cfg=None): """ Construct a VirtualMachine wrapper class which provides a simple interface over the raw C++ Module based API. Parameters ---------- exe: Union[Executable, Module] The executable, either as the wrapper Python type or as the raw runtime.Module. In most cases this will be the Python wrapper class tvm.runtime.vm.Executable, but if you instead get the underlying runtime.Module subclass (i.e. `exe.mod`) you can directly pass it to this method. This case can occur when doing things such as RPC, where TVM's module APIs return the raw modules, not the wrapped modules. This constructor will handle this internally.
device: Union[Device, List[Device]] The device, or devices, on which to execute the VM code. memory_cfg: Optional[str] The allocator behavior to use for the VM. Returns ------- vm: VirtualMachine A VM wrapper object. """ if not isinstance(exe, Executable) and not isinstance(exe, Module): raise TypeError( "exe is expected to be the type of Executable, " + "but received {}".format(type(exe)) ) if not isinstance(exe, Executable): exe = Executable(exe) self.module = exe.mod["vm_load_executable"]() self._exec = exe self._init = self.module["init"] self._invoke = self.module["invoke"] self._invoke_stateful = self.module["invoke_stateful"] self._get_output = self.module["get_output"] self._get_num_outputs = self.module["get_num_outputs"] self._get_input_index = self.module["get_input_index"] self._set_input = self.module["set_input"] self._set_one_input = self.module["set_one_input"] self._set_outputs = self.module["set_outputs"] self._setup_device(device, memory_cfg) def _setup_device(self, dev, memory_cfg): """Init devices and allocators.""" devs = dev if not isinstance(dev, (list, tuple)): if not isinstance(dev, tvm.runtime.Device): raise TypeError("dev is expected to be Device or List[Device]") devs = [dev] # CPU is required for executing shape functions if not any(c.device_type % RPC_SESS_MASK == tvm.cpu().device_type for c in devs): devs.append(tvm.cpu()) default_alloc_type = VirtualMachine.POOLED_ALLOCATOR if memory_cfg is None: memory_cfg = {} elif isinstance(memory_cfg, str): assert memory_cfg in ["naive", "pooled"] if memory_cfg == "naive": default_alloc_type = VirtualMachine.NAIVE_ALLOCATOR memory_cfg = {} elif not isinstance(memory_cfg, dict): raise TypeError( "memory_cfg is expected to be a string or dictionary, " + "but received {}".format(type(memory_cfg)) ) init_args = [] for device in devs: init_args.append(device.device_type % RPC_SESS_MASK) init_args.append(device.device_id) alloc_type = memory_cfg[device] if device in memory_cfg else default_alloc_type init_args.append(alloc_type) self._init(*init_args) def set_input(self, func_name, *args, **kwargs): """Set the input to a function. If the device type and device id of an input tensor match those of the target device, zero copy is used: the internal tensor is a reference to the memory allocated for the input. Otherwise a new internal NDArray is created and the data is copied. Parameters ---------- func_name : str The name of the function. args : list[tvm.runtime.NDArray] or list[np.ndarray] The arguments to the function. kwargs: dict of str to tvm.runtime.NDArray or np.ndarray Named arguments to the function. """ if kwargs: # kwargs is a superset of the required function parameters. We # only find the ones that are needed. func_params = self._exec.get_function_params(func_name) new_args = [None] * len(func_params) cnt = 0 for k in kwargs: if k in func_params: idx = func_params.index(k) new_args[idx] = kwargs[k] cnt += 1 assert len(args) + cnt == len(func_params) idx = 0 for i, arg in enumerate(new_args): if arg is None: new_args[i] = args[idx] idx += 1 args = new_args cargs = convert(args) self._set_input(func_name, *cargs) def set_one_input(self, func_name, *args, **kwargs): """Set one input tensor, identified by a tag, to a function. Parameters ---------- func_name : str The name of the function. args : [str or int, tvm.runtime.NDArray] name or index of the tensor and the input tensor, optional kwargs: dict of str or int to tvm.runtime.NDArray, optional tagged arguments to the function.
Only args or kwargs should exist """ if kwargs: assert len(kwargs) == 1 tag = next(iter(kwargs)) if isinstance(tag, str): func_params = self._exec.get_function_params(func_name) assert tag in func_params self._set_one_input(func_name, tag, kwargs[tag]) else: assert len(args) == 2 self._set_one_input(func_name, args[0], args[1]) def invoke(self, func_name, *args, **kwargs): """Invoke a function. Parameters ---------- func_name : str The name of the function. args : list[tvm.runtime.NDArray] or list[np.ndarray] The arguments to the function. kwargs: dict of str to tvm.runtime.NDArray or np.ndarray Named arguments to the function. Returns ------- result : Object The output. """ if args or kwargs: self.set_input(func_name, *args, **kwargs) return self._invoke(func_name) def run(self, *args, **kwargs): """Run the main function. Parameters ---------- args : list[tvm.runtime.NDArray] or list[np.ndarray] The arguments to the function. kwargs: dict of str to tvm.runtime.NDArray or np.ndarray Named arguments to the function. Returns ------- result : Object The output. """ return self.invoke("main", *args, **kwargs) def invoke_stateful(self, func_name, *args, **kwargs): """Invoke a function and ignore the returned result. Use this function when running over RPC, because it is currently impossible to return an ADT object over RPC. To get the outputs, use :py:func:`get_outputs`. Parameters ---------- func_name : str The name of the function. args : list[tvm.runtime.NDArray] or list[np.ndarray] The arguments to the function. kwargs: dict of str to tvm.runtime.NDArray or np.ndarray Named arguments to the function. """ if args or kwargs: self.set_input(func_name, *args, **kwargs) self._invoke_stateful(func_name) def invoke_with_outputs(self, func_name, input_args, output_args): # TODO(vvchernov): consider a scenario where output tensors are set once """Invoke a function with pre-allocated output tensors. The output tensors should be set on every invocation. input_args can be None if the set_input method was used before. This invoke method avoids excess copying if memory for the output tensors was allocated before inference. Parameters ---------- func_name : str The name of the function. input_args: dict of str to tvm.runtime.NDArray or np.ndarray Named arguments to the function. output_args : list[tvm.runtime.NDArray] or list[DLTensor] The output tensors of the function. """ if input_args: func_params = self._exec.get_function_params(func_name) new_args = [None] * len(func_params) cnt = 0 for k in input_args: if k in func_params: idx = func_params.index(k) new_args[idx] = input_args[k] cnt += 1 assert cnt == len(func_params) cargs = convert(new_args) self._set_input(func_name, *cargs) self._set_outputs(func_name, *output_args) self._invoke(func_name) def get_outputs(self): """Get the outputs from a call to :py:func:`invoke_stateful`. Returns ------- outputs : List[NDArray] """ return [self._get_output(i) for i in range(self._get_num_outputs())] def get_input_index(self, input_name, func_name="main"): """Get the input index via the input name. Parameters ---------- input_name : str The input key name func_name : str The function name Returns ------- index: int The input index. -1 will be returned if the given input name is not found.
""" return self._get_input_index(input_name, func_name) def benchmark( self, device, *args, func_name="main", repeat=5, number=5, min_repeat_ms=None, limit_zero_time_iterations=100, end_to_end=False, cooldown_interval_ms=0, repeats_to_cooldown=1, **kwargs, ): """Calculate runtime of a function by repeatedly calling it. Use this function to get an accurate measurement of the runtime of a function. The function is run multiple times in order to account for variability in measurements, processor speed or other external factors. Mean, median, standard deviation, min and max runtime are all reported. On GPUs, CUDA and ROCm specifically, special on-device timers are used so that synchonization and data transfer operations are not counted towards the runtime. This allows for fair comparison of runtimes across different functions and models. The `end_to_end` flag switches this behavior to include data transfer operations in the runtime. The benchmarking loop looks approximately like so: .. code-block:: python for r in range(repeat): time_start = now() for n in range(number): func_name() time_end = now() total_times.append((time_end - time_start)/number) Parameters ---------- func_name : str The function to benchmark repeat : int Number of times to run the outer loop of the timing code (see above). The output will contain `repeat` number of datapoints. number : int Number of times to run the inner loop of the timing code. This inner loop is run in between the timer starting and stopping. In order to amortize any timing overhead, `number` should be increased when the runtime of the function is small (less than a 1/10 of a millisecond). min_repeat_ms : Optional[int] If set, the inner loop will be run until it takes longer than `min_repeat_ms` milliseconds. This can be used to ensure that the function is run enough to get an accurate measurement. limit_zero_time_iterations : Optional[int] The maximum number of repeats when measured time is equal to 0. It helps to avoid hanging during measurements. end_to_end : bool If set, include time to transfer input tensors to the device and time to transfer returned tensors in the total runtime. This will give accurate timings for end to end workloads. cooldown_interval_ms: Optional[int] The cooldown interval in milliseconds between the number of repeats defined by `repeats_to_cooldown`. repeats_to_cooldown: Optional[int] The number of repeats before the cooldown is activated. args : Sequence[Object] Arguments to the function. These are cached before running timing code, so that data transfer costs are not counted in the runtime. kwargs : Dict[str, Object] Named arguments to the function. These are cached like `args`. Returns ------- timing_results : BenchmarkResult Runtimes of the function. Use `.mean` to access the mean runtime, use `.results` to access the individual runtimes (in seconds). 
""" min_repeat_ms = 0 if min_repeat_ms is None else min_repeat_ms if end_to_end: # We need to unpack keyword arguments into positional arguments packed_args = list(args) for k, v in kwargs.items(): i = self.get_input_index(k, func_name) if i < 0: raise TypeError(f"{func_name}() got an unexpected keyword argument '{k}'") while i >= len(packed_args): packed_args.append(None) packed_args[i] = v return self.module.time_evaluator( "invoke_return_to_device", device, repeat=repeat, number=number, min_repeat_ms=min_repeat_ms, limit_zero_time_iterations=limit_zero_time_iterations, )(func_name, device.device_type % RPC_SESS_MASK, device.device_id, *packed_args) if args or kwargs: self.set_input(func_name, *args, **kwargs) return self.module.time_evaluator( "invoke", device, repeat=repeat, number=number, min_repeat_ms=min_repeat_ms, limit_zero_time_iterations=limit_zero_time_iterations, cooldown_interval_ms=cooldown_interval_ms, repeats_to_cooldown=repeats_to_cooldown, )(func_name)
https://github.com/zk-ml/tachikoma
python/tvm/script/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """TVM Script APIs of TVM Python Package""" from .parser import ir, ir_module, parse as from_source, tir
https://github.com/zk-ml/tachikoma
python/tvm/script/highlight.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Highlight printed TVM script. """ import sys import warnings from typing import Optional, Union from tvm.ir import IRModule from tvm.tir import PrimFunc def cprint(printable: Union[IRModule, PrimFunc, str], style: Optional[str] = None) -> None: """ Print highlighted TVM script string with Pygments Parameters ---------- printable : Union[IRModule, PrimFunc, str] The TVM script to be printed style : str, optional Printing style, auto-detected if None. Notes ----- The style parameter follows the Pygments style names or Style objects. Three built-in styles are provided: "light", "dark" and "ansi". By default, "light" is used in a notebook environment and "ansi" in a terminal, for better style consistency. As a fallback, when the optional Pygments library is not installed, plain text is printed with a one-time warning suggesting that Pygments be installed. Other Pygments styles can be found at https://pygments.org/styles/ """ if isinstance(printable, (IRModule, PrimFunc)): printable = printable.script() try: # pylint: disable=import-outside-toplevel import pygments from packaging import version from pygments import highlight from pygments.formatters import HtmlFormatter, Terminal256Formatter from pygments.lexers.python import Python3Lexer from pygments.style import Style from pygments.token import Comment, Keyword, Name, Number, Operator, String if version.parse(pygments.__version__) < version.parse("2.4.0"): raise ImportError("Required Pygments version >= 2.4.0 but got " + pygments.__version__) except ImportError as err: with warnings.catch_warnings(): warnings.simplefilter("once", UserWarning) install_cmd = sys.executable + ' -m pip install "Pygments>=2.4.0" --upgrade --user' warnings.warn( str(err) + "\n" + "To print highlighted TVM script, please install Pygments:\n" + install_cmd, category=UserWarning, ) print(printable) else: class JupyterLight(Style): """A Jupyter-Notebook-like Pygments style configuration (aka. "light")""" background_color = "" styles = { Keyword: "bold #008000", Keyword.Type: "nobold #008000", Name.Function: "#0000FF", Name.Class: "bold #0000FF", Name.Decorator: "#AA22FF", String: "#BA2121", Number: "#008000", Operator: "bold #AA22FF", Operator.Word: "bold #008000", Comment: "italic #007979", } class VSCDark(Style): """A VSCode-Dark-like Pygments style configuration (aka.
"dark")""" background_color = "" styles = { Keyword: "bold #c586c0", Keyword.Type: "#82aaff", Keyword.Namespace: "#4ec9b0", Name.Class: "bold #569cd6", Name.Function: "bold #dcdcaa", Name.Decorator: "italic #fe4ef3", String: "#ce9178", Number: "#b5cea8", Operator: "#bbbbbb", Operator.Word: "#569cd6", Comment: "italic #6a9956", } class AnsiTerminalDefault(Style): """The default style for terminal display with ANSI colors (aka. "ansi")""" background_color = "" styles = { Keyword: "bold ansigreen", Keyword.Type: "nobold ansigreen", Name.Class: "bold ansiblue", Name.Function: "bold ansiblue", Name.Decorator: "italic ansibrightmagenta", String: "ansiyellow", Number: "ansibrightgreen", Operator: "bold ansimagenta", Operator.Word: "bold ansigreen", Comment: "italic ansibrightblack", } is_in_notebook = "ipykernel" in sys.modules # in notebook env (support html display). if style is None: # choose style automatically according to the environment: style = JupyterLight if is_in_notebook else AnsiTerminalDefault elif style == "light": style = JupyterLight elif style == "dark": style = VSCDark elif style == "ansi": style = AnsiTerminalDefault if is_in_notebook: # print with HTML display from IPython.display import ( # pylint: disable=import-outside-toplevel HTML, display, ) formatter = HtmlFormatter(style=JupyterLight) formatter.noclasses = True # inline styles html = highlight(printable, Python3Lexer(), formatter) display(HTML(html)) else: print(highlight(printable, Python3Lexer(), Terminal256Formatter(style=style)))
https://github.com/zk-ml/tachikoma
python/tvm/script/ir_builder/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """tvm.script.ir_builder is a generic IR builder for TVM.""" from .base import IRBuilder
https://github.com/zk-ml/tachikoma
python/tvm/script/ir_builder/_ffi_api.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """FFI APIs for tvm.script.ir_builder""" import tvm._ffi tvm._ffi._init_api("script.ir_builder", __name__) # pylint: disable=protected-access
https://github.com/zk-ml/tachikoma
python/tvm/script/ir_builder/base.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """A generic IRBuilder across the TVM stack""" from typing import Any, Callable, List from tvm._ffi import register_object as _register_object from tvm.runtime import Object as _Object from . import _ffi_api @_register_object("script.ir_builder.IRBuilderFrame") class IRBuilderFrame(_Object): """A stack frame of the IRBuilder used to keep track of the current scope. Furthermore, the information stored in each stack frame can be useful for context-dependent IR construction. Examples -------- The `T.match_buffer` below adds an element to the buffer map of `PrimFuncFrame`: .. code-block:: python from tvm.script.ir_builder import tir as T from tvm.script.ir_builder import IRBuilder with IRBuilder() as builder: with T.prim_func(...): # pushes a PrimFuncFrame (subclass of IRBuilderFrame) # to `builder`'s stack of frames buffer = T.match_buffer(...) The `T.match_buffer` below instead generates `MatchBufferRegion` in a TIR block: .. code-block:: python from tvm.script.ir_builder import tir as T from tvm.script.ir_builder import IRBuilder with IRBuilder() as builder: with T.prim_func(...): # pushes a PrimFuncFrame (subclass of IRBuilderFrame) # to `builder`'s stack of frames with T.block(...): # pushes a BlockFrame (subclass of IRBuilderFrame) # to `builder`'s stack of frames buffer = T.match_buffer(...) """ def __enter__(self) -> "IRBuilderFrame": _ffi_api.IRBuilderFrameEnter(self) # type: ignore[attr-defined] # pylint: disable=no-member return self def __exit__(self, ptype, value, trace) -> None: # pylint: disable=unused-argument _ffi_api.IRBuilderFrameExit(self) # type: ignore[attr-defined] # pylint: disable=no-member def add_callback(self, callback: Callable[[], None]) -> None: """Add a callback method invoked when exiting the with-scope. Parameters ---------- callback : Callable[[], None] The callback method to be invoked. """ _ffi_api.IRBuilderFrameAddCallback( # type: ignore[attr-defined] # pylint: disable=no-member self, callback ) @_register_object("script.ir_builder.IRBuilder") class IRBuilder(_Object): """A dialect-agnostic IRBuilder that constructs any IR of TVM. Examples -------- An idiomatic use of this class is to put it inside a with-scope and call dialect-specific methods accordingly; the constructed IR is retrieved upon exiting the scope. .. code-block:: python from tvm.script.ir_builder import tir as T from tvm.script.ir_builder import IRBuilder with IRBuilder() as builder: with T.prim_func(...): # pushes a PrimFuncFrame (subclass of IRBuilderFrame) # to `builder`'s stack of frames buffer = T.match_buffer(...) return builder.get() # returns the constructed IR, i.e.
tir.PrimFunc """ def __init__(self) -> None: """Construct an IRBuilder.""" self.__init_handle_by_constructor__( _ffi_api.IRBuilder # type: ignore[attr-defined] # pylint: disable=no-member ) def __enter__(self) -> "IRBuilder": """Enter the with-scope for IRBuilder, which allows the IRBuilder to be discoverable using `IRBuilder.current()`. Examples -------- .. code-block:: python from tvm.script.ir_builder import IRBuilder with IRBuilder() as builder: assert IRBuilder.current() == builder """ _ffi_api.IRBuilderEnter(self) # type: ignore[attr-defined] # pylint: disable=no-member return self def __exit__(self, ptype, value, trace) -> None: # pylint: disable=unused-argument _ffi_api.IRBuilderExit(self) # type: ignore[attr-defined] # pylint: disable=no-member @staticmethod def current() -> "IRBuilder": """Get the current IRBuilder put in the with-scope. Returns ------- builder : IRBuilder The current IRBuilder. """ return _ffi_api.IRBuilderCurrent() # type: ignore[attr-defined] # pylint: disable=no-member def get(self) -> _Object: """Get the constructed IR.""" return _ffi_api.IRBuilderGet(self) # type: ignore[attr-defined] # pylint: disable=no-member @staticmethod def name(s: str, v: Any) -> Any: """Set the name of an object. Parameters ---------- s : str The name of the object. v : Any The object to name. Returns ------- v : Any The same object with the name set. """ return _ffi_api.IRBuilderName(s, v) # type: ignore[attr-defined] # pylint: disable=no-member @staticmethod def name_many( # pylint: disable=invalid-name s: List[str], vs: List[Any], ) -> List[Any]: """Set the name of a list of objects. Parameters ---------- s : List[str] The names of the objects. vs : List[Any] The objects to name. Returns ------- vs : List[Any] The same objects with the names set. """ assert len(s) == len(vs) return [IRBuilder.name(i, v) for i, v in zip(s, vs)]
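A small sketch of driving IRBuilder together with the TIR dialect frames defined later in this package; the exact frame functions used (T.prim_func, T.arg, T.serial, T.buffer_store) are assumed from tvm.script.ir_builder.tir, and the shapes are illustrative.

# Hedged sketch: build a PrimFunc with IRBuilder, then retrieve it with get().
from tvm.script.ir_builder import IRBuilder
from tvm.script.ir_builder import tir as T

with IRBuilder() as ib:                              # IRBuilder.current() now returns ib
    with T.prim_func():
        T.func_name("add_one")
        a = T.arg("a", T.buffer_decl((8,), "float32"))  # bind a buffer argument
        with T.serial(8) as i:                       # ForFrame yields the loop variable
            T.buffer_store(a, a[i] + 1.0, [i])       # a[i] = a[i] + 1.0
func = ib.get()                                       # the constructed tir.PrimFunc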
https://github.com/zk-ml/tachikoma
python/tvm/script/ir_builder/ir/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Package tvm.script.ir_builder.ir""" from .frame import IRModuleFrame from .ir import ir_module
https://github.com/zk-ml/tachikoma
python/tvm/script/ir_builder/ir/_ffi_api.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """FFI APIs""" import tvm._ffi tvm._ffi._init_api("script.ir_builder.ir", __name__) # pylint: disable=protected-access
https://github.com/zk-ml/tachikoma
python/tvm/script/ir_builder/ir/frame.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Package tvm.script.ir_builder.ir.frame""" from tvm._ffi import register_object as _register_object from ..base import IRBuilderFrame @_register_object("script.ir_builder.IRModuleFrame") class IRModuleFrame(IRBuilderFrame): ...
https://github.com/zk-ml/tachikoma
python/tvm/script/ir_builder/ir/ir.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Package tvm.script.ir_builder.ir.ir""" from . import _ffi_api from .frame import IRModuleFrame def ir_module() -> IRModuleFrame: return _ffi_api.IRModule() # type: ignore[attr-defined] # pylint: disable=no-member
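A brief sketch of how ir_module composes with IRBuilder: functions built inside the IRModuleFrame are collected into a single IRModule on exit. The TIR frame calls are assumed from tvm.script.ir_builder.tir, and the buffer contents are illustrative.

# Hedged sketch: collect a PrimFunc into an IRModule via the IRModuleFrame.
from tvm.script.ir_builder import IRBuilder
from tvm.script.ir_builder import ir as I
from tvm.script.ir_builder import tir as T

with IRBuilder() as ib:
    with I.ir_module():                          # opens an IRModuleFrame
        with T.prim_func():                      # collected into the module on exit
            T.func_name("main")
            buf = T.arg("a", T.buffer_decl((4,), "float32"))
            with T.serial(4) as i:
                T.buffer_store(buf, 0.0, [i])    # zero-fill, purely illustrative
mod = ib.get()                                    # an IRModule containing "main"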
https://github.com/zk-ml/tachikoma
python/tvm/script/ir_builder/tir/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Package tvm.script.ir_builder.tir""" from .ir import * # pylint: disable=wildcard-import,redefined-builtin from .ir import boolean as bool # pylint: disable=redefined-builtin
https://github.com/zk-ml/tachikoma
python/tvm/script/ir_builder/tir/_ffi_api.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """FFI APIs""" import tvm._ffi tvm._ffi._init_api("script.ir_builder.tir", __name__) # pylint: disable=protected-access
https://github.com/zk-ml/tachikoma
python/tvm/script/ir_builder/tir/frame.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """IRBuilder for TIR""" from typing import List, Union from tvm._ffi import register_object as _register_object from tvm.tir import Buffer, Var from ..base import IRBuilderFrame @_register_object("script.ir_builder.tir.TIRFrame") class TIRFrame(IRBuilderFrame): ... @_register_object("script.ir_builder.tir.PrimFuncFrame") class PrimFuncFrame(TIRFrame): ... @_register_object("script.ir_builder.tir.BlockFrame") class BlockFrame(TIRFrame): ... @_register_object("script.ir_builder.tir.BlockInitFrame") class BlockInitFrame(TIRFrame): ... @_register_object("script.ir_builder.tir.ForFrame") class ForFrame(TIRFrame): def __enter__(self) -> Union[Var, List[Var]]: # type: ignore[override] super().__enter__() return self.vars if len(self.vars) > 1 else self.vars[0] @_register_object("script.ir_builder.tir.AssertFrame") class AssertFrame(TIRFrame): ... @_register_object("script.ir_builder.tir.LetFrame") class LetFrame(TIRFrame): ... @_register_object("script.ir_builder.tir.RealizeFrame") class RealizeFrame(TIRFrame): ... @_register_object("script.ir_builder.tir.AllocateFrame") class AllocateFrame(TIRFrame): def __enter__(self) -> Buffer: super().__enter__() return self.buffer_var @_register_object("script.ir_builder.tir.AllocateConstFrame") class AllocateConstFrame(TIRFrame): def __enter__(self) -> Buffer: super().__enter__() return self.buffer_var @_register_object("script.ir_builder.tir.AttrFrame") class AttrFrame(TIRFrame): ... @_register_object("script.ir_builder.tir.WhileFrame") class WhileFrame(TIRFrame): ... @_register_object("script.ir_builder.tir.IfFrame") class IfFrame(TIRFrame): ... @_register_object("script.ir_builder.tir.ThenFrame") class ThenFrame(TIRFrame): ... @_register_object("script.ir_builder.tir.ElseFrame") class ElseFrame(TIRFrame): ... @_register_object("script.ir_builder.tir.DeclBufferFrame") class DeclBufferFrame(TIRFrame): def __enter__(self) -> Buffer: super().__enter__() return self.buffer @_register_object("script.ir_builder.tir.LaunchThreadFrame") class LaunchThreadFrame(TIRFrame): ...
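The __enter__ overrides above are what make loop and allocation frames usable as context managers that hand back their IR values; for example, a ForFrame with several loop variables unpacks directly in the with statement. A hedged sketch, assuming T.grid and the other frame constructors from tir/ir.py below:

# Hedged sketch: ForFrame.__enter__ returns all loop vars when there is more than one.
from tvm.script.ir_builder import IRBuilder
from tvm.script.ir_builder import tir as T

with IRBuilder() as ib:
    with T.prim_func():
        T.func_name("touch")
        buf = T.arg("a", T.buffer_decl((4, 4), "float32"))
        with T.grid(4, 4) as (i, j):             # two vars -> returned as a list
            T.buffer_store(buf, 1.0, [i, j])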
https://github.com/zk-ml/tachikoma
python/tvm/script/ir_builder/tir/ir.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """IRBuilder for TIR""" import functools import inspect from numbers import Integral from typing import Any, Callable, Dict, List, Optional, Tuple, Union # isort: off from typing_extensions import Literal # isort: on import numpy as np # type: ignore from tvm.ir import Range, Type from tvm.runtime import convert, ndarray from tvm.target import Target # pylint: disable=unused-import from tvm.target.codegen import llvm_lookup_intrinsic_id from tvm.tir import Buffer, BufferRegion, PrimExpr from tvm.tir import op as _tir_op from tvm.tir import type_annotation # import tir.expr for direct ir construction to pass structural_equal comparison from tvm.tir.expr import ( EQ, GE, GT, LE, LT, NE, Add, And, Broadcast, BufferLoad, Call, CallEffectKind, Cast, CommReducer, Div, FloatImm, FloorDiv, FloorMod, IntImm, IterVar, Let, Load, Max, Min, Mod, Mul, Not, Or, ProducerLoad, Ramp, Reduce, Select, Shuffle, SizeVar, StringImm, Sub, Var, ) from tvm.tir.generic import cast from . import _ffi_api, frame # pylint: enable=unused-import def buffer_decl( shape: Union[List[PrimExpr], Tuple[PrimExpr], PrimExpr, Integral], dtype: str = "float32", data: Var = None, strides: List[PrimExpr] = None, elem_offset: PrimExpr = None, scope: str = "global", align: int = 0, offset_factor: int = 0, buffer_type: str = "", axis_separators: List[int] = None, ) -> Buffer: """The buffer declaration function. Parameters ---------- shape : Union[List[PrimExpr], Tuple[PrimExpr], PrimExpr, Integral] The type of the buffer prior to flattening. dtype : str The data type in the content of the buffer. data : Var The pointer to the head of the data. strides : List[PrimExpr] The strides of each dimension. elem_offset : PrimExpr The offset in terms of number of dtype elements (including lanes). scope : str The optional storage scope of buffer data pointer. align : int The alignment requirement of data pointer in bytes. offset_factor : int The factor of elem_offset field. buffer_type : str The buffer type. axis_separators : List[int] The separators between input axes when generating flattened output axes. Returns ------- res : Buffer The declared buffer. """ shape = (shape,) if isinstance(shape, (PrimExpr, Integral)) else shape return _ffi_api.BufferDecl( # type: ignore[attr-defined] # pylint: disable=no-member shape, dtype, "", data, strides, elem_offset, scope, align, offset_factor, buffer_type, axis_separators, ) def prim_func() -> frame.PrimFuncFrame: """The primitive function statement. Returns ------- res : frame.PrimFuncFrame The PrimFuncFrame. """ return _ffi_api.PrimFunc() # type: ignore[attr-defined] # pylint: disable=no-member def arg(name: str, obj: Union[Var, Buffer]) -> Union[Var, Buffer]: """The PrimFunc arguments adding function. 
Parameters ---------- name : str The name of the argument. obj : Union[Var, Buffer] The argument, either a Var or a Buffer. Returns ------- res : Union[Var, Buffer] The argument. """ return _ffi_api.Arg(name, obj) # type: ignore[attr-defined] # pylint: disable=no-member def func_name(name: str) -> None: """The PrimFunc naming statement. Parameters ---------- name : str The name of the PrimFunc. """ _ffi_api.FuncName(name) # type: ignore[attr-defined] # pylint: disable=no-member def func_attr(attrs: Dict[str, Any]) -> None: """The PrimFunc annotation statement. Parameters ---------- attrs : Dict[str, Any] The annotations of the PrimFunc. """ _ffi_api.FuncAttrs(attrs) # type: ignore[attr-defined] # pylint: disable=no-member def func_ret(ret_type: Type) -> Type: """The PrimFunc return type statement. Parameters ---------- ret_type : Type The return type of the PrimFunc. Returns ------- res : Type The return type. """ return _ffi_api.FuncRet(ret_type) # type: ignore[attr-defined] # pylint: disable=no-member def match_buffer( param: Union[Var, BufferLoad, BufferRegion], shape: Union[List[PrimExpr], Tuple[PrimExpr], PrimExpr, Integral] = None, dtype: str = "float32", data: Var = None, strides: List[PrimExpr] = None, elem_offset: PrimExpr = None, scope: str = "global", align: int = -1, offset_factor: int = 0, buffer_type: str = "default", axis_separators: List[int] = None, ) -> Buffer: """The buffer match function. Note ---- This function behaves differently depending on the type of param. If param is a var in the function parameters, it creates a buffer from a DLTensor. Else, if param is a subregion of another buffer, it creates a subregion match inside a block. Example ------- Match buffer from function parameter .. code-block:: python A = T.match_buffer(a, (128, 128), dtype="float32") Match buffer from Buffer subregion .. code-block:: python A = T.match_buffer(B[0:128, i * 128 : i * 128 + 128], (128, 128), dtype="float32") Parameters ---------- param : Union[Var, BufferLoad, BufferRegion] The parameter of the PrimFunc to match. shape : Union[List[PrimExpr], Tuple[PrimExpr], PrimExpr, Integral] The type of the buffer prior to flattening. dtype : str The data type in the content of the buffer. data : Var The pointer to the head of the data. strides : List[PrimExpr] The strides of each dimension. elem_offset : PrimExpr The offset in terms of number of dtype elements (including lanes). scope : str The optional storage scope of buffer data pointer. align : int The alignment requirement of data pointer in bytes. offset_factor : int The factor of elem_offset field. buffer_type : str The buffer type. axis_separators : List[int] The separators between input axes when generating flattened output axes. Returns ------- res : Buffer The matched buffer.
""" if shape is None: if isinstance(param, BufferRegion): dtype = param.buffer.dtype shape = [region.extent for region in param.region] else: raise ValueError("Shape must be specified when binding input param") shape = (shape,) if isinstance(shape, (PrimExpr, Integral)) else shape if strides is None: strides = [] return _ffi_api.MatchBuffer( # type: ignore[attr-defined] # pylint: disable=no-member param, shape, dtype, data, strides, elem_offset, scope, align, offset_factor, buffer_type, axis_separators, ) def preflattened_buffer( postflattened: Buffer, shape: Union[List[PrimExpr], Tuple[PrimExpr], PrimExpr, Integral], dtype: str = "float32", data: Var = None, strides: List[PrimExpr] = None, elem_offset: PrimExpr = None, scope: str = "global", align: int = -1, offset_factor: int = 0, buffer_type: str = "default", axis_separators: List[int] = None, ) -> None: """The pre-flattened buffer statement. Parameters ---------- postflattened : Buffer The original buffer to be flattened. shape : Union[List[PrimExpr], Tuple[PrimExpr], PrimExpr, Integral] The type of the buffer prior to flattening. dtype : str The data type in the content of the buffer. data : Var The pointer to the head of the data. strides : List[PrimExpr] The strides of each dimension. elem_offset : PrimExpr The offset in terms of number of dtype elements (including lanes). scope : str The optional storage scope of buffer data pointer. align : int The alignment requirement of data pointer in bytes. offset_factor : int The factor of elem_offset field. buffer_type : str The buffer type. axis_separators : List[int] The separators between input axes when generating flattened output axes. """ shape = (shape,) if isinstance(shape, (PrimExpr, Integral)) else shape if strides is None: strides = [] _ffi_api.PreflattenedBuffer( # type: ignore[attr-defined] # pylint: disable=no-member postflattened, shape, dtype, data, strides, elem_offset, scope, align, offset_factor, buffer_type, axis_separators, ) def block(name: str = "", no_realize: bool = False) -> frame.BlockFrame: """The block declaration statement. Parameters ---------- name : str The name of the block. no_realize : bool The flag whether to construct BlockRealize or Block. Returns ------- res : frame.BlockFrame The BlockFrame. """ return _ffi_api.Block(name, no_realize) # type: ignore[attr-defined] # pylint: disable=no-member def init() -> frame.BlockInitFrame: """The block initialization statement. Returns ------- res : frame.BlockInitFrame The BlockInitFrame. """ return _ffi_api.Init() # type: ignore[attr-defined] # pylint: disable=no-member def where(predicate: Union[PrimExpr, int]) -> None: """The block predicate statement. Parameters ---------- predicate : Union[PrimExpr, Literal[0, 1]] The predicate condition. """ if isinstance(predicate, bool): predicate = IntImm("bool", predicate) if isinstance(predicate, int): if predicate in [0, 1]: predicate = IntImm("bool", predicate) else: raise ValueError(f"Invalid value for predicate: {predicate}") _ffi_api.Where(predicate) # type: ignore[attr-defined] # pylint: disable=no-member def reads(*buffer_slices: List[Union[BufferRegion, BufferLoad]]) -> None: """The block buffer region reading statement. Parameters ---------- buffer_slices : List[Union[BufferRegion, BufferLoad]] The array of buffer regions to read. 
""" if len(buffer_slices) == 1: if isinstance(buffer_slices[0], tuple): buffer_slices = list(buffer_slices[0]) elif isinstance(buffer_slices[0], list): buffer_slices = buffer_slices[0] # type: ignore[assignment] else: buffer_slices = [buffer_slices[0]] else: buffer_slices = list(buffer_slices) # type: ignore[assignment] _ffi_api.Reads(buffer_slices) # type: ignore[attr-defined] # pylint: disable=no-member def writes(*buffer_slices: List[Union[BufferRegion, BufferLoad]]) -> None: """The block buffer region writing statement. Parameters ---------- buffer_slices : List[Union[BufferRegion, BufferLoad]] The array of buffer regions to write. """ if len(buffer_slices) == 1: if isinstance(buffer_slices[0], tuple): buffer_slices = list(buffer_slices[0]) elif isinstance(buffer_slices[0], list): buffer_slices = buffer_slices[0] # type: ignore[assignment] else: buffer_slices = [buffer_slices[0]] else: buffer_slices = list(buffer_slices) # type: ignore[assignment] _ffi_api.Writes(buffer_slices) # type: ignore[attr-defined] # pylint: disable=no-member def block_attr(attrs: Dict[str, Any]) -> None: """The block annotation statement. Parameters ---------- attrs : Dict[str, Any] The annotation of the block. """ return _ffi_api.BlockAttrs(attrs) # type: ignore[attr-defined] # pylint: disable=no-member def alloc_buffer( shape: Union[List[PrimExpr], Tuple[PrimExpr], PrimExpr, Integral], dtype: str = "float32", data: Var = None, strides: List[PrimExpr] = None, elem_offset: PrimExpr = None, scope: str = "global", align: int = -1, offset_factor: int = 0, buffer_type: str = "default", axis_separators: List[int] = None, ) -> Buffer: """The buffer alllocation function. Parameters ---------- shape : Union[List[PrimExpr], Tuple[PrimExpr], PrimExpr, Integral] The type of the buffer prior to flattening. dtype : str The data type in the content of the buffer. data : Var The pointer to the head of the data. strides : List[PrimExpr] The strides of each dimension. elem_offset : PrimExpr The offset in terms of number of dtype elements (including lanes). scope : str The optional storage scope of buffer data pointer. align : int The alignment requirement of data pointer in bytes. offset_factor : int The factor of elem_offset field. buffer_type : str The buffer type. axis_separators : List[int] The separators between input axes when generating flattened output axes. Returns ------- res : Buffer The allocated buffer. """ shape = (shape,) if isinstance(shape, (PrimExpr, Integral)) else shape if strides is None: strides = [] return _ffi_api.AllocBuffer( # type: ignore[attr-defined] # pylint: disable=no-member shape, dtype, data, strides, elem_offset, scope, align, offset_factor, buffer_type, axis_separators, ) def _as_range(dom: Union[Range, List[PrimExpr]]) -> Range: """The range constructor. Parameters ---------- dom : Union[Range, List[PrimExpr]] The domain. Returns ------- res : Range The Range. """ if isinstance(dom, Range): return dom if isinstance(dom, (list, tuple)): return Range(dom[0], dom[1]) if hasattr(dom, "dtype"): return Range(IntImm(dom.dtype, 0), dom) return Range(0, dom) class axis: # pylint: disable=invalid-name """The axis class""" @staticmethod def spatial( dom: Union[Range, List[PrimExpr], Tuple[PrimExpr]], binding: PrimExpr, dtype: str = "int32" ) -> Var: """The spatial block axis defining function. Parameters ---------- dom : Union[Range, List[PrimExpr], Tuple[PrimExpr]] The domain of the iteration variable. binding : PrimExpr The binding value of the iteration variable. 
dtype : str The data type of the iteration variable. Returns ------- res : Var The iteration variable. """ return _ffi_api.AxisSpatial( # type: ignore[attr-defined] # pylint: disable=no-member _as_range(dom), binding, dtype ) @staticmethod def reduce( dom: Union[Range, List[PrimExpr], Tuple[PrimExpr]], binding: PrimExpr, dtype: str = "int32" ) -> Var: """The reduced block axis defining function. Parameters ---------- dom : Union[Range, List[PrimExpr], Tuple[PrimExpr]] The domain of the iteration variable. binding : PrimExpr The binding value of the iteration variable. dtype : str The data type of the iteration variable. Returns ------- res : Var The iteration variable. """ return _ffi_api.AxisReduce( # type: ignore[attr-defined] # pylint: disable=no-member _as_range(dom), binding, dtype ) @staticmethod def scan( dom: Union[Range, List[PrimExpr], Tuple[PrimExpr]], binding: PrimExpr, dtype: str = "int32" ) -> Var: """The scanning block axis defining function. Parameters ---------- dom : Union[Range, List[PrimExpr], Tuple[PrimExpr]] The domain of the iteration variable. binding : PrimExpr The binding value of the iteration variable. dtype : str The data type of the iteration variable. Returns ------- res : Var The iteration variable. """ return _ffi_api.AxisScan( # type: ignore[attr-defined] # pylint: disable=no-member _as_range(dom), binding, dtype ) @staticmethod def opaque( dom: Union[Range, List[PrimExpr], Tuple[PrimExpr]], binding: PrimExpr, dtype: str = "int32" ) -> Var: """The opaque block axis defining function. Parameters ---------- dom : Union[Range, List[PrimExpr], Tuple[PrimExpr]] The domain of the iteration variable. binding : PrimExpr The binding value of the iteration variable. dtype : str The data type of the iteration variable. Returns ------- res : Var The iteration variable. """ return _ffi_api.AxisOpaque( # type: ignore[attr-defined] # pylint: disable=no-member _as_range(dom), binding, dtype ) @staticmethod def remap(kinds: str, bindings: List[PrimExpr], dtype: str = "int32") -> Union[List[Var], Var]: """The block axis remapping function. Parameters ---------- kinds : str The types of the iteration variables. bindings : List[PrimExpr] The binding values of the iteration variables. dtype : str The data types of the iteration variables. Returns ------- res : Var The iteration variables. """ iter_vars = _ffi_api.AxisRemap( # type: ignore[attr-defined] # pylint: disable=no-member kinds, bindings, dtype ) return iter_vars[0] if len(iter_vars) == 1 else iter_vars S = spatial # pylint: disable=invalid-name R = reduce # pylint: disable=invalid-name def serial( start: PrimExpr, stop: PrimExpr = None, *, annotations: Dict[str, Any] = None ) -> frame.ForFrame: """The serial For statement. Parameters ---------- start : PrimExpr The minimum value of iteration. stop : PrimExpr The maximum value of iteration. annotations : Dict[str, Any] The optional annotations of the For statement. Returns ------- res : frame.ForFrame The ForFrame. """ if stop is None: stop = start if hasattr(start, "dtype"): start = IntImm(start.dtype, 0) else: start = 0 return _ffi_api.Serial(start, stop, annotations) # type: ignore[attr-defined] # pylint: disable=no-member def parallel( start: PrimExpr, stop: PrimExpr = None, *, annotations: Dict[str, Any] = None ) -> frame.ForFrame: """The parallel For statement. Parameters ---------- start : PrimExpr The minimum value of iteration. stop : PrimExpr The maximum value of iteration. annotations : Dict[str, Any] The optional annotations of the For statement. 
Returns ------- res : frame.ForFrame The ForFrame. """ if stop is None: stop = start if hasattr(start, "dtype"): start = IntImm(start.dtype, 0) else: start = 0 return _ffi_api.Parallel(start, stop, annotations) # type: ignore[attr-defined] # pylint: disable=no-member def vectorized( start: PrimExpr, stop: PrimExpr = None, *, annotations: Dict[str, Any] = None ) -> frame.ForFrame: """The vectorized For statement. Parameters ---------- start : PrimExpr The minimum value of iteration. stop : PrimExpr The maximum value of iteration. annotations : Dict[str, Any] The optional annotations of the For statement. Returns ------- res : frame.ForFrame The ForFrame. """ if stop is None: stop = start if hasattr(start, "dtype"): start = IntImm(start.dtype, 0) else: start = 0 return _ffi_api.Vectorized(start, stop, annotations) # type: ignore[attr-defined] # pylint: disable=no-member def unroll( start: PrimExpr, stop: PrimExpr = None, *, annotations: Dict[str, Any] = None ) -> frame.ForFrame: """The unrolled For statement. Parameters ---------- start : PrimExpr The minimum value of iteration. stop : PrimExpr The maximum value of iteration. annotations : Dict[str, Any] The optional annotations of the For statement. Returns ------- res : frame.ForFrame The ForFrame. """ if stop is None: stop = start if hasattr(start, "dtype"): start = IntImm(start.dtype, 0) else: start = 0 return _ffi_api.Unroll(start, stop, annotations) # type: ignore[attr-defined] # pylint: disable=no-member def thread_binding( start: PrimExpr, stop: PrimExpr = None, thread: str = None, *, annotations: Dict[str, Any] = None, ) -> frame.ForFrame: """The thread-binding For statement. Parameters ---------- start : PrimExpr The minimum value of iteration. stop : PrimExpr The maximum value of iteration. thread : str The thread for loop variable to bind. annotations : Dict[str, Any] The optional annotations of the For statement. Returns ------- res : frame.ForFrame The ForFrame. """ if thread is None: if not isinstance(stop, str): raise ValueError("Thread cannot be None for thread_binding") thread = stop stop = start if hasattr(start, "dtype"): start = IntImm(start.dtype, 0) else: start = 0 elif stop is None: stop = start if hasattr(start, "dtype"): start = IntImm(start.dtype, 0) else: start = 0 return _ffi_api.ThreadBinding( # type: ignore[attr-defined] # pylint: disable=no-member start, stop, thread, annotations ) def grid(*extents: PrimExpr) -> frame.ForFrame: """The grid For statement. Parameters ---------- extents : PrimExpr The extents of the iteration. Returns ------- res : frame.ForFrame The ForFrame. """ return _ffi_api.Grid(extents) # type: ignore[attr-defined] # pylint: disable=no-member def Assert(condition: PrimExpr, message: str) -> frame.AssertFrame: # pylint: disable=invalid-name """Create an assertion statement. Parameters ---------- condition : PrimExpr The PrimExpr to test. message : str The output error message when the assertion fails. Returns ------- res : frame.AssertFrame The result AssertFrame. """ return _ffi_api.Assert(condition, message) # type: ignore[attr-defined] # pylint: disable=no-member def let( v: Var, value: PrimExpr, body: PrimExpr = None, ) -> frame.LetFrame: """Create a new let binding. Parameters ---------- v : Var The variable to bind. value : PrimExpr The value to be bound. body : PrimExpr The body expression, None will be used if it was not specified. Returns ------- res : frame.LetFrame The result LetFrame. 
""" if body is None: return _ffi_api.Let(v, value) # type: ignore[attr-defined] # pylint: disable=no-member return Let(v, value, body) def realize( buffer_slice: BufferRegion, storage_scope: str, condition: PrimExpr = True, ) -> frame.RealizeFrame: """Create a realization. Parameters ---------- buffer_slice : BufferRegion The region of buffer access. storage_scope : str The storage scope associated with this realization. condition: PrimExpr The condition expression, the default is True. Returns ------- res : frame.RealizeFrame The result RealizeFrame. """ return _ffi_api.Realize( # type: ignore[attr-defined] # pylint: disable=no-member buffer_slice, storage_scope, condition ) def allocate( extents: List[PrimExpr], dtype: str, scope: str = "global", condition: PrimExpr = None, annotations=None, ) -> frame.AllocateFrame: """Allocate node. Parameters ---------- extents : List[PrimExpr] The extents of the allocate. dtype : str The data type of the buffer. scope : str The storage scope. condition : PrimExpr The condition. annotations: Optional[Mapping[str, Object]] Additional annotation hints. """ if isinstance(condition, bool): condition = IntImm("bool", condition) return _ffi_api.Allocate( # type: ignore[attr-defined] # pylint: disable=no-member extents, dtype, scope, condition, annotations ) def allocate_const( data: List[PrimExpr], dtype: str, extents: List[PrimExpr], annotations=None, ) -> frame.AllocateConstFrame: """Allocate constant node. Parameters ---------- data : List[PrimExpr] The data associated with the constant. dtype : str The data type of the buffer. extents : List[PrimExpr] The extents of the allocate. annotations : Optional[Map] Additional annotations about the allocation. """ np_data = np.asarray(data, dtype=dtype) prod_extent = 1 for extent in extents: prod_extent *= extent prod_shape = 1 for shape in np_data.shape: prod_shape *= shape if prod_extent == prod_shape: np_data = np_data.reshape(extents) return _ffi_api.AllocateConst( # type: ignore[attr-defined] # pylint: disable=no-member ndarray.array(np_data), dtype, extents, annotations ) def attr(node: Any, attr_key: str, value: Union[PrimExpr, str]) -> frame.AttrFrame: """Create an attribute node. Parameters ---------- node : Any The node to annotate the attribute. attr_key : str Attribute type key. value : Union[PrimExpr, str] The value of the attribute. Returns ------- res : frame.AttrFrame The result AttrFrame. """ node = convert(node) value = convert(value) return _ffi_api.Attr(node, attr_key, value) # type: ignore[attr-defined] # pylint: disable=no-member def While(condition: PrimExpr) -> frame.WhileFrame: # pylint: disable=invalid-name """Create a while node. Parameters ---------- condition : PrimExpr The termination condition of the loop. Returns ------- res : frame.WhileFrame The result WhileFrame. """ if isinstance(condition, bool): condition = IntImm("bool", condition) return _ffi_api.While(condition) # type: ignore[attr-defined] # pylint: disable=no-member def If(condition: PrimExpr) -> frame.IfFrame: # pylint: disable=invalid-name """Create an if node. Parameters ---------- condition : PrimExpr The condition of if statement, executes the true branch if the condition is true, otherwise jump into the false branch. Returns ------- res : frame.IfFrame The result IfFrame. """ if isinstance(condition, bool): condition = IntImm("bool", condition) return _ffi_api.If(condition) # type: ignore[attr-defined] # pylint: disable=no-member def Then() -> frame.ThenFrame: # pylint: disable=invalid-name """Create a then. 
Returns ------- res : frame.ThenFrame The result ThenFrame. """ return _ffi_api.Then() # type: ignore[attr-defined] # pylint: disable=no-member def Else() -> frame.ElseFrame: # pylint: disable=invalid-name """Create an else. Returns ------- res : frame.ElseFrame The result ElseFrame. """ return _ffi_api.Else() # type: ignore[attr-defined] # pylint: disable=no-member def decl_buffer( shape, dtype="float32", data=None, strides=None, elem_offset=None, scope="global", align=0, offset_factor=0, buffer_type="", axis_separators=None, ) -> frame.DeclBufferFrame: """Create a buffer declaration node. Parameters ---------- shape : Union[List[PrimExpr], Tuple[PrimExpr], PrimExpr, Integral] The type of the buffer prior to flattening. dtype : str The data type in the content of the buffer. data : Var The pointer to the head of the data. strides : List[PrimExpr] The strides of each dimension. elem_offset : PrimExpr The offset in terms of number of dtype elements (including lanes). scope : str The optional storage scope of buffer data pointer. align : int The alignment requirement of data pointer in bytes. offset_factor : int The factor of elem_offset field. buffer_type : str The buffer type. axis_separators : List[int] The separators between input axes when generating flattened output axes. Returns ------- res : frame.DeclBufferFrame The result DeclBufferFrame. """ shape = (shape,) if isinstance(shape, (PrimExpr, Integral)) else shape return _ffi_api.DeclBuffer( # type: ignore[attr-defined] # pylint: disable=no-member shape, dtype, "", data, strides, elem_offset, scope, align, offset_factor, buffer_type, axis_separators, ) def launch_thread( iter_var: IterVar, # pylint: disable=redefined-outer-name extent: PrimExpr, ) -> frame.LaunchThreadFrame: """Launch a thread. Parameters ---------- iter_var : IterVar The iteration variable. extent : PrimExpr The extent of the environment thread. Returns ------- res : frame.LaunchThreadFrame The result LaunchThreadFrame. Examples -------- .. code-block:: python from tvm.script.ir_builder import tir as T brow = T.env_thread("blockIdx.y") T.launch_thread(brow, 1) """ return _ffi_api.LaunchThread(iter_var, extent) # type: ignore[attr-defined] # pylint: disable=no-member def env_thread(thread_tag: str) -> IterVar: """Bind a var to a thread environment. Parameters ---------- thread_tag : str The thread type tag. Returns ------- res : IterVar The iteration variable bound to the thread environment. """ return _ffi_api.EnvThread(thread_tag) # type: ignore[attr-defined] # pylint: disable=no-member def buffer_store(buffer: Buffer, value: PrimExpr, indices: List[Union[PrimExpr, slice]]) -> None: """Buffer store node. Parameters ---------- buffer : Buffer The buffer. value : PrimExpr The value to be stored. indices : List[Union[PrimExpr, slice]] The indices at which the value is stored. """ from tvm.arith import Analyzer # pylint: disable=import-outside-toplevel expr_indices = [] for index in indices: if isinstance(index, slice): step = 1 if index.step is None else index.step lanes = Analyzer().simplify((index.stop - index.start + step - 1) // step) if lanes == 1: expr_indices.append(index.start) else: expr_indices.append(ramp(index.start, step, int(lanes))) else: expr_indices.append(index) if isinstance(value, bool) and buffer.dtype == "bool": value = IntImm("bool", value) return _ffi_api.BufferStore( # type: ignore[attr-defined] # pylint: disable=no-member buffer, value, expr_indices ) def prefetch(buffer: Buffer, indices: List[PrimExpr]) -> None: """The prefetch hint for a buffer.
Parameters ---------- buffer : Buffer The buffer to be prefetched. indices : List[PrimExpr] The indices of the buffer to extract. """ return _ffi_api.Prefetch(buffer, indices) # type: ignore[attr-defined] # pylint: disable=no-member def evaluate(value: PrimExpr) -> None: """Evaluate the input expression. Parameters ---------- value: PrimExpr The input expression to evaluate. """ if isinstance(value, str): value = StringImm(value) if isinstance(value, bool): value = cast(value, "bool") return _ffi_api.Evaluate(value) # type: ignore[attr-defined] # pylint: disable=no-member __all__ = [] for _dtype in ["Float", "UInt", "Int"]: for _size in ["8", "16", "32", "64"]: for _lanes in ["", "x4", "x8", "x16", "x32", "x64"]: _name = _dtype + _size + _lanes # pylint: disable=invalid-name def func_gen(name: str): """Generate a function for each PrimExpr dtype. Parameters ---------- name: str The ffi function name to call. """ def func( expr: Union[ None, PrimExpr, Literal["inf", "-inf", "nan"], ] = None ) -> PrimExpr: if isinstance(expr, str): expr = float(expr) return getattr(_ffi_api, name)(expr) return func globals()[_name.lower()] = func_gen(_name) __all__.append(_name.lower()) def boolean(expr: Optional[PrimExpr] = None) -> PrimExpr: """Construct a new tir.Var with type boolean or cast expression to type boolean. Parameters ---------- expr: PrimExpr The expression to be cast. Returns ------- res : PrimExpr The new tir.Var with type boolean or casted expression with type boolean. """ return _ffi_api.Boolean(expr) # type: ignore[attr-defined] # pylint: disable=no-member def handle(expr: Optional[PrimExpr] = None) -> PrimExpr: """Construct a new tir.Var with type handle or cast expression to type handle. Parameters ---------- expr: PrimExpr The expression to be cast. Returns ------- res : PrimExpr The new tir.Var with type handle or casted expression with type handle. """ return _ffi_api.Handle(expr) # type: ignore[attr-defined] # pylint: disable=no-member def void(expr: Optional[PrimExpr] = None) -> PrimExpr: """Construct a new tir.Var with type void or cast expression to type void. Parameters ---------- expr: PrimExpr The expression to be cast. Returns ------- res : PrimExpr The new tir.Var with type void or casted expression with type void. """ return _ffi_api.Void(expr) # type: ignore[attr-defined] # pylint: disable=no-member def var(dtype: str, name: str = "") -> Var: """Construct a new tir.Var. Parameters ---------- dtype: str The dtype of the Var. name: str The name of the Var. Returns ------- res : Var The result tir.Var. """ return Var(name, dtype) # pylint: disable=no-member def ptr(dtype: str, storage_scope: str = "global") -> Var: """The pointer declaration function. Parameters ---------- dtype : str The data type of the pointer. storage_scope : str The storage scope of the pointer. Returns ------- res : Var The pointer. """ return _ffi_api.Ptr(dtype, storage_scope) # type: ignore[attr-defined] # pylint: disable=no-member def min(a: PrimExpr, b: PrimExpr) -> PrimExpr: # pylint: disable=redefined-builtin """Compute the minimum value of two expressions. Parameters ---------- a : PrimExpr The left hand operand b : PrimExpr The right hand operand Returns ------- res : PrimExpr The result expression. """ return _ffi_api.min(a, b) # type: ignore[attr-defined] # pylint: disable=no-member def max(a: PrimExpr, b: PrimExpr) -> PrimExpr: # pylint: disable=redefined-builtin """Compute the maximum value of two expressions. 
Parameters ---------- a : PrimExpr The left hand operand b : PrimExpr The right hand operand Returns ------- res : PrimExpr The result expression. """ return _ffi_api.max(a, b) # type: ignore[attr-defined] # pylint: disable=no-member def iter_var(v: Union[Var, str], dom: Range, iter_type: str, thread_tag: str) -> IterVar: """The iteration variable. Parameters ---------- var : Union[Var, str] The internal variable that is used for iteration. dom : Range The domain of the iteration. iter_type : str The iteration type. thread_tag : str The thread type tag. Returns ------- res : IterVar The iteration variable. """ iter_type = getattr(IterVar, iter_type) return IterVar(dom, v, iter_type, thread_tag) def comm_reducer(combiner: Callable, identity: List[PrimExpr]) -> CommReducer: """ Create a CommReducer from lambda inputs/outputs and the identities Parameters ---------- combiner : Callable A binary function which takes two PrimExpr as input to return a PrimExpr. identity : List[PrimExpr] A list of types of output PrimExpr. Returns ------- res : CommReducer The CommReducer. """ params = inspect.signature(combiner).parameters num_args = len(params) args = [] for name, i in zip(params.keys(), identity + identity): if isinstance(i, int): args.append(Var(name, "int32")) else: args.append(Var(name, i.dtype)) res = combiner(*args) if not isinstance(res, tuple): res = (res,) return CommReducer(args[: num_args // 2], args[num_args // 2 :], res, identity) def target(target_config: Union[Dict, str]) -> Target: """ Create a target Parameters ---------- target_config : Union[Dict, str] The target configuration. Returns ------- res : Target The target. """ if not isinstance(target_config, (str, dict)): raise ValueError( f"T.target expected a config dict or string, but got {type(target_config)}" ) return Target(target_config) def _op_wrapper(func): @functools.wraps(func) def wrapped(*args, **kwargs): if "dtype" in kwargs: kwargs.pop("dtype") return func(*args, **kwargs) return wrapped def _dtype_forward(func): @functools.wraps(func) def wrapped(*args, **kwargs): if "dtype" in kwargs: args = (kwargs.pop("dtype"),) + args return func(*args, **kwargs) return wrapped # pylint: disable=invalid-name broadcast = Broadcast ramp = Ramp buffer_var = ptr abs = _op_wrapper(_tir_op.abs) # pylint: disable=redefined-builtin fabs = abs acos = _op_wrapper(_tir_op.acos) acosh = _op_wrapper(_tir_op.acosh) address_of = _op_wrapper(_tir_op.address_of) asin = _op_wrapper(_tir_op.asin) asinh = _op_wrapper(_tir_op.asinh) atan = _op_wrapper(_tir_op.atan) atan2 = _op_wrapper(_tir_op.atan2) atanh = _op_wrapper(_tir_op.atanh) ceil = _op_wrapper(_tir_op.ceil) clz = _op_wrapper(_tir_op.clz) copysign = _op_wrapper(_tir_op.copysign) cos = _op_wrapper(_tir_op.cos) cosh = _op_wrapper(_tir_op.cosh) erf = _op_wrapper(_tir_op.erf) exp = _op_wrapper(_tir_op.exp) exp2 = _op_wrapper(_tir_op.exp2) exp10 = _op_wrapper(_tir_op.exp10) floor = _op_wrapper(_tir_op.floor) ceildiv = _op_wrapper(_tir_op.ceildiv) floordiv = _op_wrapper(_tir_op.floordiv) floormod = _op_wrapper(_tir_op.floormod) fmod = _op_wrapper(_tir_op.fmod) hypot = _op_wrapper(_tir_op.hypot) if_then_else = _op_wrapper(_tir_op.if_then_else) infinity = _op_wrapper(_tir_op.infinity) isfinite = _op_wrapper(_tir_op.isfinite) isinf = _op_wrapper(_tir_op.isinf) isnan = _op_wrapper(_tir_op.isnan) isnullptr = _op_wrapper(_tir_op.isnullptr) ldexp = _op_wrapper(_tir_op.ldexp) likely = _op_wrapper(_tir_op.likely) log = _op_wrapper(_tir_op.log) log1p = _op_wrapper(_tir_op.log1p) log2 = 
_op_wrapper(_tir_op.log2) log10 = _op_wrapper(_tir_op.log10) lookup_param = _op_wrapper(_tir_op.lookup_param) max_value = _op_wrapper(_tir_op.max_value) min_value = _op_wrapper(_tir_op.min_value) nearbyint = _op_wrapper(_tir_op.nearbyint) nextafter = _op_wrapper(_tir_op.nextafter) popcount = _op_wrapper(_tir_op.popcount) power = _op_wrapper(_tir_op.power) q_multiply_shift = _op_wrapper(_tir_op.q_multiply_shift) q_multiply_shift_per_axis = _op_wrapper(_tir_op.q_multiply_shift_per_axis) ret = _op_wrapper(_tir_op.ret) reinterpret = _dtype_forward(_tir_op.reinterpret) round = _op_wrapper(_tir_op.round) # pylint: disable=redefined-builtin rsqrt = _op_wrapper(_tir_op.rsqrt) shift_left = _op_wrapper(_tir_op.shift_left) shift_right = _op_wrapper(_tir_op.shift_right) sigmoid = _op_wrapper(_tir_op.sigmoid) sin = _op_wrapper(_tir_op.sin) sinh = _op_wrapper(_tir_op.sinh) sqrt = _op_wrapper(_tir_op.sqrt) tan = _op_wrapper(_tir_op.tan) tanh = _op_wrapper(_tir_op.tanh) trunc = _op_wrapper(_tir_op.trunc) truncdiv = _op_wrapper(_tir_op.truncdiv) truncmod = _op_wrapper(_tir_op.truncmod) tvm_access_ptr = _op_wrapper(_tir_op.tvm_access_ptr) tvm_throw_last_error = _op_wrapper(_tir_op.tvm_throw_last_error) tvm_stack_alloca = _op_wrapper(_tir_op.tvm_stack_alloca) tvm_stack_make_shape = _op_wrapper(_tir_op.tvm_stack_make_shape) tvm_stack_make_array = _op_wrapper(_tir_op.tvm_stack_make_array) tvm_check_return = _op_wrapper(_tir_op.tvm_check_return) call_packed = _op_wrapper(_tir_op.call_packed) call_cpacked = _op_wrapper(_tir_op.call_cpacked) call_packed_lowered = _op_wrapper(_tir_op.call_packed_lowered) call_cpacked_lowered = _op_wrapper(_tir_op.call_cpacked_lowered) call_extern = _dtype_forward(_tir_op.call_extern) call_intrin = _dtype_forward(_tir_op.call_intrin) call_llvm_intrin = _dtype_forward(_tir_op.call_llvm_intrin) call_llvm_pure_intrin = _dtype_forward(_tir_op.call_llvm_pure_intrin) call_pure_extern = _dtype_forward(_tir_op.call_pure_extern) tvm_tuple = _op_wrapper(_tir_op.tvm_tuple) tvm_struct_set = _op_wrapper(_tir_op.tvm_struct_set) tvm_struct_get = _tir_op.tvm_struct_get tvm_thread_allreduce = _op_wrapper(_tir_op.tvm_thread_allreduce) tvm_load_matrix_sync = _op_wrapper(_tir_op.tvm_load_matrix_sync) tvm_mma_sync = _op_wrapper(_tir_op.tvm_mma_sync) tvm_bmma_sync = _op_wrapper(_tir_op.tvm_bmma_sync) tvm_fill_fragment = _op_wrapper(_tir_op.tvm_fill_fragment) tvm_store_matrix_sync = _op_wrapper(_tir_op.tvm_store_matrix_sync) ptx_mma = _dtype_forward(_tir_op.ptx_mma) ptx_mma_sp = _dtype_forward(_tir_op.ptx_mma_sp) ptx_ldmatrix = _dtype_forward(_tir_op.ptx_ldmatrix) ptx_cp_async = _dtype_forward(_tir_op.ptx_cp_async) ptx_wait_group = _op_wrapper(_tir_op.ptx_wait_group) ptx_commit_group = _op_wrapper(_tir_op.ptx_commit_group) mma_store = _dtype_forward(_tir_op.mma_store) mma_fill = _dtype_forward(_tir_op.mma_fill) vectorlow = _dtype_forward(_tir_op.vectorlow) vectorhigh = _dtype_forward(_tir_op.vectorhigh) vectorcombine = _dtype_forward(_tir_op.vectorcombine) assume = _op_wrapper(_tir_op.assume) undef = _op_wrapper(_tir_op.undef) tvm_call_packed = call_packed tvm_call_cpacked = call_cpacked tvm_call_packed_lowered = call_packed_lowered tvm_call_cpacked_lowered = call_cpacked_lowered TVMBackendAllocWorkspace = _op_wrapper(_tir_op.TVMBackendAllocWorkspace) TVMBackendFreeWorkspace = _op_wrapper(_tir_op.TVMBackendFreeWorkspace) start_profile_intrinsic = _op_wrapper(_tir_op.start_profile_intrinsic) end_profile_intrinsic = _op_wrapper(_tir_op.end_profile_intrinsic) class meta_var: """A meta variable used in 
TVMScript metaprogramming. It means that the value of the variable does not appear in the final TIR, but only stays in the parser. Parameters ---------- value: Any The meta variable. """ def __init__(self, value: Any) -> None: self.value = value def __iter__(self): def f(): for i in self.value: yield meta_var(i) return f() # pylint: enable=invalid-name __all__ += [ "buffer_decl", "prim_func", "arg", "func_name", "func_attr", "func_ret", "match_buffer", "preflattened_buffer", "block", "init", "where", "reads", "writes", "block_attr", "alloc_buffer", "axis", "serial", "parallel", "vectorized", "unroll", "thread_binding", "grid", "Assert", "let", "realize", "allocate", "allocate_const", "attr", "While", "If", "Then", "Else", "decl_buffer", "launch_thread", "env_thread", "buffer_store", "prefetch", "evaluate", "boolean", "handle", "void", "var", "ptr", "min", "max", "iter_var", "comm_reducer", "target", "buffer_var", "abs", "fabs", "acos", "acosh", "address_of", "asin", "asinh", "atan", "atan2", "atanh", "ceil", "clz", "copysign", "cos", "cosh", "erf", "exp", "exp2", "exp10", "floor", "ceildiv", "floordiv", "floormod", "fmod", "hypot", "if_then_else", "infinity", "isfinite", "isinf", "isnan", "isnullptr", "ldexp", "likely", "log", "log1p", "log2", "log10", "lookup_param", "max_value", "min_value", "nearbyint", "nextafter", "popcount", "power", "q_multiply_shift", "q_multiply_shift_per_axis", "ret", "reinterpret", "round", "rsqrt", "shift_left", "shift_right", "sigmoid", "sin", "sinh", "sqrt", "tan", "tanh", "trunc", "truncdiv", "truncmod", "tvm_access_ptr", "tvm_throw_last_error", "tvm_stack_alloca", "tvm_stack_make_shape", "tvm_stack_make_array", "tvm_check_return", "call_packed", "call_cpacked", "call_packed_lowered", "call_cpacked_lowered", "call_extern", "call_intrin", "call_llvm_intrin", "call_llvm_pure_intrin", "call_pure_extern", "tvm_tuple", "tvm_struct_set", "tvm_struct_get", "tvm_thread_allreduce", "tvm_load_matrix_sync", "tvm_mma_sync", "tvm_bmma_sync", "tvm_fill_fragment", "tvm_store_matrix_sync", "ptx_mma", "ptx_mma_sp", "ptx_ldmatrix", "ptx_cp_async", "ptx_wait_group", "ptx_commit_group", "mma_store", "mma_fill", "vectorlow", "vectorhigh", "vectorcombine", "assume", "undef", "tvm_call_packed", "tvm_call_cpacked", "tvm_call_packed_lowered", "tvm_call_cpacked_lowered", "TVMBackendAllocWorkspace", "TVMBackendFreeWorkspace", "start_profile_intrinsic", "end_profile_intrinsic", "meta_var", "llvm_lookup_intrinsic_id", "type_annotation", "broadcast", "ramp", "cast", # tvm.tir.expr "Var", "SizeVar", "Reduce", "FloatImm", "IntImm", "StringImm", "Cast", "Add", "Sub", "Mul", "Div", "Mod", "FloorDiv", "FloorMod", "Min", "Max", "EQ", "NE", "LT", "LE", "GT", "GE", "And", "Or", "Not", "Select", "BufferLoad", "ProducerLoad", "Load", "Ramp", "Broadcast", "Shuffle", "Call", "CallEffectKind", "Let", "IterVar", "CommReducer", ]
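# ---------------------------------------------------------------------------
# [Editor's sketch, not part of the original file] A minimal usage example of
# the frame-based builders defined above, following the pattern of TVM's own
# IRBuilder unit tests. It assumes a TVM build that ships tvm.script.ir_builder;
# IRBuilder and ib.get() come from the parent package, and tir.BufferLoad from
# tvm.tir, neither of which is defined in this file.
from tvm import tir
from tvm.script.ir_builder import IRBuilder
from tvm.script.ir_builder import tir as T

with IRBuilder() as ib:
    with T.prim_func():
        T.func_name("copy")
        a = T.arg("a", T.handle())  # declare two handle parameters
        b = T.arg("b", T.handle())
        A = T.match_buffer(a, (128, 128), "float32")
        B = T.match_buffer(b, (128, 128), "float32")
        with T.grid(128, 128) as (i, j):  # nested serial loops (see grid above)
            with T.block("copy"):
                vi, vj = T.axis.remap("SS", [i, j])  # two spatial block axes
                T.buffer_store(B, tir.BufferLoad(A, [vi, vj]), [vi, vj])
func = ib.get()  # retrieve the finished tir.PrimFunc
# ---------------------------------------------------------------------------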
https://github.com/zk-ml/tachikoma
python/tvm/script/parser/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """The parser""" from . import _core, ir, tir from ._core import parse from .ir import ir_module from .tir import prim_func
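# ---------------------------------------------------------------------------
# [Editor's sketch, not part of the original file] The exports above back the
# TVMScript decorators. A hedged example of declaring a module through this
# parser; the T.Buffer[...] annotation style is an assumption about this
# snapshot's parser.tir sub-package.
from tvm.script.parser import ir_module
from tvm.script.parser import tir as T

@ir_module
class Module:
    @T.prim_func
    def add_one(A: T.Buffer[(8,), "float32"], B: T.Buffer[(8,), "float32"]):
        for i in T.serial(8):
            B[i] = A[i] + 1.0
# Module is parsed into a tvm.ir.IRModule containing the PrimFunc "add_one".
# ---------------------------------------------------------------------------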
https://github.com/zk-ml/tachikoma
python/tvm/script/parser/_core.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """The core parser infra""" # pylint: disable=unused-import from .core import dispatch, doc, utils from .core.dispatch import OpMethod, register_op from .core.entry import parse from .core.parser import Parser
https://github.com/zk-ml/tachikoma
python/tvm/script/parser/core/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """The core parser infra""" from . import diagnostics, dispatch, doc, doc_core, entry, evaluator, parser, utils
https://github.com/zk-ml/tachikoma
python/tvm/script/parser/core/diagnostics.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """TVM Script Parser Source and diagnostics""" import inspect import sys from typing import Union from tvm.ir import IRModule, SourceName, Span, diagnostics from . import doc class Source: """Source code class for TVMScript. It is constructed by source code str or doc AST tree. Parameters ---------- source_name : str The filename of the file where the source code locates. start_line : int The first line number of the source code. start_column : int The first column number of the first line of the source code. source : str The source code str of source code. full_source : str The complete source code of the file where the source code locates. """ source_name: str start_line: int start_column: int source: str full_source: str def __init__(self, program: Union[str, doc.AST]): if isinstance(program, str): self.source_name = "<str>" self.start_line = 1 self.start_column = 0 self.source = program self.full_source = program return self.source_name = inspect.getsourcefile(program) # type: ignore lines, self.start_line = getsourcelines(program) # type: ignore if lines: self.start_column = len(lines[0]) - len(lines[0].lstrip()) else: self.start_column = 0 if self.start_column and lines: self.source = "\n".join([l[self.start_column :].rstrip() for l in lines]) else: self.source = "".join(lines) try: # It will cause a problem when running in Jupyter Notebook. # `mod` will be <module '__main__'>, which is a built-in module # and `getsource` will throw a TypeError mod = inspect.getmodule(program) if mod: self.full_source = inspect.getsource(mod) else: self.full_source = self.source except TypeError: # It's a work around for Jupyter problem. # Since `findsource` is an internal API of inspect, we just use it # as a fallback method. src, _ = inspect.findsource(program) # type: ignore self.full_source = "".join(src) def as_ast(self) -> doc.AST: """Parse the source code into AST. Returns ------- res : doc.AST The AST of source code. """ return doc.parse(self.source) _getfile = inspect.getfile # pylint: disable=invalid-name _findsource = inspect.findsource # pylint: disable=invalid-name def _patched_inspect_getfile(obj): """Work out which source or compiled file an object was defined in.""" if not inspect.isclass(obj): return _getfile(obj) mod = getattr(obj, "__module__", None) if mod is not None: file = getattr(sys.modules[mod], "__file__", None) if file is not None: return file for _, member in inspect.getmembers(obj): if inspect.isfunction(member): if obj.__qualname__ + "." 
+ member.__name__ == member.__qualname__: return inspect.getfile(member) raise TypeError(f"Source for {obj!r} not found") def findsource(obj): """Return the entire source file and starting line number for an object.""" import linecache # pylint: disable=import-outside-toplevel if not inspect.isclass(obj): return _findsource(obj) file = inspect.getsourcefile(obj) if file: linecache.checkcache(file) else: file = inspect.getfile(obj) if not (file.startswith("<") and file.endswith(">")): raise OSError("source code not available") module = inspect.getmodule(obj, file) if module: lines = linecache.getlines(file, module.__dict__) else: lines = linecache.getlines(file) if not lines: raise OSError("could not get source code") qual_names = obj.__qualname__.replace(".<locals>", "<locals>").split(".") in_comment = 0 scope_stack = [] indent_info = {} for i, line in enumerate(lines): n_comment = line.count('"""') if n_comment: # update multi-line comments status in_comment = in_comment ^ (n_comment & 1) continue if in_comment: # skip lines within multi-line comments continue indent = len(line) - len(line.lstrip()) tokens = line.split() if len(tokens) > 1: name = None if tokens[0] == "def": name = tokens[1].split(":")[0].split("(")[0] + "<locals>" elif tokens[0] == "class": name = tokens[1].split(":")[0].split("(")[0] if name: while scope_stack and indent_info[scope_stack[-1]] >= indent: scope_stack.pop() scope_stack.append(name) indent_info[name] = indent if scope_stack == qual_names: return lines, i raise OSError("could not find class definition") def getsourcelines(obj): """Extract the block of code at the top of the given list of lines.""" obj = inspect.unwrap(obj) lines, l_num = findsource(obj) return inspect.getblock(lines[l_num:]), l_num + 1 inspect.getfile = _patched_inspect_getfile class Diagnostics: """Diagnostics class for error reporting in parser. Parameters ---------- source : Source The source code. ctx : diagnostics.DiagnosticContext The diagnostic context for diagnostics. """ source: Source ctx: diagnostics.DiagnosticContext def __init__(self, source: Source): mod = IRModule() mod.source_map.add(source.source_name, source.full_source) self.source = source self.ctx = diagnostics.DiagnosticContext(mod, diagnostics.get_renderer()) def _emit(self, node: doc.AST, message: str, level: diagnostics.DiagnosticLevel) -> None: """Emit a diagnostic. Parameters ---------- node : doc.AST The node with diagnostic information. message : str The diagnostic message. level : diagnostics.DiagnosticLevel The diagnostic level. """ lineno = node.lineno or self.source.start_line col_offset = node.col_offset or self.source.start_column end_lineno = node.end_lineno or lineno end_col_offset = node.end_col_offset or col_offset lineno += self.source.start_line - 1 end_lineno += self.source.start_line - 1 col_offset += self.source.start_column + 1 end_col_offset += self.source.start_column + 1 self.ctx.emit( diagnostics.Diagnostic( level=level, span=Span( source_name=SourceName(self.source.source_name), line=lineno, end_line=end_lineno, column=col_offset, end_column=end_col_offset, ), message=message, ) ) def error(self, node: doc.AST, message: str) -> None: """Emit a diagnostic error. Parameters ---------- node : doc.AST The node with diagnostic error. message : str The diagnostic message. """ self._emit(node, message, diagnostics.DiagnosticLevel.ERROR) self.ctx.render()
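# ---------------------------------------------------------------------------
# [Editor's sketch, not part of the original file] How Source and Diagnostics
# above fit together: wrap raw text in Source, parse it into a doc AST, then
# report errors anchored at a node. The error() call is left commented out
# because render() raises once an ERROR-level diagnostic has been emitted.
from tvm.script.parser.core.diagnostics import Diagnostics, Source

source = Source("def main():\n    return 0\n")
fn_node = source.as_ast().body[0]  # the doc.FunctionDef node
diag = Diagnostics(source)
# diag.error(fn_node, "example message")  # renders the highlighted span, then raises
# ---------------------------------------------------------------------------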
https://github.com/zk-ml/tachikoma
python/tvm/script/parser/core/dispatch.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Parser dispatching infrastructure""" from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Type from .doc import AST if TYPE_CHECKING: from .parser import Parser ParseMethod = Callable[["Parser", AST], None] ParseVTable: Dict[Tuple[str, str], ParseMethod] = {} OpMethod = Callable[..., Any] OpVTable: Dict[Tuple[Type, AST, int], OpMethod] = {} def register(token: str, type_name: str): """Register a method for a dispatch token and type name. Parameters ---------- token : str The token for IR, e.g., T for TIR and R for Relax. type_name : str The type name of AST node, e.g., FunctionDef, With, For. Returns ------- func : callable The decorator that registers the dispatched method for parsing the corresponding token and AST node type. """ def func(method: ParseMethod): """Register a method in parser virtual table. Parameters ---------- method : ParseMethod The dispatched method to be registered in parser virtual table. """ ParseVTable[(token, type_name)] = method return func def get( token: str, type_name: str, default: Optional[ParseMethod] = None, ) -> Optional[ParseMethod]: """Get a registered method for a dispatch token and type name, or return a default method if none is registered for this dispatch token and type name. Parameters ---------- token : str The token for IR, e.g., T for TIR and R for Relax. type_name : str The type name of AST node, e.g., FunctionDef, With, For. default : Optional[ParseMethod] The default method to return when no method is registered for this dispatch token and type name. Returns ------- func : Optional[ParseMethod] The dispatched method for parsing the corresponding token and AST node type. """ return ParseVTable.get((token, type_name), default) def register_op(operand_type: Type, op_node_type: AST, operand_index: int): """Register a method for an operand type, AST operator node and operand index. Parameters ---------- operand_type : Type The type of operands, e.g., tir.PrimExpr, tir.IterVar. op_node_type : AST The doc AST operator node type, e.g., doc.Add, doc.Eq. operand_index : int The operand index, i.e., 0 for left operand and 1 for right operand. Returns ------- func : callable The decorator that registers the dispatched method for the corresponding operand type, AST operator node and operand index. """ def func(method: OpMethod): """Register a method in parser operator virtual table. Parameters ---------- method : OpMethod The dispatched method to be registered in parser operator virtual table. """ OpVTable[(operand_type, op_node_type, operand_index)] = method return func def get_op( operand_type: Type, op_node_type: Type, operand_index: int, default: Optional[OpMethod] = None, ) -> Optional[OpMethod]: """Get a registered method for an operand type, AST operator node and operand index, or return a default method if none is registered.
Parameters ---------- operand_type : Type The type of operands, e.g., tir.PrimExpr, tir.IterVar. op_node_type : AST The doc AST operator node type, e.g., doc.Add, doc.Eq. operand_index : int The operand index, i.e., 0 for left operand and 1 for right operand. default : Optional[OpMethod] The default method to return when no method is registered for this operand type, AST operator node and operand index. Returns ------- func : Optional[OpMethod] The dispatched method for the corresponding operand type, AST operator node and operand index. """ return OpVTable.get((operand_type, op_node_type, operand_index), default)
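# ---------------------------------------------------------------------------
# [Editor's sketch, not part of the original file] Registering and fetching a
# parse method through the vtable above. The "demo" token is illustrative; the
# real tokens are whatever the tir/ir sub-packages register. Note that the
# inner decorator returns None, so the @register form rebinds the decorated
# name to None -- here the registration is called directly to keep the handle.
from tvm.script.parser.core import dispatch

def visit_for(self, node) -> None:
    """Hypothetical handler for a For node under the "demo" token."""

dispatch.register(token="demo", type_name="For")(visit_for)
assert dispatch.get("demo", "For") is visit_for
assert dispatch.get("demo", "While", default=None) is None
# ---------------------------------------------------------------------------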
https://github.com/zk-ml/tachikoma
python/tvm/script/parser/core/doc.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """TVM Script Parser doc AST""" import ast import inspect import sys import typing from collections import defaultdict from . import doc_core as doc from .doc_core import * # pylint: disable=unused-import,wildcard-import,redefined-builtin,W0614 FnToDoc = typing.Callable[[ast.AST], doc.AST] FnFromDoc = typing.Callable[[doc.AST], ast.AST] class Entry: """Mapping entry between python AST node type str and doc AST. Parameters ---------- to_doc : typing.Optional[FnToDoc] The callable methods for converting python AST node to doc AST. from_doc : typing.Optional[FnFromDoc] The callable methods for converting doc AST to python AST node. """ to_doc: typing.Optional[FnToDoc] from_doc: typing.Optional[FnFromDoc] def __init__(self): self.to_doc = None self.from_doc = None class Registry: """Registration map from python AST node type str to methods of conversion between python AST node and doc AST node. Parameters ---------- _inst : typing.Optional["Registry"] The instance of Registry. table : typing.Dict[str, Entry] The registration map from python AST node type str to methods of conversion between python AST node and doc AST node. """ _inst: typing.Optional["Registry"] = None table: typing.Dict[str, Entry] def __init__(self): self.table = defaultdict(Entry) def register_to_doc(name: str): """Register the to_doc method for python AST node type. Parameters ---------- name : str The type of python AST node. Returns ------- f : Callable[[FnToDoc], None] The function of registering the to_doc method for python AST node type. """ def f(to_doc: FnToDoc): # pylint: disable=redefined-outer-name reg = Registry._inst # pylint: disable=protected-access reg.table[name].to_doc = to_doc return f def register_from_doc(name: str): """Register the from_doc method for python AST node type. Parameters ---------- name : str The type of python AST node. Returns ------- f : Callable[[FnFromDoc], None] The function of registering the from_doc method for python AST node type. """ def f(to_doc: FnFromDoc): # pylint: disable=redefined-outer-name reg = Registry._inst # pylint: disable=protected-access reg.table[name].from_doc = to_doc return f def _is_atomic_type(node): return ( node is None or node in [..., True, False] or isinstance( node, ( int, float, str, bool, bytes, complex, ), ) ) def _get_registry_entry(cls_name, attr): cls_name = cls_name.split(".")[-1] reg = Registry._inst # pylint: disable=protected-access if cls_name in reg.table: entry = reg.table[cls_name] return getattr(entry, attr, None) return None def from_doc(node): """Get original python AST node from doc AST node. Parameters ---------- node : doc.AST The doc AST node. Returns ------- res : ast.AST The corresponding AST node. 
""" if _is_atomic_type(node): return node if isinstance(node, tuple): return tuple(from_doc(n) for n in node) if isinstance(node, list): return [from_doc(n) for n in node] func = _get_registry_entry(node.__class__.__name__, "from_doc") if not func: raise NotImplementedError(f"from_doc is not implemented for: {node.__class__.__name__}") return func(node) def to_doc(node): """Get doc AST node from python AST node. Parameters ---------- node : ast.AST The AST node. Returns ------- res : doc.AST The corresponding doc AST node. """ if _is_atomic_type(node): return node if isinstance(node, tuple): return tuple(to_doc(n) for n in node) if isinstance(node, list): return [to_doc(n) for n in node] func = _get_registry_entry(node.__class__.__name__, "to_doc") if not func: raise NotImplementedError(f"to_doc is not implemented for: {node.__class__.__name__}") return func(node) def parse( source: str, filename: str = "<unknown>", mode: str = "exec", ) -> doc.AST: """Parse TVMScript source code str to doc AST. Its interface is consistent with python built-in ast.parse. And it will parse by python 3.8 first if possible, or it will parse with python version in current environment. Parameters ---------- source : str The TVMScript source code. filename : str The optional filename of the file where source code locates. mode : str The parsing mode for ast.parse. Returns ------- res : doc.AST The parsed doc AST. """ try: program = ast.parse( # pylint: disable=unexpected-keyword-arg source=source, filename=filename, mode=mode, feature_version=(3, 8), ) except: # pylint: disable=bare-except program = ast.parse( source=source, filename=filename, mode=mode, ) return to_doc(program) class NodeVisitor: """Node visitor for doc AST""" def visit(self, node: doc.AST) -> None: if isinstance(node, (list, tuple)): for item in node: self.visit(item) return if not isinstance(node, doc.AST): return getattr( self, "visit_" + node.__class__.__name__.split(".")[-1], self.generic_visit, )(node) def generic_visit(self, node: doc.AST) -> None: for field in node.__class__._FIELDS: # pylint: disable=protected-access value = getattr(node, field, None) if value is None: pass elif isinstance(value, (doc.AST, list, tuple)): self.visit(value) class NodeTransformer: """Node transformer for doc AST""" def visit(self, node: doc.AST) -> doc.AST: if isinstance(node, list): return [self.visit(item) for item in node] if isinstance(node, tuple): return tuple(self.visit(item) for item in node) if not isinstance(node, doc.AST): return node return getattr( self, "visit_" + node.__class__.__name__.split(".")[-1], self.generic_visit, )(node) def generic_visit(self, node: doc.AST) -> doc.AST: kv: typing.Dict[str, typing.Any] = {} for field in node.__class__._FIELDS: # pylint: disable=protected-access value = getattr(node, field, None) if value is None: pass elif isinstance(value, (doc.AST, list, tuple)): value = self.visit(value) kv[field] = value return node.__class__(**kv) def _register_default(): class DefaultTranslator: def __init__(self, doc_cls, func, fields): self.doc_cls = doc_cls # getattr(doc, name) self.func = func self.fields = fields def __call__(self, node): kv = {attr: self.func(getattr(node, attr, None)) for attr in self.fields} return self.doc_cls(**kv) Registry._inst = Registry() # pylint: disable=protected-access for cls_name in dir(doc): doc_cls = getattr(doc, cls_name) if not hasattr(ast, cls_name): continue if inspect.isclass(doc_cls) and issubclass(doc_cls, doc.AST): assert "." 
not in cls_name register_to_doc(cls_name)( DefaultTranslator( getattr(doc, cls_name), to_doc, doc_cls._FIELDS, # pylint: disable=protected-access ) ) register_from_doc(cls_name)( DefaultTranslator( getattr(ast, cls_name), from_doc, doc_cls._FIELDS, # pylint: disable=protected-access ) ) def _py_version() -> typing.Tuple[int, int]: return (sys.version_info.major, sys.version_info.minor) def _register_constant_handling(): if _py_version() not in [(3, 6), (3, 7)]: return def as_constant(f) -> doc.Constant: def to_doc_func(x: ast.AST) -> doc.Constant: return doc.Constant( value=getattr(x, f) if isinstance(f, str) else f(x), kind=None, s=None, n=None, lineno=x.lineno, col_offset=x.col_offset, end_lineno=x.lineno, end_col_offset=x.col_offset, ) return to_doc_func register_to_doc("Str")(as_constant("s")) register_to_doc("NameConstant")(as_constant("value")) register_to_doc("Num")(as_constant("n")) register_to_doc("Bytes")(as_constant("s")) register_to_doc("Ellipsis")(as_constant(lambda _: ...)) def _register_subscription_handling(): if _py_version() >= (3, 9): return def subscript_to_doc(x: ast.Subscript) -> doc.Subscript: if isinstance(x.slice, ast.Slice): return doc.Subscript( value=to_doc(x.value), slice=doc.Slice( lower=to_doc(x.slice.lower), upper=to_doc(x.slice.upper), step=to_doc(x.slice.step), lineno=getattr(x.slice, "lineno", None), col_offset=getattr(x.slice, "col_offset", None), end_lineno=getattr(x.slice, "end_lineno", None), end_col_offset=getattr(x.slice, "end_col_offset", None), ), ctx=to_doc(x.ctx), lineno=getattr(x, "lineno", None), col_offset=getattr(x, "col_offset", None), end_lineno=getattr(x, "end_lineno", None), end_col_offset=getattr(x, "end_col_offset", None), ) if isinstance(x.slice, ast.ExtSlice): return doc.Subscript( value=to_doc(x.value), slice=doc.Tuple( elts=[to_doc(i) for i in x.slice.dims], ctx=doc.Load( lineno=None, col_offset=None, end_lineno=None, end_col_offset=None, ), lineno=getattr(x, "lineno", None), col_offset=getattr(x, "col_offset", None), end_lineno=getattr(x, "end_lineno", None), end_col_offset=getattr(x, "end_col_offset", None), ), ctx=to_doc(x.ctx), lineno=getattr(x, "lineno", None), col_offset=getattr(x, "col_offset", None), end_lineno=getattr(x, "end_lineno", None), end_col_offset=getattr(x, "end_col_offset", None), ) if isinstance(x.slice, ast.Index): return doc.Subscript( value=to_doc(x.value), slice=to_doc(x.slice.value), ctx=to_doc(x.ctx), lineno=getattr(x, "lineno", None), col_offset=getattr(x, "col_offset", None), end_lineno=getattr(x, "end_lineno", None), end_col_offset=getattr(x, "end_col_offset", None), ) raise TypeError(f"Unknown subscript type: {type(x.slice)}") def subscript_from_doc(x: doc.Subscript) -> ast.Subscript: if isinstance(x.slice, doc.Slice): result = ast.Subscript( value=from_doc(x.value), slice=from_doc(x.slice), ctx=from_doc(x.ctx), ) elif isinstance(x.slice, doc.Tuple): result = ast.Subscript( value=from_doc(x.value), slice=ast.ExtSlice( dims=[from_doc(i) for i in x.slice.elts], ), ctx=from_doc(x.ctx), ) else: result = ast.Subscript( value=from_doc(x.value), slice=ast.Index(value=from_doc(x.slice)), ctx=from_doc(x.ctx), ) result.lineno = x.lineno result.col_offset = x.col_offset result.end_lineno = x.end_lineno result.end_col_offset = x.end_col_offset return result register_to_doc("Subscript")(subscript_to_doc) register_from_doc("Subscript")(subscript_from_doc) def _register_index_handling(): if _py_version() >= (3, 9): return def index_to_doc(x: ast.Index) -> doc.Expr: return to_doc(x.value) def index_from_doc(x: 
doc.Expr) -> ast.Index: result = ast.Index(value=from_doc(x), ctx=from_doc(x.ctx)) result.lineno = x.lineno result.col_offset = x.col_offset result.end_lineno = x.end_lineno result.end_col_offset = x.end_col_offset return result register_to_doc("Index")(index_to_doc) register_from_doc("Index")(index_from_doc) _register_default() _register_constant_handling() _register_subscription_handling() _register_index_handling()
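# ---------------------------------------------------------------------------
# [Editor's sketch, not part of the original file] Round-tripping between the
# python ast module and the version-stable doc AST using the helpers above.
import ast
from tvm.script.parser.core import doc

tree = doc.parse("x = 1 + 2")  # parsed with 3.8 feature_version when possible
assert isinstance(tree, doc.Module)
back = doc.from_doc(tree)  # convert the doc AST back into a python ast.Module
assert isinstance(back, ast.Module)
# ---------------------------------------------------------------------------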
https://github.com/zk-ml/tachikoma
python/tvm/script/parser/core/doc_core.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=redefined-outer-name,missing-docstring,invalid-name # pylint: disable=useless-super-delegation,redefined-builtin # pylint: disable=too-few-public-methods,too-many-arguments class AST: _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"] def __init__(self, lineno, col_offset, end_lineno, end_col_offset): super().__init__() self.lineno = lineno self.col_offset = col_offset self.end_lineno = end_lineno self.end_col_offset = end_col_offset class mod(AST): _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"] def __init__(self, lineno, col_offset, end_lineno, end_col_offset): super().__init__(lineno, col_offset, end_lineno, end_col_offset) class Module(mod): _FIELDS = ["body", "lineno", "col_offset", "end_lineno", "end_col_offset"] def __init__(self, body, lineno, col_offset, end_lineno, end_col_offset): super().__init__(lineno, col_offset, end_lineno, end_col_offset) self.body = body class Interactive(mod): _FIELDS = ["body", "lineno", "col_offset", "end_lineno", "end_col_offset"] def __init__(self, body, lineno, col_offset, end_lineno, end_col_offset): super().__init__(lineno, col_offset, end_lineno, end_col_offset) self.body = body class Expression(mod): _FIELDS = ["body", "lineno", "col_offset", "end_lineno", "end_col_offset"] def __init__(self, body, lineno, col_offset, end_lineno, end_col_offset): super().__init__(lineno, col_offset, end_lineno, end_col_offset) self.body = body class stmt(AST): _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"] def __init__(self, lineno, col_offset, end_lineno, end_col_offset): super().__init__(lineno, col_offset, end_lineno, end_col_offset) class FunctionDef(stmt): _FIELDS = [ "name", "args", "body", "decorator_list", "returns", "lineno", "col_offset", "end_lineno", "end_col_offset", ] def __init__( self, name, args, body, decorator_list, returns, lineno, col_offset, end_lineno, end_col_offset, ): super().__init__(lineno, col_offset, end_lineno, end_col_offset) self.name = name self.args = args self.body = body self.decorator_list = decorator_list self.returns = returns class ClassDef(stmt): _FIELDS = [ "name", "bases", "keywords", "body", "decorator_list", "lineno", "col_offset", "end_lineno", "end_col_offset", ] def __init__( self, name, bases, keywords, body, decorator_list, lineno, col_offset, end_lineno, end_col_offset, ): super().__init__(lineno, col_offset, end_lineno, end_col_offset) self.name = name self.bases = bases self.keywords = keywords self.body = body self.decorator_list = decorator_list class Return(stmt): _FIELDS = ["value", "lineno", "col_offset", "end_lineno", "end_col_offset"] def __init__(self, value, lineno, col_offset, end_lineno, end_col_offset): super().__init__(lineno, col_offset, end_lineno, 
end_col_offset) self.value = value class Delete(stmt): _FIELDS = ["targets", "lineno", "col_offset", "end_lineno", "end_col_offset"] def __init__(self, targets, lineno, col_offset, end_lineno, end_col_offset): super().__init__(lineno, col_offset, end_lineno, end_col_offset) self.targets = targets class Assign(stmt): _FIELDS = ["targets", "value", "lineno", "col_offset", "end_lineno", "end_col_offset"] def __init__(self, targets, value, lineno, col_offset, end_lineno, end_col_offset): super().__init__(lineno, col_offset, end_lineno, end_col_offset) self.targets = targets self.value = value class AugAssign(stmt): _FIELDS = ["target", "op", "value", "lineno", "col_offset", "end_lineno", "end_col_offset"] def __init__(self, target, op, value, lineno, col_offset, end_lineno, end_col_offset): super().__init__(lineno, col_offset, end_lineno, end_col_offset) self.target = target self.op = op self.value = value class AnnAssign(stmt): _FIELDS = [ "target", "annotation", "value", "simple", "lineno", "col_offset", "end_lineno", "end_col_offset", ] def __init__( self, target, annotation, value, simple, lineno, col_offset, end_lineno, end_col_offset ): super().__init__(lineno, col_offset, end_lineno, end_col_offset) self.target = target self.annotation = annotation self.value = value self.simple = simple class For(stmt): _FIELDS = [ "target", "iter", "body", "orelse", "lineno", "col_offset", "end_lineno", "end_col_offset", ] def __init__(self, target, iter, body, orelse, lineno, col_offset, end_lineno, end_col_offset): super().__init__(lineno, col_offset, end_lineno, end_col_offset) self.target = target self.iter = iter self.body = body self.orelse = orelse class While(stmt): _FIELDS = ["test", "body", "orelse", "lineno", "col_offset", "end_lineno", "end_col_offset"] def __init__(self, test, body, orelse, lineno, col_offset, end_lineno, end_col_offset): super().__init__(lineno, col_offset, end_lineno, end_col_offset) self.test = test self.body = body self.orelse = orelse class If(stmt): _FIELDS = ["test", "body", "orelse", "lineno", "col_offset", "end_lineno", "end_col_offset"] def __init__(self, test, body, orelse, lineno, col_offset, end_lineno, end_col_offset): super().__init__(lineno, col_offset, end_lineno, end_col_offset) self.test = test self.body = body self.orelse = orelse class With(stmt): _FIELDS = ["items", "body", "lineno", "col_offset", "end_lineno", "end_col_offset"] def __init__(self, items, body, lineno, col_offset, end_lineno, end_col_offset): super().__init__(lineno, col_offset, end_lineno, end_col_offset) self.items = items self.body = body class Raise(stmt): _FIELDS = ["exc", "cause", "lineno", "col_offset", "end_lineno", "end_col_offset"] def __init__(self, exc, cause, lineno, col_offset, end_lineno, end_col_offset): super().__init__(lineno, col_offset, end_lineno, end_col_offset) self.exc = exc self.cause = cause class Try(stmt): _FIELDS = [ "body", "handlers", "orelse", "finalbody", "lineno", "col_offset", "end_lineno", "end_col_offset", ] def __init__( self, body, handlers, orelse, finalbody, lineno, col_offset, end_lineno, end_col_offset ): super().__init__(lineno, col_offset, end_lineno, end_col_offset) self.body = body self.handlers = handlers self.orelse = orelse self.finalbody = finalbody class Assert(stmt): _FIELDS = ["test", "msg", "lineno", "col_offset", "end_lineno", "end_col_offset"] def __init__(self, test, msg, lineno, col_offset, end_lineno, end_col_offset): super().__init__(lineno, col_offset, end_lineno, end_col_offset) self.test = test self.msg = msg class 
Import(stmt):
    _FIELDS = ["names", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, names, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.names = names

class ImportFrom(stmt):
    _FIELDS = ["module", "names", "level", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, module, names, level, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.module = module
        self.names = names
        self.level = level

class Global(stmt):
    _FIELDS = ["names", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, names, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.names = names

class Nonlocal(stmt):
    _FIELDS = ["names", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, names, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.names = names

class Expr(stmt):
    _FIELDS = ["value", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, value, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value

class Pass(stmt):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Break(stmt):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Continue(stmt):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class expr(AST):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class BoolOp(expr):
    _FIELDS = ["op", "values", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, op, values, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.op = op
        self.values = values

class BinOp(expr):
    _FIELDS = ["left", "op", "right", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, left, op, right, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.left = left
        self.op = op
        self.right = right

class UnaryOp(expr):
    _FIELDS = ["op", "operand", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, op, operand, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.op = op
        self.operand = operand

class Lambda(expr):
    _FIELDS = ["args", "body", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, args, body, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.args = args
        self.body = body

class IfExp(expr):
    _FIELDS = ["test", "body", "orelse", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, test, body, orelse, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.test = test
        self.body = body
        self.orelse = orelse

class Dict(expr):
    _FIELDS = ["keys", "values", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, keys, values, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.keys = keys
        self.values = values

class Set(expr):
    _FIELDS = ["elts", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, elts, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.elts = elts

class ListComp(expr):
    _FIELDS = ["elt", "generators", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, elt, generators, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.elt = elt
        self.generators = generators

class SetComp(expr):
    _FIELDS = ["elt", "generators", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, elt, generators, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.elt = elt
        self.generators = generators

class DictComp(expr):
    _FIELDS = ["key", "value", "generators", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, key, value, generators, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.key = key
        self.value = value
        self.generators = generators

class GeneratorExp(expr):
    _FIELDS = ["elt", "generators", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, elt, generators, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.elt = elt
        self.generators = generators

class Yield(expr):
    _FIELDS = ["value", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, value, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value

class YieldFrom(expr):
    _FIELDS = ["value", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, value, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value

class Compare(expr):
    _FIELDS = ["left", "ops", "comparators", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, left, ops, comparators, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.left = left
        self.ops = ops
        self.comparators = comparators

class Call(expr):
    _FIELDS = ["func", "args", "keywords", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, func, args, keywords, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.func = func
        self.args = args
        self.keywords = keywords

class FormattedValue(expr):
    _FIELDS = [
        "value",
        "conversion",
        "format_spec",
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]

    def __init__(
        self, value, conversion, format_spec, lineno, col_offset, end_lineno, end_col_offset
    ):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value
        self.conversion = conversion
        self.format_spec = format_spec

class JoinedStr(expr):
    _FIELDS = ["values", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, values, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.values = values

class Constant(expr):
    _FIELDS = ["value", "kind", "s", "n", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, value, kind, s, n, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value
        self.kind = kind
        self.s = s
        self.n = n

class NamedExpr(expr):
    _FIELDS = ["target", "value", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, target, value, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.target = target
        self.value = value

class Attribute(expr):
    _FIELDS = ["value", "attr", "ctx", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, value, attr, ctx, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value
        self.attr = attr
        self.ctx = ctx

class slice(AST):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Slice(slice):
    _FIELDS = ["lower", "upper", "step", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lower, upper, step, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.lower = lower
        self.upper = upper
        self.step = step

class ExtSlice(slice):
    _FIELDS = ["dims", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, dims, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.dims = dims

class Index(slice):
    _FIELDS = ["value", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, value, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value

class Subscript(expr):
    _FIELDS = ["value", "slice", "ctx", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, value, slice, ctx, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value
        self.slice = slice
        self.ctx = ctx

class Starred(expr):
    _FIELDS = ["value", "ctx", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, value, ctx, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.value = value
        self.ctx = ctx

class Name(expr):
    _FIELDS = ["id", "ctx", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, id, ctx, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.id = id
        self.ctx = ctx

class List(expr):
    _FIELDS = ["elts", "ctx", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, elts, ctx, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.elts = elts
        self.ctx = ctx

class Tuple(expr):
    _FIELDS = ["elts", "ctx", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, elts, ctx, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.elts = elts
        self.ctx = ctx

class expr_context(AST):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class AugLoad(expr_context):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class AugStore(expr_context):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Param(expr_context):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Suite(mod):
    _FIELDS = ["body", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, body, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.body = body

class Del(expr_context):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Load(expr_context):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Store(expr_context):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class boolop(AST):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class And(boolop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Or(boolop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class operator(AST):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Add(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class BitAnd(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class BitOr(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class BitXor(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Div(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class FloorDiv(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class LShift(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Mod(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Mult(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class MatMult(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Pow(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class RShift(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Sub(operator):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class unaryop(AST):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Invert(unaryop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Not(unaryop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class UAdd(unaryop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class USub(unaryop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class cmpop(AST):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Eq(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Gt(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class GtE(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class In(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Is(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class IsNot(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class Lt(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class LtE(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class NotEq(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class NotIn(cmpop):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class comprehension(AST):
    _FIELDS = [
        "target",
        "iter",
        "ifs",
        "is_async",
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]

    def __init__(self, target, iter, ifs, is_async, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.target = target
        self.iter = iter
        self.ifs = ifs
        self.is_async = is_async

class excepthandler(AST):
    _FIELDS = ["lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)

class ExceptHandler(excepthandler):
    _FIELDS = ["type", "name", "body", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, type, name, body, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.type = type
        self.name = name
        self.body = body

class arguments(AST):
    _FIELDS = [
        "args",
        "vararg",
        "kwonlyargs",
        "kw_defaults",
        "kwarg",
        "defaults",
        "posonlyargs",
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]

    def __init__(
        self,
        args,
        vararg,
        kwonlyargs,
        kw_defaults,
        kwarg,
        defaults,
        posonlyargs,
        lineno,
        col_offset,
        end_lineno,
        end_col_offset,
    ):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.args = args
        self.vararg = vararg
        self.kwonlyargs = kwonlyargs
        self.kw_defaults = kw_defaults
        self.kwarg = kwarg
        self.defaults = defaults
        self.posonlyargs = posonlyargs

class arg(AST):
    _FIELDS = ["arg", "annotation", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, arg, annotation, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.arg = arg
        self.annotation = annotation

class keyword(AST):
    _FIELDS = ["arg", "value", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, arg, value, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.arg = arg
        self.value = value

class alias(AST):
    _FIELDS = ["name", "asname", "lineno", "col_offset", "end_lineno", "end_col_offset"]

    def __init__(self, name, asname, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.name = name
        self.asname = asname

class withitem(AST):
    _FIELDS = [
        "context_expr",
        "optional_vars",
        "lineno",
        "col_offset",
        "end_lineno",
        "end_col_offset",
    ]

    def __init__(self, context_expr, optional_vars, lineno, col_offset, end_lineno, end_col_offset):
        super().__init__(lineno, col_offset, end_lineno, end_col_offset)
        self.context_expr = context_expr
        self.optional_vars = optional_vars

__all__ = [
    "AST", "mod", "Module", "Interactive", "Expression", "stmt",
    "FunctionDef", "ClassDef", "Return", "Delete", "Assign", "AugAssign",
    "AnnAssign", "For", "While", "If", "With", "Raise", "Try", "Assert",
    "Import", "ImportFrom", "Global", "Nonlocal", "Expr", "Pass", "Break",
    "Continue", "expr", "BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp",
    "Dict", "Set", "ListComp", "SetComp", "DictComp", "GeneratorExp",
    "Yield", "YieldFrom", "Compare", "Call", "FormattedValue", "JoinedStr",
    "Constant", "NamedExpr", "Attribute", "slice", "Slice", "ExtSlice",
    "Index", "Subscript", "Starred", "Name", "List", "Tuple", "expr_context",
    "AugLoad", "AugStore", "Param", "Suite", "Del", "Load", "Store",
    "boolop", "And", "Or", "operator", "Add", "BitAnd", "BitOr", "BitXor",
    "Div", "FloorDiv", "LShift", "Mod", "Mult", "MatMult", "Pow", "RShift",
    "Sub", "unaryop", "Invert", "Not", "UAdd", "USub", "cmpop", "Eq", "Gt",
    "GtE", "In", "Is", "IsNot", "Lt", "LtE", "NotEq", "NotIn",
    "comprehension", "excepthandler", "ExceptHandler", "arguments", "arg",
    "keyword", "alias", "withitem",
]
https://github.com/zk-ml/tachikoma
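The classes above mirror the stdlib `ast` node hierarchy, except that every location field is an explicit constructor argument and `_FIELDS` records the construction order. A minimal sketch (not part of the source) of building an `x + 2` node by hand, assuming the classes above are in scope:

loc = dict(lineno=1, col_offset=0, end_lineno=1, end_col_offset=5)  # shared dummy positions
lhs = Name(id="x", ctx=Load(**loc), **loc)
rhs = Constant(value=2, kind=None, s=None, n=None, **loc)
node = BinOp(left=lhs, op=Add(**loc), right=rhs, **loc)
assert node._FIELDS[:3] == ["left", "op", "right"]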
python/tvm/script/parser/core/entry.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The entry point of TVM parser."""
from typing import Any, Dict, Union

from ...ir_builder import IRBuilder
from . import doc
from .diagnostics import Source
from .parser import Parser


def parse(program: Union[doc.AST, Any, str], extra_vars: Dict[str, Any] = None) -> Any:
    """Parse a TVMScript program into the corresponding IR.

    Parameters
    ----------
    program : Union[doc.AST, Any, str]
        The TVMScript code to parse.

    extra_vars : Dict[str, Any]
        The extra variable table for parsing.

    Returns
    -------
    func : Any
        The parsed TVMScript program.
    """
    if extra_vars is None:
        from tvm.script.parser import ir  # pylint: disable=import-outside-toplevel
        from tvm.script.parser import tir  # pylint: disable=import-outside-toplevel

        extra_vars = {
            "I": ir,
            "ir": ir,
            "T": tir,
            "tir": tir,
        }
    source = Source(program)
    parser = Parser(source)
    with IRBuilder() as builder:
        parser.parse(extra_vars=extra_vars)
    return builder.get()
https://github.com/zk-ml/tachikoma
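A hypothetical usage sketch (the `I`/`T` prefixes come from the default `extra_vars` table above; passing source text directly assumes `Source` accepts a string, which the signature suggests but the excerpt does not show):

from tvm.script.parser.core.entry import parse

program = '''
@I.ir_module
class Module:
    @T.prim_func
    def main():
        T.evaluate(0)
'''
mod = parse(program)  # the IRModule built through IRBuilder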
python/tvm/script/parser/core/evaluator.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""AST Evaluation"""
import ast
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, Union

from . import dispatch, doc

if TYPE_CHECKING:
    from .parser import Parser

DEFAULT_OP: Dict[Type, Callable[..., Any]] = {
    doc.Add: lambda a, b: a + b,
    doc.Sub: lambda a, b: a - b,
    doc.Mult: lambda a, b: a * b,
    doc.Div: lambda a, b: a / b,
    doc.FloorDiv: lambda a, b: a // b,
    doc.Mod: lambda a, b: a % b,
    doc.LShift: lambda a, b: a << b,
    doc.RShift: lambda a, b: a >> b,
    doc.BitOr: lambda a, b: a | b,
    doc.BitXor: lambda a, b: a ^ b,
    doc.BitAnd: lambda a, b: a & b,
    doc.MatMult: lambda a, b: a @ b,
    doc.Pow: lambda a, b: a**b,
    doc.Eq: lambda a, b: a == b,
    doc.NotEq: lambda a, b: a != b,
    doc.Lt: lambda a, b: a < b,
    doc.LtE: lambda a, b: a <= b,
    doc.Gt: lambda a, b: a > b,
    doc.GtE: lambda a, b: a >= b,
    doc.Is: lambda a, b: a is b,
    doc.IsNot: lambda a, b: a is not b,
    doc.In: lambda a, b: a in b,
    doc.NotIn: lambda a, b: a not in b,
    doc.And: lambda a, b: a and b,
    doc.Or: lambda a, b: a or b,
    doc.Invert: lambda a: ~a,
    doc.Not: lambda a: not a,
    doc.UAdd: lambda a: +a,
    doc.USub: lambda a: -a,
}


class ExprEvaluator:
    """Expression evaluator for TVMScript parser.

    Parameters
    ----------
    parser : Parser
        The parser bound with the evaluator.

    value_table : Dict[str, Any]
        The value table for expression evaluation.

    new_value_count : int
        The count of intermediate results added during evaluation.
    """

    parser: "Parser"
    value_table: Dict[str, Any]
    new_value_count: int

    def __init__(self, parser: "Parser", value_table: Dict[str, Any]) -> None:
        super().__init__()
        self.parser = parser
        self.value_table = value_table
        self.new_value_count = 0

    @staticmethod
    def eval(parser: "Parser", value_table: Dict[str, Any], node: doc.AST) -> Any:
        """Expression evaluation for TVMScript parser.

        Parameters
        ----------
        parser : Parser
            The parser bound with the evaluator.

        value_table : Dict[str, Any]
            The value table for expression evaluation.

        node : doc.AST
            The root node of the expression AST to evaluate.

        Returns
        -------
        res : Any
            The evaluation result.
        """
        self = ExprEvaluator(parser, value_table)
        result = self._visit(node)  # pylint: disable=protected-access
        if isinstance(result, doc.Name):
            if result.id not in self.value_table:
                self.parser.report_error(result, f"Undefined variable: {result.id}")
            return self.value_table[result.id]
        if isinstance(result, doc.Constant):
            return result.value
        raise TypeError(f"Unexpected result type: {type(result)}")

    def _add_intermediate_result(self, value: Any) -> doc.Name:
        """Add an intermediate result produced during evaluation into the value table.

        Parameters
        ----------
        value : Any
            The intermediate result.

        Returns
        -------
        name : doc.Name
            The doc AST name node carrying the generated name of the intermediate result.
        """
        name = f"__tvm_tmp_value_{self.new_value_count}"
        self.new_value_count += 1
        self.value_table[name] = value
        lineno = 0
        col_offset = 0
        return doc.Name(
            id=name,
            ctx=doc.Load(
                lineno=lineno,
                col_offset=col_offset,
                end_lineno=None,
                end_col_offset=None,
            ),
            lineno=lineno,
            col_offset=col_offset,
            end_lineno=None,
            end_col_offset=None,
        )

    def _visit(self, node: doc.AST) -> Any:
        """General doc AST node visiting method for expression evaluation.

        Parameters
        ----------
        node : doc.AST
            The root node of the expression AST to evaluate.

        Returns
        -------
        res : Any
            The evaluation result.
        """
        if isinstance(node, list):
            return [self._visit(n) for n in node]
        if isinstance(node, tuple):
            return tuple(self._visit(n) for n in node)
        assert isinstance(node, doc.AST)
        if isinstance(node, doc.Name):
            if node.id not in self.value_table:
                self.parser.report_error(node, f"Undefined variable: {node.id}")
            return node
        if isinstance(
            node,
            (
                doc.Constant,
                doc.expr_context,
                doc.operator,
                doc.boolop,
                doc.unaryop,
                doc.cmpop,
            ),
        ):
            return node
        if not isinstance(node, (doc.expr, doc.slice)):
            return node
        if isinstance(node, doc.Lambda):
            return self._eval_lambda(node)
        fields = {}
        for field in node.__class__._FIELDS:  # pylint: disable=protected-access
            attr = getattr(node, field)
            if isinstance(attr, (doc.AST, tuple, list)):
                fields[field] = self._visit(attr)
            else:
                fields[field] = attr
        try:
            if isinstance(node, doc.BoolOp):
                value = self._eval_bool_op(fields)
            elif isinstance(node, doc.Compare):
                value = self._eval_compare(fields)
            elif isinstance(node, doc.UnaryOp):
                value = self._eval_unary_op(fields)
            elif isinstance(node, doc.BinOp):
                value = self._eval_bin_op(fields)
            elif isinstance(node, doc.Slice):
                value = self._eval_slice(fields)
            else:
                value = self._eval_expr(node.__class__(**fields))
        except Exception as e:  # pylint: disable=broad-except,invalid-name
            self.parser.report_error(node, str(e))
        return self._add_intermediate_result(value)

    def _eval_lambda(self, node: doc.Lambda) -> Any:
        """The doc AST lambda node evaluating method.

        Parameters
        ----------
        node : doc.Lambda
            The root node of the lambda expression to evaluate.

        Returns
        -------
        res : Any
            The evaluation result.
        """
        try:
            value = self._eval_expr(node)
        except Exception as e:  # pylint: disable=broad-except,invalid-name
            self.parser.report_error(node, str(e))
        return self._add_intermediate_result(value)

    def _eval_bool_op(self, fields: Dict[str, Any]) -> Any:
        """The doc AST boolean operator node evaluating method.

        Parameters
        ----------
        fields : Dict[str, Any]
            The dictionary of boolean operation information,
            e.g. operator types, operand values.

        Returns
        -------
        res : Any
            The evaluation result.
        """
        op = fields["op"]
        if not isinstance(op, (doc.And, doc.Or)):
            raise TypeError(f"Unexpected operator: {op}")
        value = self._eval_expr(fields["values"][0])
        for rhs in fields["values"][1:]:
            value = _eval_op(op, values=[value, self._eval_expr(rhs)])
        return value

    def _eval_compare(self, fields: Dict[str, Any]) -> Any:
        """The doc AST comparison operation node evaluating method.

        Parameters
        ----------
        fields : Dict[str, Any]
            The dictionary of comparison operation information,
            e.g. operator types, operand values.

        Returns
        -------
        res : Any
            The evaluation result.
        """
        value = self._eval_expr(fields["left"])
        for op, rhs in zip(fields["ops"], fields["comparators"]):
            value = _eval_op(op, values=[value, self._eval_expr(rhs)])
        return value

    def _eval_unary_op(self, fields: Dict[str, Any]) -> Any:
        """The doc AST unary operation node evaluating method.

        Parameters
        ----------
        fields : Dict[str, Any]
            The dictionary of unary operation information,
            e.g. operator types, operand values.

        Returns
        -------
        res : Any
            The evaluation result.
        """
        value = self._eval_expr(fields["operand"])
        value = _eval_op(fields["op"], values=[value])
        return value

    def _eval_bin_op(self, fields: Dict[str, Any]) -> Any:
        """The doc AST binary operation node evaluating method.

        Parameters
        ----------
        fields : Dict[str, Any]
            The dictionary of binary operation information,
            e.g. operator types, operand values.

        Returns
        -------
        res : Any
            The evaluation result.
        """
        return _eval_op(
            fields["op"],
            values=[
                self._eval_expr(fields["left"]),
                self._eval_expr(fields["right"]),
            ],
        )

    def _eval_slice(self, fields: Dict[str, Any]) -> slice:
        """The doc AST slice node evaluating method.

        Parameters
        ----------
        fields : Dict[str, Any]
            The dictionary of slice information,
            e.g. lower bound, upper bound, step.

        Returns
        -------
        res : slice
            The evaluation result.
        """
        lower, upper, step = fields["lower"], fields["upper"], fields["step"]
        lower = self._eval_expr(lower) if lower is not None else None
        upper = self._eval_expr(upper) if upper is not None else None
        step = self._eval_expr(step) if step is not None else None
        return slice(lower, upper, step)

    def _eval_expr(self, v: Any) -> Any:
        """The doc AST expression node evaluating method.

        Parameters
        ----------
        v : Any
            The root node of the expression AST to evaluate.

        Returns
        -------
        res : Any
            The evaluation result.
        """
        return _eval_expr(v, self.value_table)


def eval_expr(
    parser: "Parser",
    node: Union[doc.expr, doc.Expression],
    dict_globals: Optional[Dict[str, Any]],
) -> Any:
    """Expression evaluation for TVMScript parser.

    Parameters
    ----------
    parser : Parser
        The parser bound with the evaluator.

    node : Union[doc.expr, doc.Expression]
        The root node of the expression AST to evaluate.

    dict_globals : Optional[Dict[str, Any]]
        The optional global value table for expression evaluation.

    Returns
    -------
    res : Any
        The evaluation result.
    """
    value_table = {}
    if dict_globals is not None:
        value_table.update(dict_globals)
    return ExprEvaluator.eval(parser, value_table, node)


def eval_assign(
    parser: "Parser",
    target: doc.expr,
    source: Any,
) -> Dict[str, Any]:
    """Expression assignment evaluation for TVMScript parser.

    Parameters
    ----------
    parser : Parser
        The parser bound with the evaluator.

    target : doc.expr
        The root node of the assignment target expression to evaluate.

    source : Any
        The source to be assigned with the evaluated expression.

    Returns
    -------
    res : Dict[str, Any]
        The dictionary of assignment results.
    """
    try:
        return _eval_assign(target, source)
    except Exception as e:  # pylint: disable=broad-except,invalid-name
        parser.report_error(target, f"Failed to evaluate assignment: {str(e)}")
        raise


def _eval_expr(
    node: Union[doc.expr, doc.Expression],
    dict_globals: Optional[Dict[str, Any]],
) -> Any:
    """Expression evaluation implementation for TVMScript parser.

    Parameters
    ----------
    node : Union[doc.expr, doc.Expression]
        The root node of the expression AST to evaluate.

    dict_globals : Optional[Dict[str, Any]]
        The optional global value table for expression evaluation.

    Returns
    -------
    res : Any
        The evaluation result.
    """
    node = doc.from_doc(node)
    if isinstance(node, ast.expr):
        node = ast.Expression(body=node)
    assert isinstance(node, ast.Expression), "Expects an ast.Expression, but gets: " + str(node)
    if dict_globals is None:
        dict_globals = {}
    node = ast.fix_missing_locations(node)
    exe = compile(node, filename="<ast>", mode="eval")
    return eval(exe, dict_globals)  # pylint: disable=eval-used


def _eval_op(
    op: doc.AST,
    values: List[Any],
):
    """Operation expression evaluation implementation for TVMScript parser.

    Parameters
    ----------
    op : doc.AST
        The root node of the operation expression to evaluate.

    values : List[Any]
        The list of operand values.

    Returns
    -------
    res : Any
        The evaluation result.
    """
    op_type = type(op)  # pylint: disable=protected-access
    for i, v in enumerate(values):
        v_type = getattr(type(v), "_dispatch_type", None)
        if v_type is None:
            continue
        f = dispatch.get_op(
            operand_type=v_type, op_node_type=op_type, operand_index=i, default=None
        )
        if f is not None:
            return f(*values)
    return DEFAULT_OP[op_type](*values)


def _eval_assign(
    target: doc.expr,
    source: Any,
) -> Dict[str, Any]:
    """Expression assignment evaluation implementation for TVMScript parser.

    Parameters
    ----------
    target : doc.expr
        The root node of the assignment target expression to evaluate.

    source : Any
        The source to be assigned with the evaluated expression.

    Returns
    -------
    res : Dict[str, Any]
        The dictionary of assignment results.
    """
    target = doc.from_doc(target)
    assert isinstance(target, ast.expr)
    RHS_VAR_NAME = "__tvm_rhs_var__"  # pylint: disable=invalid-name
    rhs_var_name = RHS_VAR_NAME
    dict_locals = {rhs_var_name: source}
    mod = ast.fix_missing_locations(
        ast.Module(
            body=[
                ast.Assign(
                    targets=[target],
                    value=ast.Name(
                        id=rhs_var_name,
                        ctx=ast.Load(),
                    ),
                )
            ],
            type_ignores=[],
        )
    )
    exe = compile(mod, filename="<ast>", mode="exec")
    exec(exe, {}, dict_locals)  # pylint: disable=exec-used
    del dict_locals[rhs_var_name]
    return dict_locals
https://github.com/zk-ml/tachikoma
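Note that `_eval_expr` leans on the standard library rather than interpreting nodes itself: the (sub)expression is wrapped into `ast.Expression`, compiled in `eval` mode, and executed against the value table, while `ExprEvaluator._visit` only intercepts operator nodes that may need TIR-specific dispatch. A stdlib-only illustration of that compile/eval core:

import ast

tree = ast.parse("a + b * 2", mode="eval")
exe = compile(ast.fix_missing_locations(tree), filename="<ast>", mode="eval")
assert eval(exe, {"a": 3, "b": 4}) == 11  # pylint: disable=eval-used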
python/tvm/script/parser/core/parser.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The core parser"""
from collections import defaultdict
from contextlib import contextmanager
from typing import Any, Callable, Dict, List, Optional, Set, Union

from tvm._ffi.base import TVMError
from tvm.error import DiagnosticError

from . import dispatch, doc
from .diagnostics import Diagnostics, Source
from .evaluator import eval_assign, eval_expr

DEFAULT_VISIT = {
    "Interactive",
    "Module",
    "Expression",
    "Pass",
}


def _deferred(exit_f: Callable[[], None]):
    """Create a context that invokes the given exit function on leave.

    Parameters
    ----------
    exit_f : Callable[[], None]
        The function to call when exiting the context.

    Returns
    -------
    res : Any
        The created context.
    """

    @contextmanager
    def context():
        try:
            yield
        finally:
            exit_f()

    return context()


class VarTableFrame:
    """The variable table frame.

    A frame of the variable table stores the variables created in one block or scope.

    Parameters
    ----------
    vars : Set[str]
        The set of variable names in the variable table frame.
    """

    vars: Set[str]

    def __init__(self):
        self.vars = set()

    def add(self, var: str):
        """Add a new variable into the variable table frame.

        Parameters
        ----------
        var : str
            The name of the new variable.
        """
        if var in self.vars:
            raise ValueError(f"Variable {var} already defined in current scope")
        self.vars.add(var)

    def pop_all(self, fn_pop: Callable[[str], None]):
        """Pop all variables out of the variable table frame.

        Parameters
        ----------
        fn_pop : Callable[[str], None]
            The method to call when popping each variable.
        """
        for var in self.vars:
            fn_pop(var)
        self.vars.clear()


class VarTable:
    """The variable table.

    A variable table stores all the variables when parsing TVMScript.

    Parameters
    ----------
    frames : List[VarTableFrame]
        The stack of variable table frames.

    name2value : Dict[str, List[Any]]
        The dictionary for name-based variable lookup.
    """

    frames: List[VarTableFrame]
    name2value: Dict[str, List[Any]]

    def __init__(self):
        self.frames = []
        self.name2value = defaultdict(list)

    def with_frame(self):
        """Create a new variable table frame as a with statement.

        Returns
        -------
        res : Any
            The context with the new variable table frame.
        """

        def pop_frame():
            frame = self.frames.pop()
            frame.pop_all(lambda name: self.name2value[name].pop())

        self.frames.append(VarTableFrame())
        return _deferred(pop_frame)

    def add(self, var: str, value: Any, allow_shadowing: bool = False):
        """Add a new variable to the variable table.

        Parameters
        ----------
        var : str
            The name of the variable.

        value : Any
            The value of the variable.

        allow_shadowing : bool
            Whether variable shadowing is allowed for this variable.
        """
        # Skip if the key and value are equal to those in the var_table
        if self.name2value[var] and self.name2value[var][-1] == value:
            return
        if allow_shadowing and var in self.frames[-1].vars:
            # Shadowing
            self.name2value[var][-1] = value
        else:
            self.frames[-1].add(var)
            self.name2value[var].append(value)

    def get(self) -> Dict[str, Any]:
        """Get a dictionary of the latest variables.

        Returns
        -------
        res : Any
            The dictionary copy of the latest variables.
        """
        return {key: values[-1] for key, values in self.name2value.items() if values}

    def exist(self, value: Any) -> bool:
        """Check if a value exists in the variable table.

        Parameters
        ----------
        value : Any
            The value of the variable.

        Returns
        -------
        res : bool
            The existence of the value.
        """
        for v in self.name2value.values():
            if v is value:
                return True
        return False


def _dispatch_wrapper(func: dispatch.ParseMethod) -> dispatch.ParseMethod:
    def _wrapper(self: "Parser", node: doc.AST) -> None:
        try:
            return func(self, node)
        except DiagnosticError:
            raise
        except Exception as e:  # pylint: disable=broad-except,invalid-name
            self.report_error(node, e)
            raise

    return _wrapper


def _dispatch(self: "Parser", type_name: str) -> dispatch.ParseMethod:
    for token in [self.dispatch_tokens[-1], "default"]:
        func = dispatch.get(token=token, type_name=type_name, default=None)
        if func is not None:
            return _dispatch_wrapper(func)
    return _dispatch_wrapper(lambda self, node: self.generic_visit(node))


class Parser(doc.NodeVisitor):
    """The TVMScript parser

    Parameters
    ----------
    diag : Diagnostics
        The diagnostics for error reporting.

    dispatch_tokens : List[str]
        The stack of dispatching tokens, used to select the parsing method
        for different IRs and different doc AST structures.

    var_table : VarTable
        The variable table for parsing.
    """

    diag: Diagnostics
    dispatch_tokens: List[str]
    var_table: VarTable

    def __init__(self, source: Source) -> None:
        self.diag = Diagnostics(source)
        self.dispatch_tokens = ["default"]
        self.var_table = VarTable()

    def parse(self, extra_vars: Optional[Dict[str, Any]] = None) -> Any:
        """The main parse method for the parser.

        Parameters
        ----------
        extra_vars : Optional[Dict[str, Any]]
            The optional global value table for parsing.

        Returns
        -------
        res : Any
            The doc AST node visiting result.
        """
        if extra_vars is None:
            extra_vars = {}
        with self.var_table.with_frame():
            for k, v in extra_vars.items():
                self.var_table.add(k, v)
            node = self.diag.source.as_ast()
            self.visit(node)

    def with_dispatch_token(self, token: str):
        """Add a new dispatching token as a with statement.

        Parameters
        ----------
        token : str
            The dispatching token.

        Returns
        -------
        res : Any
            The context with the new dispatching token.
        """

        def pop_token():
            self.dispatch_tokens.pop()

        self.dispatch_tokens.append(token)
        return _deferred(pop_token)

    def eval_expr(
        self,
        node: Union[doc.Expression, doc.expr],
        extra_vars: Optional[Dict[str, Any]] = None,
    ) -> Any:
        """Expression evaluation when parsing.

        Parameters
        ----------
        node : Union[doc.expr, doc.Expression]
            The root node of the expression AST to evaluate.

        extra_vars : Optional[Dict[str, Any]]
            The optional global value table for expression evaluation.

        Returns
        -------
        res : Any
            The evaluation result.
        """
        var_values = self.var_table.get()
        if extra_vars is not None:
            for k, v in extra_vars.items():
                var_values[k] = v
        return eval_expr(self, node, var_values)

    def _duplicate_lhs_check(self, target: doc.expr) -> Union[bool, Set[str]]:
        """Check whether a duplicate lhs exists in an assignment.

        Parameters
        ----------
        target : doc.expr
            The doc AST expr node for the lhs.

        Returns
        -------
        res : Union[bool, Set[str]]
            True if a duplicate lhs exists, otherwise the set of lhs names.
        """
        if isinstance(target, (doc.Tuple, doc.List)):
            vars: Set[str] = set()  # pylint: disable=redefined-builtin
            for i in target.elts:
                res = self._duplicate_lhs_check(i)
                if isinstance(res, bool) and res:
                    return True
                assert isinstance(res, set)
                if vars & res:
                    return True
                vars = vars.union(res)
            return vars
        elif isinstance(target, doc.Name):
            return {target.id}
        else:
            self.report_error(target, "Invalid type in assign statement")
            raise NotImplementedError

    def eval_assign(
        self,
        target: doc.expr,
        source: Any,
        bind_value: Callable[["Parser", doc.expr, str, Any], Any],
        allow_shadowing: bool = False,
    ) -> Dict[str, Any]:
        """Expression assignment evaluation when parsing.

        Parameters
        ----------
        target : doc.expr
            The root node of the assignment target expression to evaluate.

        source : Any
            The source to be assigned with the evaluated expression.

        bind_value : Callable[["Parser", doc.expr, str, Any], Any]
            The value binding method used when assigning values to variables.

        allow_shadowing : bool
            Whether variable shadowing is allowed for this assignment.

        Returns
        -------
        res : Dict[str, Any]
            The dictionary of assignment results.
        """
        if self._duplicate_lhs_check(target) is True:
            self.report_error(target, "Duplicate vars assigned.")
        var_values = eval_assign(self, target, source)
        for k, v in var_values.items():
            var = bind_value(self, target, k, v)
            self.var_table.add(k, var, allow_shadowing)
        return var_values

    def report_error(
        self, node: doc.AST, err: Union[Exception, str]
    ) -> None:  # pylint: disable=no-self-use
        """The error reporting when parsing.

        Parameters
        ----------
        node : doc.AST
            The doc AST node with errors.

        err : Union[Exception, str]
            The error to report.
        """
        # Only take the last line of the error message
        if isinstance(err, TVMError):
            msg = list(filter(None, str(err).split("\n")))[-1]
        else:
            msg = str(err)
        self.diag.error(node, msg)

    def visit(self, node: doc.AST) -> None:
        """The general visiting method.

        Parameters
        ----------
        node : doc.AST
            The doc AST node.

        Returns
        -------
        res : Any
            The visiting result.
        """
        if isinstance(node, (list, tuple)):
            for item in node:
                self.visit(item)
            return
        if not isinstance(node, doc.AST):
            return
        name = node.__class__.__name__.split(".")[-1]
        if name in DEFAULT_VISIT:
            func = self.generic_visit
        else:
            func = getattr(self, "visit_" + name, None)
        if func is None:
            raise NotImplementedError(f"Visitor of AST node is not implemented: {name}")
        try:
            func(node)
        except DiagnosticError:
            raise
        except Exception as e:  # pylint: disable=broad-except,invalid-name
            self.report_error(node, str(e))
            raise

    def visit_body(self, node: List[doc.stmt]) -> Any:
        """The general body visiting method.

        Parameters
        ----------
        node : List[doc.stmt]
            The list of statements in the body.

        Returns
        -------
        res : Any
            The visiting result.
        """
        for stmt in node:
            self.visit(stmt)

    def visit_tvm_annotation(self, node: doc.expr) -> Any:
        """The general TVM annotation visiting method.

        Parameters
        ----------
        node : doc.expr
            The doc AST expr node.

        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "tvm_annotation")(self, node)

    def visit_FunctionDef(self, node: doc.FunctionDef) -> Any:  # pylint: disable=invalid-name
        """The general function definition visiting method.

        Parameters
        ----------
        node : doc.FunctionDef
            The doc AST function definition node.

        Returns
        -------
        res : Any
            The visiting result.
        """
        if not node.decorator_list:
            self.report_error(node, "Function must be decorated")
        # TODO: only the last decorator is parsed
        decorator = self.eval_expr(node.decorator_list[-1])
        if not hasattr(decorator, "dispatch_token"):
            self.report_error(node, "The parser does not understand the decorator")
        token = decorator.dispatch_token
        func = dispatch.get(token=token, type_name="FunctionDef", default=None)
        if func is None:
            self.report_error(node, "The parser does not understand the decorator")
        _dispatch_wrapper(func)(self, node)

    def visit_ClassDef(self, node: doc.ClassDef) -> Any:  # pylint: disable=invalid-name
        """The general class definition visiting method.

        Parameters
        ----------
        node : doc.ClassDef
            The doc AST class definition node.

        Returns
        -------
        res : Any
            The visiting result.
        """
        func = dispatch.get(token="ir", type_name="ClassDef", default=None)
        if func is None:
            self.report_error(node, "The parser does not understand the decorator")
        _dispatch_wrapper(func)(self, node)

    def visit_arguments(self, node: doc.arguments) -> Any:
        """The general arguments visiting method.

        Parameters
        ----------
        node : doc.arguments
            The doc AST arguments node.

        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "arguments")(self, node)

    def visit_For(self, node: doc.For) -> Any:  # pylint: disable=invalid-name
        """The general for visiting method.

        Parameters
        ----------
        node : doc.For
            The doc AST for node.

        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "For")(self, node)

    def visit_While(self, node: doc.While) -> Any:  # pylint: disable=invalid-name
        """The general while visiting method.

        Parameters
        ----------
        node : doc.While
            The doc AST while node.

        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "While")(self, node)

    def visit_With(self, node: doc.With) -> Any:  # pylint: disable=invalid-name
        """The general with visiting method.

        Parameters
        ----------
        node : doc.With
            The doc AST with node.

        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "With")(self, node)

    def visit_Assign(self, node: doc.Assign) -> Any:  # pylint: disable=invalid-name
        """The general assign visiting method.

        Parameters
        ----------
        node : doc.Assign
            The doc AST assign node.

        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "Assign")(self, node)

    def visit_AnnAssign(self, node: doc.AnnAssign) -> Any:  # pylint: disable=invalid-name
        """The general annotated assign visiting method.

        Parameters
        ----------
        node : doc.AnnAssign
            The doc AST annotated assign node.

        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "AnnAssign")(self, node)

    def visit_Expr(self, node: doc.Expr) -> Any:  # pylint: disable=invalid-name
        """The general expression visiting method.

        Parameters
        ----------
        node : doc.Expr
            The doc AST expression node.

        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "Expr")(self, node)

    def visit_If(self, node: doc.If) -> Any:  # pylint: disable=invalid-name
        """The general if visiting method.

        Parameters
        ----------
        node : doc.If
            The doc AST if node.

        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "If")(self, node)

    def visit_AugAssign(self, node: doc.AugAssign) -> Any:  # pylint: disable=invalid-name
        """The general augmented assignment visiting method.

        Parameters
        ----------
        node : doc.AugAssign
            The doc AST augmented assignment node.

        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "AugAssign")(self, node)

    def visit_Assert(self, node: doc.Assert) -> Any:  # pylint: disable=invalid-name
        """The general assert visiting method.

        Parameters
        ----------
        node : doc.Assert
            The doc AST assert node.

        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "Assert")(self, node)

    def visit_Return(self, node: doc.Return) -> Any:  # pylint: disable=invalid-name
        """The general return visiting method.

        Parameters
        ----------
        node : doc.Return
            The doc AST return node.

        Returns
        -------
        res : Any
            The visiting result.
        """
        return _dispatch(self, "Return")(self, node)
https://github.com/zk-ml/tachikoma
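A short sketch of the VarTable scoping contract (assuming the module above is importable at this path): each with_frame() pushes a frame, and leaving it removes exactly the names defined inside.

from tvm.script.parser.core.parser import VarTable

vt = VarTable()
with vt.with_frame():
    vt.add("x", 1)
    with vt.with_frame():
        vt.add("y", 2)
        assert vt.get() == {"x": 1, "y": 2}
    assert vt.get() == {"x": 1}  # "y" was popped with the inner frame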
python/tvm/script/parser/core/utils.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM Script Parser utils"""
import inspect
from types import FrameType
from typing import Any, Callable, Dict, List

from .diagnostics import findsource


def inspect_function_capture(func: Callable) -> Dict[str, Any]:
    """Capture a function's non-local and global variables.

    Parameters
    ----------
    func : Callable
        The function to inspect.

    Returns
    -------
    res : Dict[str, Any]
        The function variables map with non-local or global variables.
    """
    captured = {
        **inspect.getclosurevars(func).nonlocals,
        **func.__globals__,  # type: ignore
    }
    return captured


def inspect_class_capture(cls: type) -> Dict[str, Any]:
    """Capture a class's non-local and global variables.

    Parameters
    ----------
    cls : type
        The class to inspect.

    Returns
    -------
    res : Dict[str, Any]
        The class variables map with non-local or global variables.
    """
    result: Dict[str, Any] = {}
    for _, v in cls.__dict__.items():
        if inspect.isfunction(v):
            func_vars = inspect_function_capture(v)
            result.update(**func_vars)
    return result


def is_defined_in_class(frames: List[FrameType], obj: Any) -> bool:
    """Check whether an object is defined in a class scope.

    Parameters
    ----------
    frames : List[FrameType]
        The frame stack of the object, obtained by `inspect.stack()`.

    obj : Any
        The object whose source is inspected.

    Returns
    -------
    res : bool
        Whether the object is defined in a class scope.
    """
    if len(frames) > 2:
        frame_info = frames[2]
        code_context = frame_info.code_context
        if code_context is None:
            return False
        line = code_context[0].strip()
        if line.startswith("@") and "ir_module" in line:
            return True
        if line.startswith("class"):
            lineno = frame_info.lineno
            if lineno >= 2:
                source, _ = findsource(obj)
                line = source[lineno - 2].strip()
                if line.startswith("@") and "ir_module" in line:
                    return True
    return False
https://github.com/zk-ml/tachikoma
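A stdlib-only illustration of the capture rule inspect_function_capture implements (closure non-locals merged with module globals, with globals winning on a name clash, matching the dict order above):

import inspect

GLOBAL = "module-level"

def outer():
    nonlocal_var = "closure cell"

    def inner():
        return nonlocal_var, GLOBAL

    return inner

func = outer()
captured = {**inspect.getclosurevars(func).nonlocals, **func.__globals__}
assert captured["nonlocal_var"] == "closure cell"
assert captured["GLOBAL"] == "module-level"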
python/tvm/script/parser/ir/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The ir module parser"""
from . import parser as _parser
from .entry import ir_module

__all__ = ["ir_module"]
https://github.com/zk-ml/tachikoma
python/tvm/script/parser/ir/entry.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The entry point of TVM parser for ir module."""
import inspect
from typing import Type

from tvm.ir import IRModule

from .._core import parse, utils


def ir_module(mod: Type) -> IRModule:
    """The parsing method for ir module, by using `@ir_module` as decorator.

    Parameters
    ----------
    mod : Type
        The class to be parsed as ir module.

    Returns
    -------
    ir_module : IRModule
        The parsed ir module.
    """
    if not inspect.isclass(mod):
        raise TypeError(f"Expect a class, but got: {mod}")
    return parse(mod, utils.inspect_class_capture(mod))


setattr(ir_module, "dispatch_token", "ir")
https://github.com/zk-ml/tachikoma
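A hedged end-to-end sketch of the decorator (the `tvm.script` re-exports are assumed; member functions rely on the tir entry point shown later in this section):

import tvm
from tvm.script import ir_module, tir as T

@ir_module
class MyModule:
    @T.prim_func
    def main():
        T.evaluate(0)

assert isinstance(MyModule, tvm.ir.IRModule)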
python/tvm/script/parser/ir/parser.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The base parser for ir module"""
from ...ir_builder import ir as I
from .._core import Parser, dispatch, doc


@dispatch.register(token="ir", type_name="ClassDef")
def _visit_class_def(self: Parser, node: doc.ClassDef) -> None:
    """The class definition visiting method for ir module.

    Parameters
    ----------
    self : Parser
        The visiting parser.

    node : doc.ClassDef
        The doc AST class definition node.
    """
    with self.var_table.with_frame():
        with I.ir_module():
            with self.with_dispatch_token("ir"):
                self.visit_body(node.body)


@dispatch.register(token="ir", type_name="Assign")
def _visit_assign(_self: Parser, _node: doc.Assign) -> None:
    """The assign visiting method for ir module.

    Parameters
    ----------
    _self : Parser
        The visiting parser.

    _node : doc.Assign
        The doc AST assign node.
    """


@dispatch.register(token="ir", type_name="Expr")
def _visit_expr(_self: Parser, _node: doc.Expr) -> None:
    """The expression visiting method for ir module.

    Parameters
    ----------
    _self : Parser
        The visiting parser.

    _node : doc.Expr
        The doc AST expression node.
    """
https://github.com/zk-ml/tachikoma
python/tvm/script/parser/tir/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The tir parser"""
from ...ir_builder.tir import *  # pylint: disable=redefined-builtin
from ...ir_builder.tir import ir as _tir
from . import operation as _operation
from . import parser as _parser
from .entry import Buffer, Ptr, prim_func

__all__ = _tir.__all__ + ["Buffer", "Ptr", "prim_func"]
https://github.com/zk-ml/tachikoma
python/tvm/script/parser/tir/entry.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The entry point of TVM parser for tir."""
import inspect
from typing import Callable, Union

from tvm.tir import Buffer, PrimFunc

from ...ir_builder.tir import buffer_decl, ptr
from .._core import parse, utils


def prim_func(func: Callable) -> Union[PrimFunc, Callable]:
    """The parsing method for tir prim func, by using `@prim_func` as decorator.

    Parameters
    ----------
    func : Callable
        The function to be parsed as prim func.

    Returns
    -------
    res : Union[PrimFunc, Callable]
        The parsed tir prim func.
    """
    if not inspect.isfunction(func):
        raise TypeError(f"Expect a function, but got: {func}")
    if utils.is_defined_in_class(inspect.stack(), func):
        return func
    return parse(func, utils.inspect_function_capture(func))


setattr(prim_func, "dispatch_token", "tir")


class BufferProxy:
    """Buffer proxy class for constructing tir buffer.

    Overloads __call__ and __getitem__ to support both the T.Buffer() and
    T.Buffer[] syntax.
    """

    def __call__(
        self,
        shape,
        dtype=None,
        data=None,
        strides=None,
        elem_offset=None,
        scope="global",
        align=0,
        offset_factor=0,
        buffer_type="",
        axis_separators=None,
    ) -> Buffer:
        if dtype is None:
            raise ValueError("Data type must be specified when constructing buffer")
        return buffer_decl(
            shape,
            dtype=dtype,
            data=data,
            strides=strides,
            elem_offset=elem_offset,
            scope=scope,
            align=align,
            offset_factor=offset_factor,
            buffer_type=buffer_type,
            axis_separators=axis_separators,
        )

    def __getitem__(self, keys) -> Buffer:
        if not isinstance(keys, tuple):
            return self(keys)
        if len(keys) >= 2 and not isinstance(keys[1], str):
            return self(keys)
        return self(*keys)  # pylint: disable=no-member  # type: ignore


class PtrProxy:
    """Ptr proxy class for constructing tir pointer.

    Overloads __call__ and __getitem__ to support both the T.Ptr() and
    T.Ptr[] syntax.
    """

    def __call__(self, dtype, storage_scope="global"):
        if callable(dtype):
            dtype = dtype().dtype
        return ptr(dtype, storage_scope)  # pylint: disable=no-member  # type: ignore

    def __getitem__(self, keys):
        if not isinstance(keys, tuple):
            return self(keys)
        return self(*keys)


Buffer = BufferProxy()  # pylint: disable=invalid-name
Ptr = PtrProxy()  # pylint: disable=invalid-name
https://github.com/zk-ml/tachikoma
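A sketch of the two spellings the proxies accept (whether buffer_decl can run outside an enclosing IRBuilder frame is an assumption here, not shown in the excerpt):

from tvm.script.parser.tir.entry import Buffer, Ptr

b_call = Buffer((128, 128), "float32")  # via __call__
b_item = Buffer[(128, 128), "float32"]  # via __getitem__; keys[1] is a dtype str, so same path
p = Ptr["float32"]                      # single key, routed to ptr(dtype)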
python/tvm/script/parser/tir/operation.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The tir expression operation registration"""
from typing import Type

from tvm import tir
from tvm.tir import IntImm

from .._core import OpMethod, doc, register_op


def _register_expr_op(ty: Type):  # pylint: disable=invalid-name
    ty._dispatch_type = ty  # pylint: disable=protected-access

    def _and(a, b):
        if isinstance(a, bool):
            a = IntImm("bool", a)
        if isinstance(b, bool):
            b = IntImm("bool", b)
        return tir.And(a, b)

    def _or(a, b):
        if isinstance(a, bool):
            a = IntImm("bool", a)
        if isinstance(b, bool):
            b = IntImm("bool", b)
        return tir.Or(a, b)

    def r(op: Type, i: int, m: OpMethod):  # pylint: disable=invalid-name
        register_op(ty, op, i)(m)

    for i in [0, 1]:
        # Case 1. binop
        r(doc.Add, i, tir.Add)
        r(doc.Sub, i, tir.Sub)
        r(doc.Mult, i, tir.Mul)
        r(doc.Div, i, tir.Div)
        r(doc.FloorDiv, i, tir.FloorDiv)
        r(doc.Mod, i, tir.FloorMod)
        r(doc.LShift, i, lambda a, b: a << b)
        r(doc.RShift, i, lambda a, b: a >> b)
        r(doc.BitOr, i, lambda a, b: a | b)
        r(doc.BitXor, i, lambda a, b: a ^ b)
        r(doc.BitAnd, i, lambda a, b: a & b)
        # doc.MatMult <-- not implemented
        # doc.Pow <-- not implemented
        # Case 2. cmpop
        r(doc.Eq, i, tir.EQ)
        r(doc.NotEq, i, tir.NE)
        r(doc.Lt, i, tir.LT)
        r(doc.LtE, i, tir.LE)
        r(doc.Gt, i, tir.GT)
        r(doc.GtE, i, tir.GE)
        # doc.Is <-- not implemented
        # doc.IsNot <-- not implemented
        # doc.In <-- not implemented
        # doc.NotIn <-- not implemented
        # Case 3. boolop
        r(doc.And, i, _and)
        r(doc.Or, i, _or)
    for i in [0]:
        # Case 4. unaryop
        r(doc.Invert, i, lambda a: ~a)
        r(doc.Not, i, tir.Not)
        r(doc.UAdd, i, lambda a: +a)
        r(doc.USub, i, lambda a: -a)


_register_expr_op(tir.PrimExpr)
_register_expr_op(tir.IterVar)
https://github.com/zk-ml/tachikoma
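What these registrations buy, as a hedged sketch: when the parser evaluates an expression, the `doc` operator nodes dispatch to the TIR constructors registered above, so `x + 1` becomes `tir.Add`, and a Python `and` goes through `_and`, which first wraps bare bools in `IntImm("bool", ...)`. The equivalent constructions written directly against `tvm.tir`:

from tvm import tir

x = tir.Var("x", "int32")

# What `x + 1` dispatches to (doc.Add -> tir.Add).
add = tir.Add(x, tir.IntImm("int32", 1))

# What `True and x < 8` dispatches to: _and coerces the bool before tir.And.
cond = tir.And(tir.IntImm("bool", True), tir.LT(x, tir.IntImm("int32", 8)))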
python/tvm/script/parser/tir/parser.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """The base parser for tir""" import contextlib from functools import partial from typing import Any from tvm.ir import PrimType from tvm.tir import Buffer, IterVar, PrimExpr, Var from ...ir_builder import tir as T from ...ir_builder.base import IRBuilder from ...ir_builder.base import IRBuilderFrame as Frame from .._core import Parser, dispatch, doc def bind_with_value(self: Parser, node: doc.expr, var_name: str, value: Any) -> Any: """Value binding method used when parsing a with statement. e.g. binding i, j, k with T.grid(128, 128, 128), when parsing with T.grid(128, 128, 128) as i, j, k. Parameters ---------- self : Parser The current parser. node : doc.expr The doc AST expression node for error reporting. var_name : str The variable name. value : Any The value to be bound with. Returns ------- res : Any The bound value. """ if isinstance(value, (list, tuple)): for i, v in enumerate(value): bind_with_value(self, node, f"{var_name}_{i}", v) return value elif isinstance(value, (Buffer, Var)): IRBuilder.name(var_name, value) return value else: self.report_error(node, f"Do not know how to bind type: {type(value)} in with statement") raise NotImplementedError def bind_for_value(self: Parser, node: doc.expr, var_name: str, value: Any) -> Any: """Value binding method used when parsing a for statement. e.g. binding i, j, k with T.grid(128, 128, 128), when parsing for i, j, k in T.grid(128, 128, 128). Parameters ---------- self : Parser The current parser. node : doc.expr The doc AST expression node for error reporting. var_name : str The variable name. value : Any The value to be bound with. Returns ------- res : Any The bound value. """ if isinstance(value, (list, tuple)): for i, v in enumerate(value): bind_for_value(self, node, f"{var_name}_{i}", v) return value elif isinstance(value, Var): IRBuilder.name(var_name, value) return value else: self.report_error(node, f"Do not know how to bind type: {type(value)} in for statement") raise NotImplementedError def bind_assign_value(self: Parser, node: doc.expr, var_name: str, value: Any) -> Any: """Value binding method used when parsing an assign statement. e.g. binding vi, vj, vk with T.axis.remap("SSR", [i, j, k]), when parsing vi, vj, vk = T.axis.remap("SSR", [i, j, k]). Parameters ---------- self : Parser The current parser. node : doc.expr The doc AST expression node for error reporting. var_name : str The variable name. value : Any The value to be bound with. Returns ------- res : Any The bound value. 
""" if isinstance(value, T.meta_var): return value.value elif isinstance(value, (list, tuple)): for i, v in enumerate(value): bind_assign_value(self, node, f"{var_name}_{i}", v) return value elif isinstance(value, Frame): value.add_callback(partial(value.__exit__, None, None, None)) res = value.__enter__() IRBuilder.name(var_name, res) return res elif isinstance(value, (Buffer, IterVar)) or ( isinstance(value, Var) and not self.var_table.exist(value) ): IRBuilder.name(var_name, value) return value elif isinstance(value, PrimExpr): var = T.var(value.dtype) IRBuilder.name(var_name, var) frame = T.let(var, value) frame.add_callback(partial(frame.__exit__, None, None, None)) frame.__enter__() return var return value @dispatch.register(token="tir", type_name="For") def visit_for(self: Parser, node: doc.For) -> None: """The for visiting method for tir. Parameters ---------- self : Parser The visiting parser. node : doc.For The doc AST for node. """ for_frame = self.eval_expr(node.iter) if not isinstance(for_frame, T.frame.ForFrame): self.report_error( node.iter, "Expect the for loop to be one of the following: " "range, T.serial, T.grid, T.parallel, T.vectorized, T.unroll, T.thread_binding", ) with self.var_table.with_frame(): with for_frame as iters: self.eval_assign(target=node.target, source=iters, bind_value=bind_for_value) self.visit_body(node.body) @dispatch.register(token="tir", type_name="While") def visit_while(self: Parser, node: doc.While) -> None: """The while visiting method for tir. Parameters ---------- self : Parser The visiting parser. node : doc.While The doc AST while node. """ with self.var_table.with_frame(): cond = self.eval_expr(node.test) with T.While(cond): self.visit_body(node.body) @dispatch.register(token="tir", type_name="Assign") def visit_assign(self: Parser, node: doc.Assign) -> None: """The assign visiting method for tir. Parameters ---------- self : Parser The visiting parser. node : doc.Assign The doc AST assign node. """ if len(node.targets) != 1: self.report_error(node, "Consequential assignments like 'a = b = c' are not supported.") lhs = node.targets[0] rhs = self.eval_expr(node.value) if isinstance(lhs, doc.Subscript): if isinstance(lhs.slice, doc.Tuple): indices = [] for index in lhs.slice.elts: indices.append(self.eval_expr(index)) else: indices = [self.eval_expr(lhs.slice)] T.buffer_store(self.eval_expr(lhs.value), rhs, indices) else: self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value) @dispatch.register(token="tir", type_name="AugAssign") def visit_aug_assign(self: Parser, node: doc.AugAssign) -> None: """The augmented assign visiting method for tir. Parameters ---------- self : Parser The visiting parser. node : doc.AugAssign The doc AST augmented assign node. 
""" lhs_pos = ( node.target.lineno, node.target.col_offset, node.target.end_lineno, node.target.end_col_offset, ) rhs_pos = ( node.value.lineno, node.value.col_offset, node.value.end_lineno, node.value.end_col_offset, ) node.target.ctx = doc.Load(*lhs_pos) with self.var_table.with_frame(): lhs_name = "__tvm_tmp_value_aug_assign_lhs" rhs_name = "__tvm_tmp_value_aug_assign_rhs" lhs_expr = self.eval_expr(node.target) rhs_expr = self.eval_expr(node.value) self.var_table.add(lhs_name, lhs_expr) self.var_table.add(rhs_name, rhs_expr) op = doc.BinOp( doc.Name(lhs_name, doc.Load(*lhs_pos), *lhs_pos), node.op, doc.Name(rhs_name, doc.Load(*rhs_pos), *rhs_pos), *lhs_pos, ) rhs = self.eval_expr(op) lhs = node.target lhs.ctx = doc.Store(*lhs_pos) if isinstance(lhs, doc.Subscript): if isinstance(lhs.slice, doc.Tuple): indices = [] for index in lhs.slice.elts: indices.append(self.eval_expr(index)) else: indices = [self.eval_expr(lhs.slice)] T.buffer_store(self.eval_expr(lhs.value), rhs, indices) else: self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value) @dispatch.register(token="tir", type_name="AnnAssign") def visit_ann_assign(self: Parser, node: doc.AnnAssign) -> None: """The annotated assign visiting method for tir. Parameters ---------- self : Parser The visiting parser. node : doc.AnnAssign The doc AST annotated assign node. """ lhs = node.target rhs = self.eval_expr(node.value) ann_var = self.visit_tvm_annotation(node.annotation) if not isinstance(ann_var, Var): self.report_error(node.annotation, "Annotation should be Var") self.eval_assign(target=lhs, source=ann_var, bind_value=bind_assign_value) frame = T.let(ann_var, rhs) frame.add_callback(partial(frame.__exit__, None, None, None)) frame.__enter__() @dispatch.register(token="tir", type_name="With") def visit_with(self: Parser, node: doc.With) -> None: """The with visiting method for tir. Parameters ---------- self : Parser The visiting parser. node : doc.With The doc AST with node. """ with contextlib.ExitStack() as stack: stack.enter_context(self.var_table.with_frame()) for item in node.items: frame = self.eval_expr(item.context_expr) if not isinstance(frame, Frame): self.report_error( item.context_expr, "Invalid context expression in the with-statement." ) rhs = stack.enter_context(frame) if item.optional_vars is not None: self.eval_assign(target=item.optional_vars, source=rhs, bind_value=bind_with_value) self.visit_body(node.body) @dispatch.register(token="tir", type_name="FunctionDef") def visit_function_def(self: Parser, node: doc.FunctionDef) -> None: """The function definition visiting method for tir. Parameters ---------- self : Parser The visiting parser. node : doc.FunctionDef The doc AST function definition node. """ with self.var_table.with_frame(): self.var_table.add("range", T.serial) with T.prim_func(): T.func_name(node.name) if node.returns is not None: ret_type = self.eval_expr(node.returns) if callable(ret_type): ret_type = PrimType(ret_type().dtype) T.func_ret(ret_type) with self.with_dispatch_token("tir"): self.visit(node.args) self.visit_body(node.body) @dispatch.register(token="tir", type_name="arguments") def visit_arguments(self: Parser, node: doc.arguments) -> None: """The arguments visiting method for tir. Parameters ---------- self : Parser The visiting parser. node : doc.arguments The doc AST arguments node. 
""" # TODO: handle different types of arguments: # - vararg: arg | None # - kwonlyargs: list[arg] # - kw_defaults: list[expr | None] # - kwarg: arg | None # - defaults: list[expr] # - posonlyargs: list[arg] arg: doc.arg for arg in node.args: if arg.annotation is None: self.report_error(arg, "Type annotation is required for function parameters.") param = T.arg(arg.arg, self.visit_tvm_annotation(arg.annotation)) self.var_table.add(arg.arg, param) @dispatch.register(token="tir", type_name="tvm_annotation") def visit_tvm_annotation(self: Parser, node: doc.expr): """The TVM annotation visiting method for tir. Parameters ---------- self : Parser The visiting parser. node : doc.expr The doc AST expr node. """ annotation = self.eval_expr(node) if callable(annotation): annotation = annotation() return annotation @dispatch.register(token="tir", type_name="Expr") def visit_expr_stmt(self: Parser, node: doc.Expr) -> None: """The expr statement visiting method for tir. Parameters ---------- self : Parser The visiting parser. node : doc.Expr The doc AST Expr node. """ res = self.eval_expr(node.value) if isinstance(res, Frame): res.add_callback(partial(res.__exit__, None, None, None)) res.__enter__() @dispatch.register(token="tir", type_name="If") def visit_if(self: Parser, node: doc.If) -> None: """The if visiting method for tir. Parameters ---------- self : Parser The visiting parser. node : doc.If The doc AST if node. """ with self.var_table.with_frame(): with T.If(self.eval_expr(node.test)): with T.Then(): self.visit_body(node.body) if node.orelse: with T.Else(): self.visit_body(node.orelse) @dispatch.register(token="tir", type_name="Assert") def visit_assert(self: Parser, node: doc.Assert) -> None: """The assert visiting method for tir. Parameters ---------- self : Parser The visiting parser. node : doc.Assert The doc AST assert node. """ cond = self.eval_expr(node.test) msg = self.eval_expr(node.msg) frame = T.Assert(cond, msg) frame.add_callback(partial(frame.__exit__, None, None, None)) frame.__enter__() @dispatch.register(token="tir", type_name="Return") def visit_return(self: Parser, node: doc.Return) -> None: """The return visiting method for tir. Parameters ---------- self : Parser The visiting parser. node : doc.Return The doc AST return node. """ self.report_error(node, "Return is not allowed.")
https://github.com/zk-ml/tachikoma
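The visitors in parser.py above are easiest to see end to end from a small TVMScript function. A hedged sketch (the function and buffer names are illustrative): the `for` line is handled by `visit_for` plus `bind_for_value`, the intermediate assignment by `bind_assign_value` (a bare `PrimExpr` is wrapped in a `T.let` frame), and the subscript assignment by `visit_assign` via `T.buffer_store`.

from tvm.script import tir as T


@T.prim_func
def scale(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (8, 8), "float32")  # bind_assign_value: Buffer branch
    B = T.match_buffer(b, (8, 8), "float32")
    for i, j in T.grid(8, 8):  # visit_for + bind_for_value
        two = A[i, j] * 2.0  # PrimExpr branch: becomes a T.let binding
        B[i, j] = two  # visit_assign -> T.buffer_store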
python/tvm/script/parser_v1/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM Script APIs of the TVM Python package, aimed at supporting TIR"""

from . import tir
from .parser import ir_module, from_source
https://github.com/zk-ml/tachikoma
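A hedged usage sketch for the two entry points re-exported above, assuming a TVM build where this parser_v1 is what `tvm.script` exposes at the top level: `ir_module` turns a class of `@T.prim_func` functions into an `IRModule`, and `from_source` does the same for source text.

import tvm
from tvm.script import tir as T


@tvm.script.ir_module
class MyModule:
    @T.prim_func
    def main() -> None:
        T.func_attr({"global_symbol": "main"})
        T.evaluate(0)


print(type(MyModule))  # tvm.ir.IRModule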
python/tvm/script/parser_v1/_ffi_api.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """FFI APIs for tvm.script""" import tvm._ffi tvm._ffi._init_api("script", __name__)
https://github.com/zk-ml/tachikoma
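For context, `_init_api("script", __name__)` attaches every global function registered under the `script.` prefix to this module; `parser.py` later calls one of them as `_ffi_api.Complete(...)`. A sketch of the equivalent explicit lookup:

import tvm

# The same PackedFunc that _init_api exposes as _ffi_api.Complete.
complete = tvm.get_global_func("script.Complete")
print(complete)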
python/tvm/script/parser_v1/context_maintainer.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM Script Context Maintainer for TIR"""

from typing import List, Mapping, Union, Optional, Dict, Callable
import synr

import tvm
from tvm.ir import Span
from tvm.ir.expr import Range
from tvm.tir import Var, Buffer, PrimExpr, Stmt, MatchBufferRegion
from tvm.runtime import Object
from tvm.tir.expr import IterVar

from .tir.node import BufferSlice


class BlockInfo:
    """Information for block and block_realize signature

    Examples
    ----------
    .. code-block:: python

        @T.prim_func
        def example_func(a: T.handle, b: T.handle, c: T.handle) -> None:
            A = T.match_buffer(a, (16, 16), "float32")
            B = T.match_buffer(b, (16, 16), "float32")
            C = T.match_buffer(c, (16, 16), "float32")

            for i, j, k in T.grid(16, 16, 16):
                with T.block("matmul"):
                    vi = T.axis.S(16, i)
                    vj = T.axis.S(16, j)
                    vk = T.axis.R(16, k)  # iter_bindings = {vi: i, vj: j, vk: k}

                    T.where(True)  # predicate of the block_realize

                    T.reads(A[0:16, 0:16], B[0:16, 0:16])  # reads region of the block
                    T.writes(C[0:16, 0:16])  # writes region of the block
                    T.block_attr({"attr_key": "attr_value"})  # block annotations

                    # alloc_buffers inside the block
                    CC = T.alloc_buffer((1, 1), dtype="float32")

                    # match_buffers of the block,
                    # which bind a sub-region of source buffer into a new buffer
                    D = T.match_buffer(C[vi, vj], ())

                    # init part of the block, executed when all reduce axes are at
                    # their initial values
                    with T.init():
                        C[vi, vj] = T.float32(0)

                    # block body
                    CC[0, 0] = A[vi, vk] * B[vj, vk]
                    D[()] += CC[0, 0]  # The same as C[vi, vj] += CC[0, 0]
    """

    alloc_buffers: List[Buffer] = []
    """List[Buffer]: list of T.alloc_buffer statements in the block signature"""
    match_buffers: List[MatchBufferRegion] = []
    """List[MatchBufferRegion]: list of T.match_buffer statements in the block signature"""
    iter_values: List[PrimExpr] = []
    """List[PrimExpr]: list of binding values for iter vars"""
    iter_vars: List[IterVar] = []
    """List[IterVar]: list of iter vars in the block"""
    reads: Optional[List[BufferSlice]] = None
    """Optional[List[BufferSlice]]:
    list of T.reads statements in the block signature, None for not-visited"""
    writes: Optional[List[BufferSlice]] = None
    """Optional[List[BufferSlice]]:
    list of T.writes statements in the block signature, None for not-visited"""
    annotations: Optional[Mapping[str, Object]] = None
    """Optional[Mapping[str, Object]]:
    list of T.block_attr statements in the block signature, None for not-visited"""
    predicate: Optional[PrimExpr] = None
    """Optional[PrimExpr]: block realize predicate, None for not-visited"""
    init: Optional[Stmt] = None
    """Optional[Stmt]: init part of the block, None for not-visited"""

    def __init__(self):
        self.alloc_buffers = []
        self.match_buffers = []
        self.iter_values = []
        self.iter_vars = []
        self.reads = None
        self.writes = None
        self.annotations = None
        self.predicate = None
        self.init = None


class ContextMaintainer:
    """Maintain all the necessary context info

    Parameters
    ----------
    _report_error : Callable[[str, Union[Span, synr.ast.Span]], None]
        The report error function handle
    """

    # scope context
    node_stack: List[List[synr.ast.Node]] = []
    """List[List[synr.ast.Node]]: The ast nodes inside the current scope"""
    block_info_stack: List[BlockInfo] = []
    """List[BlockInfo]: The block info for the current block scope"""
    loop_stack: Dict[Var, Range] = {}
    """Dict[Var, Range]: The dict from loop var to its domain outside the block"""
    symbols: List[Dict[str, Union[Var, Buffer]]] = []
    """List[Dict[str, Union[Var, Buffer]]]: Symbol map from name to object for the current scope"""
    closure_vars: Dict[str, Object] = {}
    """Dict[str, Object]: The closure vars defined in the Python interpreter"""

    # function context
    func_params: List[Var] = []
    """List[Var]: The function parameters"""
    func_buffer_map: Mapping[Var, Buffer] = {}
    """Mapping[Var, Buffer]: The function buffer map"""
    func_preflattened_buffer_map: Mapping[Var, Buffer] = {}
    """Mapping[Var, Buffer]: The function buffer map, prior to any flattening."""
    func_dict_attr: Mapping[str, Object] = {}
    """Mapping[str, Object]: The function attrs"""
    func_var_env_dict: Mapping[Var, str] = {}
    """Mapping[Var, str]: The map from var to env thread"""

    # parser and analyzer
    analyzer: tvm.arith.Analyzer = tvm.arith.Analyzer()
    """tvm.arith.Analyzer: The analyzer for simplifying"""
    _report_error: Callable[[str, Union[Span, synr.ast.Span]], None]
    """Callable[[str, Union[Span, synr.ast.Span]], None]: The report error function handle"""

    # root alloc_buffer
    root_alloc_buffers: List[Buffer] = []
    """List[Buffer]: The buffers allocated under root block"""

    def __init__(
        self,
        _report_error: Callable[[str, Union[Span, synr.ast.Span]], None],
        closure_vars: Dict[str, Object],
    ):
        # scope context
        self.node_stack = []
        self.block_info_stack = []
        self.loop_stack = {}
        self.symbols = []
        self.closure_vars = closure_vars
        # function context
        self.func_params = []
        self.func_buffer_map = {}
        self.func_preflattened_buffer_map = {}
        self.func_dict_attr = {}
        self.func_var_env_dict = {}
        # parser and analyzer
        self._report_error = _report_error
        self.analyzer = tvm.arith.Analyzer()
        # root alloc_buffer
        self.root_alloc_buffers = []

    def enter_scope(self, nodes: Optional[List[synr.ast.Node]] = None):
        """Creates a new scope

        Note
        ----
        This function is used for normal scopes that do not involve
        a `with block` scope. Use `enter_block_scope` for block scope cases.

        Parameters
        ----------
        nodes : Optional[List[synr.ast.Node]]
            The synr AST nodes in new scope
        """
        if nodes is None:
            nodes = []
        self.node_stack.append(list(reversed(nodes)))
        self.symbols.append(dict())

    def enter_block_scope(self, nodes: Optional[List[synr.ast.Node]] = None):
        """Creates a new block scope, the function will call `enter_scope` implicitly
        Besides the behaviors of `enter_scope`, it will update loop_stack and
        block_info_stack to maintain block info.

        Note
        ----
        This function should be used to handle a block scope,
        aka the blocks that involve a `with block` scope.

        Parameters
        ----------
        nodes : Optional[List[synr.ast.Node]]
            The synr AST nodes in new scope
        """
        self.enter_scope(nodes)
        # Create a new BlockInfo for the new block
        self.block_info_stack.append(BlockInfo())

    def exit_scope(self):
        """Pop the innermost scope"""
        self.symbols.pop()
        self.node_stack.pop()

    def exit_block_scope(self):
        """Pop the innermost block scope, the function will call `exit_scope` implicitly"""
        self.exit_scope()
        # Pop block_info
        self.block_info_stack.pop()

    def update_symbol(self, name: str, symbol: Union[Buffer, Var], node: synr.ast.Node):
        """Append a symbol into current scope"""
        if isinstance(symbol, Buffer):
            if name in self.symbols[0]:
                self.report_error("Duplicate Buffer name: " + symbol.name, node.span)
            self.symbols[0][name] = symbol
        else:
            self.symbols[-1][name] = symbol

    def remove_symbol(self, name: str):
        """Remove a symbol"""
        for symbols in reversed(self.symbols):
            if name in symbols:
                symbols.pop(name)
                return
        raise RuntimeError("Internal error of tvm script parser: no symbol named " + name)

    def lookup_symbol(self, name: str) -> Optional[Union[Buffer, Var]]:
        """Look up symbol by name"""
        for symbols in reversed(self.symbols):
            if name in symbols:
                return symbols[name]
        return self.closure_vars.get(name)

    def report_error(self, message: str, span: Union[Span, synr.ast.Span]):
        self._report_error(message, span)

    def current_block_scope(self) -> BlockInfo:
        if self.block_info_stack:
            return self.block_info_stack[-1]
        return None
https://github.com/zk-ml/tachikoma
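A hedged sketch of the scope discipline `ContextMaintainer` implements, with the error handler stubbed out (the names below are illustrative): plain `Var`s land in the innermost symbol frame and disappear when the scope is popped, while `Buffer`s always go to the outermost frame.

import tvm
from tvm.script.parser_v1.context_maintainer import ContextMaintainer

ctx = ContextMaintainer(_report_error=lambda msg, span: None, closure_vars={})
ctx.enter_scope()
n = tvm.te.var("n", "int32")
ctx.update_symbol("n", n, node=None)  # node is only consulted for duplicate Buffers
assert ctx.lookup_symbol("n") is n
ctx.exit_scope()
assert ctx.lookup_symbol("n") is None  # popped together with the scope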
python/tvm/script/parser_v1/diagnostics.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Bridge from synr's (the library used for parsing the python AST) DiagnosticContext to TVM's diagnostics """ from synr import DiagnosticContext, ast import tvm from tvm.ir.diagnostics import DiagnosticContext as TVMCtx from tvm.ir.diagnostics import get_renderer, DiagnosticLevel, Diagnostic class TVMDiagnosticCtx(DiagnosticContext): """TVM diagnostics for synr""" diag_ctx: TVMCtx def __init__(self) -> None: self.diag_ctx = TVMCtx(tvm.IRModule(), get_renderer()) self.source_name = None def to_tvm_span(self, src_name, ast_span: ast.Span) -> tvm.ir.Span: return tvm.ir.Span( src_name, ast_span.start_line, ast_span.end_line, ast_span.start_column, ast_span.end_column, ) def add_source(self, name: str, source: str) -> None: src_name = self.diag_ctx.module.source_map.add(name, source) self.source_name = src_name def emit(self, _level, message, span): span = self.to_tvm_span(self.source_name, span) self.diag_ctx.emit(Diagnostic(DiagnosticLevel.ERROR, span, message)) self.diag_ctx.render() # Raise exception on the first error we hit. TODO remove def render(self): self.diag_ctx.render()
https://github.com/zk-ml/tachikoma
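A hedged sketch of the span conversion this bridge performs; the stub below only carries the four position fields `to_tvm_span` actually reads, so it stands in for a real `synr.ast.Span`:

from types import SimpleNamespace
from tvm.script.parser_v1.diagnostics import TVMDiagnosticCtx

diag = TVMDiagnosticCtx()
diag.add_source("example.py", "def f():\n    pass\n")

span = SimpleNamespace(start_line=1, end_line=1, start_column=1, end_column=8)
print(diag.to_tvm_span(diag.source_name, span))  # a tvm.ir.Span over line 1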
python/tvm/script/parser_v1/meta_unparser.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unparse meta AST node into a dict"""
# pylint: disable=invalid-name

from synr import Transformer


class MetaUnparser(Transformer):
    """Python AST Visitor to unparse meta AST node into a dict"""

    def transform(self, node):
        method = "transform_" + node.__class__.__name__
        visitor = getattr(self, method, None)
        if visitor is None:
            self.error(f"Unexpected node type {type(node)} when parsing __tvm_meta__", node.span)
        return visitor(node)

    def transform_DictLiteral(self, node):
        # Recurse through self.transform so dispatch stays consistent with the
        # transform() entry point above (this class defines no `visit` method).
        keys = [self.transform(key) for key in node.keys]
        values = [self.transform(value) for value in node.values]
        return dict(zip(keys, values))

    def transform_Tuple(self, node):
        return tuple(self.transform(element) for element in node.elts)

    def transform_ArrayLiteral(self, node):
        return [self.transform(element) for element in node.elts]

    def transform_Constant(self, node):
        return node.value
https://github.com/zk-ml/tachikoma
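Because `transform` dispatches on the node's class name and each `transform_*` method only reads a couple of attributes, `MetaUnparser` can be exercised with stub nodes. A hedged sketch (the stub classes are illustrative, not real synr types):

from types import SimpleNamespace
from tvm.script.parser_v1.meta_unparser import MetaUnparser


class Constant(SimpleNamespace):  # dispatches to transform_Constant
    pass


class ArrayLiteral(SimpleNamespace):  # dispatches to transform_ArrayLiteral
    pass


node = ArrayLiteral(elts=[Constant(value=1), Constant(value="x")])
assert MetaUnparser().transform(node) == [1, "x"]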
python/tvm/script/parser_v1/parser.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """TVM Script Parser For TIR We use [synr](https://synr.readthedocs.io) to get an AST that is stable over different python versions. Synr also provides an error handling context that we use for error reporting. """ # pylint: disable=invalid-name, inconsistent-return-statements, no-else-return, broad-except import types import json import operator import inspect from typing import Any, Callable, Dict, List, Optional, Union from synr import ast, Transformer, to_ast import tvm from tvm import IRModule from tvm._ffi.base import TVMError from tvm.ir import GlobalVar from tvm.ir.function import BaseFunc from tvm.tir import buffer from tvm.tir.function import PrimFunc from . import _ffi_api from . import tir from .context_maintainer import ContextMaintainer from .meta_unparser import MetaUnparser from .registry import Registry from .diagnostics import TVMDiagnosticCtx from .utils import tvm_span_from_synr, synr_span_from_tvm, call_with_error_reporting from .tir.intrin import Intrin from .tir.node import Slice, BufferSlice from .tir.scope_handler import ScopeHandler, WithScopeHandler, ForScopeHandler from .tir.special_stmt import SpecialStmt from .tir import ty class CallArgumentReader(object): """Helper class to read required arguments from passed arguments. When parsing a function call, we need to match the arguments provided in the AST to the required arguments of the function. This class makes sure all the positional arguments are filled and also fill keyword arguments with thier default value if a different value was not provided. """ def __init__(self, func_name, args, kwargs, parser, node): self.func_name = func_name self.args = args self.kwargs = kwargs self.parser = parser self.node = node def get_pos_only_arg(self, pos, name): """Get corresponding position only function argument from argument list""" if len(self.args) >= pos: arg = self.args[pos - 1] elif name not in self.kwargs: # If no positional argument was found in the AST, we see if it was # defined by name instead. # TODO(tkonolige): this error message is not quite correct. The # number of required arguments is >= pos self.parser.report_error( f"{self.func_name} requires {pos} arguments, but only {len(self.args)} were given.", self.node.span, ) else: arg = self.kwargs[name] return arg def get_kwarg(self, pos, name, default): """Get corresponding keyword function argument from argument list. If the user hasn't provided the argument, set it to the default value. 
""" if len(self.args) >= pos: arg = self.args[pos - 1] elif name in self.kwargs: arg = self.kwargs[name] else: return default return arg def get_varargs(self, pos): """Get corresponding variable argument from argument list""" if len(self.args) >= pos and len(self.kwargs) == 0: return self.args[pos - 1 :] return [] class TVMScriptParser(Transformer): """Synr AST visitor pass which finally lowers to TIR. Notes for Extension ------------------- 1. To support a new type of AST node, add a function transform_xxx(). 2. To support new functions, add the function to the appropriate registry: We divide allowed function calls in TVM script into 3 categories, intrin, scope_handler and special_stmt. 1. intrin functions are low level functions like mod, load, and constants. They correspond to a tir `IRNode`. They must have a return value. The user can register intrin functions for the parser to use. 2. scope_handler functions have no return value. They take two arguments: the parser and the AST node. scope_handler functions are used in with and for statements. 3. special_stmt functions handle cases that do not have a corresponding tir `IRNode`. These functions take the parser and the AST node as arguments and may return a value. When visiting a Call node, we check the special_stmt registry first. If no registered function is found, we then check the intrin registry. When visiting With node, we check the with_scope registry. When visiting For node, we check the for_scope registry. """ _binop_maker = { ast.BuiltinOp.Add: tvm.tir.Add, ast.BuiltinOp.Sub: tvm.tir.Sub, ast.BuiltinOp.Mul: tvm.tir.Mul, ast.BuiltinOp.Div: tvm.tir.Div, ast.BuiltinOp.FloorDiv: tvm.tir.FloorDiv, ast.BuiltinOp.Mod: tvm.tir.FloorMod, ast.BuiltinOp.BitOr: lambda lhs, rhs, span: operator.or_(lhs, rhs), ast.BuiltinOp.BitAnd: lambda lhs, rhs, span: operator.and_(lhs, rhs), ast.BuiltinOp.BitXor: lambda lhs, rhs, span: operator.xor(lhs, rhs), ast.BuiltinOp.GT: tvm.tir.GT, ast.BuiltinOp.GE: tvm.tir.GE, ast.BuiltinOp.LT: tvm.tir.LT, ast.BuiltinOp.LE: tvm.tir.LE, ast.BuiltinOp.Eq: tvm.tir.EQ, ast.BuiltinOp.NotEq: tvm.tir.NE, ast.BuiltinOp.And: tvm.tir.And, ast.BuiltinOp.Or: tvm.tir.Or, } _unaryop_maker = { ast.BuiltinOp.USub: lambda rhs, span: operator.neg(rhs), ast.BuiltinOp.Invert: lambda rhs, span: operator.invert(rhs), ast.BuiltinOp.Not: tvm.tir.Not, } # pylint gets confused here with synr.Transformer which doesn't have a # custom init, so just disable it def __init__( self, base_lineno, tir_namespace, closure_vars ): # pylint: disable=super-init-not-called self.context = None self.base_lineno = base_lineno self.current_lineno = 0 self.current_col_offset = 0 self.tir_namespace = tir_namespace self.closure_vars = closure_vars self.meta = None self._inside_buffer_sugar = False def init_function_parsing_env(self): """Initialize function parsing environment""" self.context = ContextMaintainer(self.report_error, self.closure_vars) # scope emitter def init_meta(self, meta_dict): if meta_dict is not None: self.meta = tvm.ir.load_json(json.dumps(meta_dict)) def transform(self, node): """Generic transformation for visiting the AST. 
Dispatches to `transform_ClassName` for the appropriate ClassName.""" old_lineno, old_col_offset = self.current_lineno, self.current_col_offset if hasattr(node, "lineno"): self.current_lineno = self.base_lineno + node.lineno - 1 if hasattr(node, "col_offset"): self.current_col_offset = node.col_offset method = "transform_" + node.__class__.__name__ visitor = getattr(self, method, self.generic_visit) transform_res = visitor(node) self.current_lineno, self.current_col_offset = old_lineno, old_col_offset return transform_res def match_tir_namespace(self, identifier: str) -> bool: """Check if the namespace is equal to tvm.script.tir""" return identifier in self.tir_namespace def report_error(self, message: str, span: Union[ast.Span, tvm.ir.Span]): """Report an error occuring at a location. This just dispatches to synr's DiagnosticContext. Parameters ---------- message : str Error message span : Union[synr.ast.Span, tvm.ir.Span] Location of the error """ if isinstance(span, tvm.ir.Span): span = synr_span_from_tvm(span) self.error(message, span) def parse_body(self, parent): """Parse remaining statements in this scope. Parameters ---------- parent : synr.ast.Node Parent node of this scope. Errors will be reported here. """ body = [] spans = [] stmt = parent while len(self.context.node_stack[-1]) > 0: stmt = self.context.node_stack[-1].pop() spans.append(stmt.span) res = self.transform(stmt) if res is not None: body.append(res) if len(body) == 0: self.report_error( "Expected another statement at the end of this block. Perhaps you " "used a concise statement and forgot to include a body afterwards.", stmt.span, ) else: return ( tvm.tir.SeqStmt(body, tvm_span_from_synr(ast.Span.union(spans))) if len(body) > 1 else body[0] ) def parse_arg_list(self, func, node_call): """Match the arguments of a function call in the AST to the required arguments of the function. This handles positional arguments, positional arguments specified by name, keyword arguments, and varargs. Parameters ---------- func : Function The function that provides the signature node_call: Union[ast.Call, ast.TypeApply, ast.TypeCall] The AST call node that calls into the function. Returns ------- arg_list : list The parsed positional argument. """ assert isinstance(node_call, (ast.Call, ast.TypeApply, ast.TypeCall)) # collect arguments args = [self.transform(arg) for arg in node_call.params] if isinstance(node_call, ast.TypeApply): kw_args = {} # TypeApply (e.g. 
foo[bar]) doesn't have kwargs defined in synr else: kw_args = { self.transform(k): self.transform(v) for k, v in node_call.keyword_params.items() } # get the name and parameter list of func if isinstance(func, (Intrin, ScopeHandler, SpecialStmt)): func_name, param_list = func.signature() else: self.report_error( "Internal Error: function must be of type Intrin, ScopeHandler or SpecialStmt, " f"but it is {type(func).__name__}", node_call.span, ) # check arguments and parameter list and get a list of arguments reader = CallArgumentReader(func_name, args, kw_args, self, node_call) pos_only, kwargs, varargs = param_list internal_args = list() for i, arg_name in enumerate(pos_only): internal_args.append(reader.get_pos_only_arg(i + 1, arg_name)) for i, arg_info in enumerate(kwargs): arg_name, default = arg_info internal_args.append(reader.get_kwarg(i + 1 + len(pos_only), arg_name, default=default)) if varargs is not None: internal_args.extend(reader.get_varargs(len(pos_only) + len(kwargs) + 1)) elif len(args) + len(kw_args) > len(pos_only) + len(kwargs): self.report_error( "Arguments mismatched. " + f"Expected {len(pos_only) + len(kwargs)} args but got " + f"{len(args) + len(kw_args)}", node_call.span, ) return internal_args def parse_type(self, type_node, parent): """Parse a type annotation. We require the parent object to the type so that we have a place to report the error message if the type does not exist. """ if type_node is None: self.report_error("A type annotation is required", parent.span) res_type = self.transform(type_node) return tvm.ir.TupleType([]) if res_type is None else res_type.evaluate() def generic_visit(self, node): """Fallback visitor if node type is not handled. Reports an error.""" self.report_error(type(node).__name__ + " AST node is not supported", node.span) def transform_Module(self, node): """Module visitor Right now, we only support two formats for TVM Script. Example ------- 1. Generate a PrimFunc (If the code is printed, then it may also contain metadata) .. code-block:: python import tvm @tvm.script def A(...): ... # returns a PrimFunc func = A 2. Generate an IRModule .. code-block:: python import tvm @tvm.script.ir_module class MyMod(): @T.prim_func def A(...): ... @T.prim_func def B(...): ... __tvm_meta__ = ... # returns an IRModule mod = MyMod """ if len(node.funcs) == 1: return self.transform(next(iter(node.funcs.values()))) elif len(node.funcs) == 0: self.report_error( "You must supply at least one class or function definition", node.span ) else: self.report_error( "Only one-function, one-class or function-with-meta source code is allowed", ast.Span.union([x.span for x in list(node.funcs.values())[1:]]), ) def transform_Class(self, node): """Class definition visitor. A class can have multiple function definitions and a single :code:`__tvm_meta__` statement. Each class corresponds to a single :code:`IRModule`. Example ------- .. 
code-block:: python @tvm.script.ir_module class MyClass: __tvm_meta__ = {} def A(): T.evaluate(0) """ if len(node.assignments) == 1: if not ( len(node.assignments[0].lhs) == 1 and isinstance(node.assignments[0].lhs[0], ast.Var) and node.assignments[0].lhs[0].id.name == "__tvm_meta__" ): self.report_error( "The only top level assignments allowed are `__tvm_meta__ = ...`", node.assignments[0].span, ) self.init_meta( MetaUnparser().do_transform(node.assignments[0].rhs, self._diagnostic_context) ) elif len(node.assignments) > 1: self.report_error( "Only a single top level `__tvm_meta__` is allowed", ast.Span.union([x.span for x in node.assignments[1:]]), ) return IRModule( {GlobalVar(name): self.transform(func) for name, func in node.funcs.items()} ) def transform_Function(self, node): """Function definition visitor. Each function definition is translated to a single :code:`PrimFunc`. There are a couple restrictions on TVM Script functions: 1. Function arguments must have their types specified. 2. The body of the function can contain :code:`func_attr` to specify attributes of the function (like it's name). 3. The body of the function can also contain multiple :code:`buffer_bind`s, which give shape and dtype information to arguments. 4. Return statements are implicit. Example ------- .. code-block:: python @T.prim_func def my_function(x: T.handle): # 1. Argument types T.func_attr({"global_symbol": "mmult"}) # 2. Function attributes X_1 = tir.buffer_bind(x, [1024, 1024]) # 3. Buffer binding T.evaluate(0) # 4. This function returns 0 """ def check_as_torch_decorator(decorator: Union[ast.Call, ast.Var]): if isinstance(decorator, ast.Call): if len(decorator.params) != 1: return False func_name = decorator.func_name else: func_name = decorator if isinstance(func_name, ast.Var): return func_name.id.name == "as_torch" def check_decorator(decorators: List[ast.Expr]) -> bool: """Check the decorator is `T.prim_func""" if len(decorators) > 2 or len(decorators) == 0: return False if len(decorators) == 2 and not check_as_torch_decorator(decorators[0]): return False d: ast.Expr = decorators[-1] return ( isinstance(d, ast.Attr) and isinstance(d.object, ast.Var) and self.match_tir_namespace(d.object.id.name) and d.field.name == "prim_func" ) self.init_function_parsing_env() self.context.enter_scope(nodes=node.body.stmts) # add parameters of function for arg in node.params: # Note that this case is for T.match_buffer syntax sugar if isinstance(arg.ty, (ast.TypeCall, ast.TypeApply)) and isinstance( self.transform(arg.ty.func_name), ty.GenericBufferType ): result = self.handle_match_buffer_type(arg.ty, arg.name) if not isinstance(result, buffer.Buffer): self.report_error( "The result type of evaluating TypeCall and TypeApply stmt" f" is wrong: {type(result)}. 
It should be a Buffer", node.span, ) arg_name_with_handle = arg.name + "_handle" arg_var = tvm.te.var(arg_name_with_handle, tvm.ir.PrimType("handle")) self.context.func_buffer_map[arg_var] = result self.context.update_symbol(arg.name, result, node) else: arg_var = tvm.te.var(arg.name, self.parse_type(arg.ty, arg)) self.context.update_symbol(arg.name, arg_var, node) self.context.func_params.append(arg_var) if not check_decorator(node.decorators): self.report_error( "All functions should be decorated by `T.prim_func`", node.span, ) # fetch the body of root block body = self.parse_body(node.body) # return a tir.PrimFunc dict_attr = self.context.func_dict_attr ret_type = self.parse_type(node.ret_type, node) if node.ret_type is not None else None func = tvm.tir.PrimFunc( self.context.func_params, body, ret_type, buffer_map=self.context.func_buffer_map, preflattened_buffer_map=self.context.func_preflattened_buffer_map, attrs=tvm.ir.make_node("DictAttrs", **dict_attr) if dict_attr else None, span=tvm_span_from_synr(node.span), ) # New Scope : Implicit root block # Each function contains an implicit root block in TensorIR, # so here we need a block scope for it. # If the PrimFunc is not a TensorIR func (e.g. TE scheduled func or low-level func), # the root block will not be added. The logic to add root block is in `_ffi_api.Complete` # Fix the PrimFunc # 1. generate root block if necessary # 2. generate surrounding loops for blocks if necessary func = call_with_error_reporting( self.report_error, node.span, _ffi_api.Complete, func, self.context.root_alloc_buffers, ) self.context.exit_scope() return func def transform_Lambda(self, node): """Lambda visitor Return an array of input parameters and the transformed lambda body. """ self.context.enter_scope(nodes=[node.body]) # add parameters of the lambda arg_vars = [] for arg in node.params: # Use "void" for dtype here. The actual type is not yet known and will be # determined later. Using void type will allow IRSubstitute to do the # replacement without flagging a type-mismatch error. arg_var = tvm.te.var(arg.name, dtype="") arg_vars.append(arg_var) self.context.update_symbol(arg.name, arg_var, node) # the body of a lambda must be an expr if not isinstance(node.body, ast.Expr): self.report_error("The body of a lambda must be an expression", node.span) # transform the body of the lambda body = self.transform(node.body) self.context.exit_scope() return arg_vars, body def transform_Assign(self, node): """Assign visitor AST abstract grammar: Assign(expr* targets, expr value, string? type_comment) By now 5 patterns of Assign is supported: 1. special stmts with return value 1.1 Buffer = T.match_buffer()/T.buffer_decl() 1.2 Var = T.var() 1.3 Var = T.env_thread() 2. (BufferStore) Buffer[PrimExpr, PrimExpr, ..., PrimExpr] = PrimExpr 3. (Store) Var[PrimExpr] = PrimExpr 4. with scope handlers with concise scoping and var def 4.1 var = T.allocate() 5. A call to a pure python function, consuming and producing TVMScript values. The outputs are inlined into the following body (no variable is created). x, y = f(...) 
""" if isinstance(node.rhs, ast.Call): # Pattern 1 & Pattern 4 if isinstance(node.rhs.func_name, ast.Op): func = None else: func = self.transform(node.rhs.func_name) if isinstance(func, WithScopeHandler): if not func.concise_scope or not func.def_symbol: self.report_error( "with scope handler " + func.signature()[0] + " is not suitable here", node.rhs.span, ) # Pattern 4 arg_list = self.parse_arg_list(func, node.rhs) func.enter_scope(node, self.context, arg_list, node.rhs.func_name.span) func.body = self.parse_body(node) return func.exit_scope(node, self.context, arg_list, node.rhs.func_name.span) elif isinstance(func, SpecialStmt): # Pattern 1 arg_list = self.parse_arg_list(func, node.rhs) func.handle(node, self.context, arg_list, node.rhs.func_name.span) return self.parse_body(node) elif isinstance(func, types.FunctionType): # Pattern 5 args = [self.transform(arg) for arg in node.rhs.params] try: out = func(*args) except Exception as e: self.report_error( "Error occurred when invoking the function " + func.__name__ + ": \n" + str(e), node.rhs.span, ) if len(node.lhs) == 1 and not isinstance(out, list): out = [out] assert len(out) == len(node.lhs) for var, value in zip(node.lhs, out): self.context.update_symbol(var.id.name, value, node) body = self.parse_body(node) for var, value in zip(node.lhs, out): self.context.remove_symbol(var.id.name) return body if isinstance(node.rhs, (ast.Call, ast.Constant)): # Pattern 4 of let binding value = self.transform(node.rhs) if len(node.lhs) == 1 and not isinstance(node.lhs[0], ast.Var): # This is a little confusing because it only is true when # we have taken this branch. We might need to clarify what # exectly is allowed in Assignments in tvmscript. self.report_error( "Left hand side of assignment must be an unqualified variable", node.span, ) ast_var = node.lhs[0] if node.ty is None and hasattr(value, "dtype"): var_ty = value.dtype else: var_ty = self.parse_type(node.ty, ast_var) var = tvm.te.var( ast_var.id.name, var_ty, span=tvm_span_from_synr(ast_var.span), ) self.context.update_symbol(var.name, var, node) body = self.parse_body(node) self.context.remove_symbol(var.name) return tvm.tir.LetStmt(var, value, body, span=tvm_span_from_synr(node.span)) self.report_error( """Assignments should be one of: 1. A "special statement" with return value 1.1 Buffer = T.match_buffer()/T.buffer_decl() 1.2 Var = T.var() 1.3 Var = T.env_thread() 2. A store into a buffer: Buffer[PrimExpr, PrimExpr, ..., PrimExpr] = PrimExpr 3. A store into a variable: Var[PrimExpr] = PrimExpr 4. A with scope handler with concise scoping and var def 4.1 var = T.allocate() 5. The right-hand side being a call to a pure python function, consuming and producing TVMScript values. 
x, y = f(...)""", node.span, ) def transform_SubscriptAssign(self, node): """Visitor for statements of the form :code:`x[1] = 2`.""" symbol = self.transform(node.params[0]) indexes = self.transform(node.params[1]) rhs = self.transform(node.params[2]) rhs_span = tvm_span_from_synr(node.params[2].span) if isinstance(symbol, tvm.tir.Buffer): if len(indexes) != len(symbol.shape): self.report_error( f"Buffer {symbol.name} is {len(symbol.shape)}-dimensional, " f"cannot be indexed by {len(indexes)}-dimensional indices.", node.params[1].span, ) def __convert_index(x): if isinstance(x, Slice): return x.as_index_expr(self.report_error) return x # BufferStore indexes = [__convert_index(x) for x in indexes] return tvm.tir.BufferStore( symbol, tvm.runtime.convert(rhs, span=rhs_span), indexes, span=tvm_span_from_synr(node.span), ) else: if symbol.dtype == "handle" and len(indexes) != 1: self.report_error( "Handles only support one-dimensional indexing. Use `T.match_buffer` to " "construct a multidimensional buffer from a handle.", node.params[0].span, ) if len(indexes) != 1: self.report_error( f"Store is only allowed with one index, but {len(indexes)} were provided.", node.params[1].span, ) self.report_error( "Use of tir.Store has been deprecated in favor of tir.BufferStore.", node.span ) def transform_AttrAssign(self, node): """Visitor for statements of the form :code:`x.y = 2`.""" obj = self.transform(node.params[0]) field = node.params[1] value = self.transform(node.params[2]) if not hasattr(obj, field.name): self.error(f"Field {field.name} does not exist", field.span) var = getattr(obj, field.name) if not isinstance(var, tvm.tir.Var): self.error( f"Can only assign to tir.Var attributes, not {type(var).__name__}", node.span ) body = self.parse_body(node) return tvm.tir.LetStmt(var, value, body, span=tvm_span_from_synr(node.span)) def transform_Assert(self, node): """Assert visitor Pattern corresponds to concise mode of :code:`with T.Assert()`. """ condition = self.transform(node.condition) if node.msg is None: self.report_error("Assert statements must have an error message.", node.span) message = self.transform(node.msg) body = self.parse_body(node) return tvm.tir.AssertStmt( condition, tvm.runtime.convert(message), body, span=tvm_span_from_synr(node.span) ) def transform_For(self, node): """For visitor AST abstract grammar: For(expr target, expr iter, stmt* body, stmt* orelse, string? type_comment) By now 1 pattern of For is supported: 1. 
for scope handler for name in T.serial()/T.parallel()/T.vectorized()/T.unroll()/range()/ T.grid()/T.thread_binding() """ if not isinstance(node.rhs, ast.Call): self.report_error("The loop iterator should be a function call.", node.rhs.span) func = self.transform(node.rhs.func_name) if not isinstance(func, ForScopeHandler): self.report_error( "Only For scope handlers can be used in a for statement.", node.rhs.func_name.span ) # prepare for new for scope old_lineno, old_col_offset = self.current_lineno, self.current_col_offset self.current_lineno = node.span.start_line self.current_col_offset = node.span.start_column self.context.enter_scope(nodes=node.body.stmts) # for scope handler process the scope arg_list = [ tvm.runtime.convert(arg, span=tvm_span_from_synr(node.rhs.span)) for arg in self.parse_arg_list(func, node.rhs) ] func.enter_scope(node, self.context, arg_list, node.rhs.func_name.span) func.body = self.parse_body(node) res = func.exit_scope(node, self.context, arg_list, node.rhs.func_name.span) # exit the scope self.context.exit_scope() self.current_lineno, self.current_col_offset = old_lineno, old_col_offset return res def transform_While(self, node): """While visitor AST abstract grammar: While(expr condition, stmt* body) """ condition = self.transform(node.condition) # body self.context.enter_scope(nodes=node.body.stmts) body = self.parse_body(node) self.context.exit_scope() return tvm.tir.While(condition, body, span=tvm_span_from_synr(node.span)) def transform_With(self, node): """With visitor AST abstract grammar: With(withitem* items, stmt* body, string? type_comment) withitem = (expr context_expr, expr? optional_vars) By now 2 patterns of With is supported: 1. with scope handler with symbol def with T.allocate() as targets: 2. with scope handler without symbol def with T.block(*axes)/T.let()/T.Assert()/T.attr()/T.realize() """ if not isinstance(node.rhs, ast.Call): self.report_error( "The context expression of a `with` statement should be a function call.", node.rhs.span, ) func = self.transform(node.rhs.func_name) if not isinstance(func, WithScopeHandler): self.report_error( f"Function {func} cannot be used in a `with` statement.", node.rhs.func_name.span ) # prepare for new block scope old_lineno, old_col_offset = self.current_lineno, self.current_col_offset self.current_lineno = node.body.span.start_line self.current_col_offset = node.body.span.start_column self.context.enter_block_scope(nodes=node.body.stmts) # with scope handler process the scope arg_list = self.parse_arg_list(func, node.rhs) func.enter_scope(node, self.context, arg_list, node.rhs.func_name.span) func.body = self.parse_body(node) res = func.exit_scope(node, self.context, arg_list, node.rhs.func_name.span) # exit the scope self.context.exit_block_scope() self.current_lineno, self.current_col_offset = old_lineno, old_col_offset return res def transform_If(self, node): """If visitor AST abstract grammar: If(expr test, stmt* body, stmt* orelse) """ condition = self.transform(node.condition) # then body self.context.enter_scope(nodes=node.true.stmts) then_body = self.parse_body(node) self.context.exit_scope() # else body if len(node.false.stmts) > 0: self.context.enter_scope(nodes=node.false.stmts) else_body = self.parse_body(node) self.context.exit_scope() else: else_body = None return tvm.tir.IfThenElse( condition, then_body, else_body, span=tvm_span_from_synr(node.span) ) def transform_Call(self, node): """Call visitor 3 different Call patterns are allowed: 1. 
Intrin representing a PrimExpr/IterVar 1.1 tir.int/uint/float8/16/32/64/floormod/floordiv/load/cast/ramp/broadcast/max 1.2 tir.range/reduce_axis/scan_axis/opaque_axis 2. tir.Op(dtype, ...) 3. other callable functions """ if isinstance(node.func_name, ast.Op): if node.func_name.name == ast.BuiltinOp.Subscript: return self.transform_Subscript(node) if node.func_name.name in self._binop_maker: lhs = self.transform(node.params[0]) # There is no supertype for everything that can appear in # an expression, so we manually add what we might get here. if not isinstance(lhs, (tvm.tir.PrimExpr, BufferSlice)): # We would really like to report a more specific # error here, but this parser contains no distinction # between parsing statements and parsing expressions. All # rules just call `transform`. self.report_error( f"Left hand side of binary op must be a PrimExpr, " "but it is a {type(lhs).__name__}", node.params[0].span, ) rhs = self.transform(node.params[1]) if not isinstance(rhs, (tvm.tir.PrimExpr, BufferSlice)): self.report_error( f"Right hand side of binary op must be a PrimExpr, " "but it is a {type(rhs).__name__}", node.params[1].span, ) return call_with_error_reporting( self.report_error, node.span, lambda node, lhs, rhs, span: self._binop_maker[node.func_name.name]( lhs, rhs, span=span ), node, lhs, rhs, tvm_span_from_synr(node.span), ) if node.func_name.name in self._unaryop_maker: rhs = self.transform(node.params[0]) if node.func_name.name == ast.BuiltinOp.USub and isinstance( node.params[0], ast.Constant ): # '-literal' should be parsed together for proper literal type inference if not isinstance(rhs, (tvm.tir.IntImm, tvm.tir.FloatImm)): self.report_error("The literal is illegal after -", node.params[0].span) return tvm.tir.const(-rhs.value) return self._unaryop_maker[node.func_name.name]( rhs, span=tvm_span_from_synr(node.span) ) self.report_error(f"Unsupported operator {node.func_name.name}.", node.func_name.span) else: func = self.transform(node.func_name) if isinstance(func, Intrin) and not func.stmt: # pattern 1 arg_list = self.parse_arg_list(func, node) return call_with_error_reporting( self.report_error, node.func_name.span, func.handle, arg_list, node.func_name.span, ) else: args = [self.transform(arg) for arg in node.params] kw_args = { self.transform(k): self.transform(v) for k, v in node.keyword_params.items() } if isinstance(func, tvm.tir.op.Op): if not "dtype" in kw_args.keys(): self.report_error(f"{func} requires a dtype keyword argument.", node.span) # pattern 2 return tvm.tir.Call( kw_args["dtype"], func, args, span=tvm_span_from_synr(node.span) ) elif callable(func): # pattern 3 return func(*args, **kw_args) else: self.report_error( f"Function is neither callable nor a tvm.tir.op.Op (it is a {type(func)}).", node.func_name.span, ) def transform_UnassignedCall(self, node): """Visitor for statements that are function calls. This handles function calls that appear on thier own line like `tir.realize`. Examples -------- .. code-block:: python @T.prim_func def f(): A = T.buffer_decl([10, 10]) T.realize(A[1:2, 1:2], "") # This is an UnassignedCall A[1, 1] = 2 # This is also an UnassignedCall """ # Only allowed builtin operator that can be a statement is x[1] = 3 i.e. subscript assign. 
if isinstance(node.call.func_name, ast.Op): if node.call.func_name.name == ast.BuiltinOp.SubscriptAssign: return self.transform_SubscriptAssign(node.call) if node.call.func_name.name == ast.BuiltinOp.AttrAssign: return self.transform_AttrAssign(node.call) self.report_error( "Binary and unary operators are not allowed as a statement", node.span ) # handle a regular function call func = self.transform(node.call.func_name) arg_list = self.parse_arg_list(func, node.call) if isinstance(func, tir.scope_handler.AssertHandler): self.report_error( "A standalone `T.Assert` is not allowed. Use `assert condition, message` " "instead.", node.call.func_name.span, ) if isinstance(func, Intrin): if func.stmt: return call_with_error_reporting( self.report_error, node.call.func_name.span, func.handle, arg_list, node.call.func_name.span, ) else: self.report_error(f"This intrinsic cannot be used as a statement.", node.call.span) elif isinstance(func, WithScopeHandler) and func.concise_scope and not func.def_symbol: func.enter_scope(node, self.context, arg_list, node.call.func_name.span) func.body = self.parse_body(node) return func.exit_scope(node, self.context, arg_list, node.call.func_name.span) elif isinstance(func, SpecialStmt) and not func.def_symbol: func.handle(node, self.context, arg_list, node.call.func_name.span) return self.report_error( "Unexpected statement. Expected an assert, an intrinsic, a with statement, or a " f"special statement, but got {type(func).__name__}.", node.call.func_name.span, ) def transform_Slice(self, node): """Index slice visitor.""" start = self.transform(node.start) end = self.transform(node.end) if not ( isinstance(node.step, ast.Constant) and isinstance(node.step.value, int) and node.step.value > 0 ): self.report_error( "Only positive integer step size is supported for slices.", node.step.span ) return Slice(start, end, node.step.value, tvm_span_from_synr(node.span)) def transform_Subscript(self, node): """Array access visitor. By now only 3 types of Subscript are supported: 1. Buffer[index, index, ...], Buffer element access(BufferLoad & BufferStore) Var[index] Buffer element access() 2. Buffer[start: stop, start: stop, ...], BufferRealize(realize(buffer[...])) 3. 
Array[index], Buffer element access
        """
        symbol = self.transform(node.params[0])
        if symbol is None:
            self.report_error(
                f"Variable {node.params[0].id.name} is not defined.", node.params[0].span
            )
        indexes = [self.transform(x) for x in node.params[1].values]
        if isinstance(symbol, tvm.tir.expr.Var):
            if symbol.dtype == "handle":
                self.report_error(
                    "Cannot read directly from a handle, use `T.match_buffer` "
                    "to create a buffer to read from.",
                    node.params[0].span,
                )
            if len(indexes) > 1:
                self.report_error(
                    "Only a single index can be provided when indexing into a `var`.",
                    node.params[1].span,
                )
            index = indexes[0]
            if not isinstance(index, (tvm.tir.PrimExpr, int)):
                self.report_error(
                    "Var load index should be an int or PrimExpr, but it is a "
                    + str(type(index)),
                    node.span,
                )
            self.report_error(
                "Use of tir.Load has been deprecated in favor of tir.BufferLoad", node.span
            )
        elif isinstance(symbol, tvm.tir.Buffer):
            return BufferSlice(
                symbol, indexes, self.report_error, span=tvm_span_from_synr(node.span)
            )
        elif isinstance(symbol, tvm.container.Array):
            if len(indexes) > 1:
                self.report_error(
                    "Array access should be one-dimension access, but the indices are "
                    + str(indexes),
                    node.span,
                )
            index = indexes[0]
            if not isinstance(index, (int, tvm.tir.expr.IntImm)):
                self.report_error(
                    "Array access index expected int or IntImm, but got " + str(type(index)),
                    node.span,
                )
            if int(index) >= len(symbol):
                self.report_error(
                    f"Array access out of bound, size: {len(symbol)}, got index {index}.",
                    node.span,
                )
            return symbol[int(index)]
        else:
            self.report_error(
                f"Cannot subscript from a {type(symbol).__name__}. Only variables and "
                "buffers are supported.",
                node.params[0].span,
            )

    def transform_Attr(self, node):
        """Visitor for field access of the form `x.y`.

        This visitor is used to look up function and symbol names. We have two
        cases to handle here:
        1. If we have a statement of the form `tir.something`, then we look up
           `tir.something` in the `Registry`. If the function is not in the
           registry, then we try to find a `tvm.ir.op.Op` with the same name.
        2. All other names `tvm.something` are looked up in this current python
           namespace.
        """

        def get_full_attr_name(node: ast.Attr) -> str:
            reverse_field_names = [node.field.name]
            while isinstance(node.object, ast.Attr):
                node = node.object
                reverse_field_names.append(node.field.name)
            if isinstance(node.object, ast.Var):
                reverse_field_names.append(node.object.id.name)
            return ".".join(reversed(reverse_field_names))

        if isinstance(node.object, (ast.Var, ast.Attr)):
            full_attr_name = get_full_attr_name(node)
            attr_object, fields = full_attr_name.split(".", maxsplit=1)
            if self.match_tir_namespace(attr_object):
                func_name = "tir." + fields
                res = Registry.lookup(func_name)
                if res is not None:
                    return res
                try:
                    return tvm.ir.op.Op.get(func_name)
                except TVMError as e:
                    # Check if we got an attribute error; `str.find` returns -1
                    # (which is truthy) when the pattern is absent, so compare
                    # explicitly.
                    if e.args[0].find("AttributeError") != -1:
                        self.report_error(f"Unregistered function `tir.{fields}`.", node.span)
                    else:
                        raise e

        symbol = self.transform(node.object)
        if symbol is None:
            self.report_error("Unsupported Attribute expression.", node.object.span)
        if not hasattr(symbol, node.field.name):
            self.report_error(
                f"Type {type(symbol)} does not have a field called `{node.field.name}`.", node.span
            )
        res = getattr(symbol, node.field.name)
        return res

    def transform_TypeAttr(self, node):
        """Visitor for field access of the form `x.y` for types.

        We have two cases here:
        1. If the type is of the form `T.something`, we look up the type in
           the `tir` namespace in this module.
        2. If the type is of the form `tvm.x.something` then we look up
           `tvm.x.something` in this module's namespace.
        """
        if isinstance(node.object, ast.TypeVar):
            if self.match_tir_namespace(node.object.id.name):
                if not hasattr(tir, node.field.name):
                    self.report_error(
                        f"Invalid type annotation `tir.{node.field.name}`.", node.span
                    )
                return getattr(tir, node.field.name)

        symbol = self.transform(node.object)
        if symbol is None:
            self.report_error("Unsupported Attribute expression", node.object.span)
        if not hasattr(symbol, node.field.name):
            self.report_error(
                f"Type {type(symbol)} does not have a field called `{node.field.name}`.", node.span
            )
        res = getattr(symbol, node.field.name)
        return res

    def transform_DictLiteral(self, node):
        """Dictionary literal visitor.

        Handles dictionary literals of the form `{x:y, z:2}`.
        """
        keys = [self.transform(key) for key in node.keys]
        values = [self.transform(value) for value in node.values]
        return dict(zip(keys, values))

    def transform_Tuple(self, node):
        """Tuple visitor.

        Handles tuples of the form `(x, y, 2)`.
        """
        return tuple(self.transform(element) for element in node.values)

    def transform_ArrayLiteral(self, node):
        """List literal visitor.

        Handles lists of the form `[x, 2, 3]`.
        """
        return [self.transform(element) for element in node.values]

    def transform_Var(self, node):
        """Variable visitor.

        Handles variables like `x` in `x = 2`.
        """
        name = node.id.name
        if name == "meta":
            return self.meta
        symbol = Registry.lookup(name)
        if symbol is not None:
            return symbol
        symbol = self.context.lookup_symbol(name)
        if symbol is not None:
            return symbol
        self.report_error(f"Unknown identifier {name}.", node.span)

    def transform_TypeVar(self, node):
        """Type variable visitor.

        Equivalent to `transform_Var` but for types.
        """
        name = node.id.name
        symbol = Registry.lookup(name) or self.context.lookup_symbol(name)
        if symbol is not None:
            return symbol
        self.report_error(f"Unknown identifier {name}.", node.span)

    def transform_Constant(self, node):
        """Constant value visitor.

        Constant values include `None`, `"strings"`, `2` (integers),
        `4.2` (floats), and `true` (booleans).
        """
        return tvm.runtime.convert(node.value, span=tvm_span_from_synr(node.span))

    def transform_TypeConstant(self, node):
        """Constant value visitor for types.

        See `transform_Constant`.
        """
        if self._inside_buffer_sugar:
            return self.transform_Constant(node)
        return node.value

    def transform_TypeTuple(self, node):
        """Tuple value visitor for types.

        Mostly used in `transform_TypeCall` and `transform_TypeApply`.
        """
        return [self.transform(value) for value in node.values]

    def transform_TypeCall(self, node):
        """TypeCall visitor.

        This occurs when an expression is used inside a T.Buffer
        parameter annotation.
        """
        # ast.Call has the BuiltinOp as node.func_name.name, whereas
        # ast.TypeCall has the BuiltinOp as node.func_name. So we can
        # delegate to self.transform_Call, but the error messages for
        # unsupported operations will highlight the entire expression
        # and not just the function itself.
        op = ast.Op(node.span, node.func_name)
        call = ast.Call(node.span, op, node.params, node.keyword_params)
        return self.transform_Call(call)

    def transform_TypeApply(self, node):
        """Visitor for Type[Type] expressions.

        Mostly used for ``T.Ptr`` expressions.
        """
        func = self.transform(node.func_name)

        if not isinstance(func, ty.TypeGeneric) or not hasattr(func, "__getitem__"):
            self.report_error(
                f"Use of type arguments requires a type that accepts type arguments (e.g.
T.Ptr), " f"but found {type(func).__name__} instead.", node.span, ) param_types = [] for idx, param in enumerate(node.params): param_type = self.transform(param) if not isinstance(param_type, ty.TypeGeneric) and func.require_type_generic_at(idx): self.report_error( f"Expected a type but found {type(param).__name__} " f"at {idx}th type argument", param.span, ) param_types.append(param_type) if len(param_types) == 1: return func[param_types[0]] else: return func[param_types] def handle_match_buffer_type(self, node, buffer_name): """special function to handle syntax sugar for match buffer. This method is for buffer declarations in the function parameters. """ func = self.transform(node.func_name) assert isinstance(func, SpecialStmt) # parse args and kwargs for TypeCall and TypeApply self._inside_buffer_sugar = True try: arg_list = self.parse_arg_list(func, node) finally: self._inside_buffer_sugar = False # Note that the third element in arg_list would always be the 'name' # TODO: This index is hardcoded as a workaround. Better to make it programmatic if arg_list[2] is None: arg_list[2] = buffer_name buf = func.handle(node, self.context, arg_list, node.func_name.span) return buf def transform_Return(self, node): self.report_error( "TVM script does not support return statements. Instead the last statement in any " "block is implicitly returned.", node.span, ) def get_tir_namespace(script: Union[Callable, type]) -> List[str]: assert inspect.isfunction(script) or inspect.isclass(script) env: Dict[str, Any] = script.__globals__ return [key for key in env.keys() if env[key] == tir] def from_source( input_func: Union[str, Callable], tir_prefix: Optional[List[str]] = None ) -> Union[PrimFunc, IRModule]: """Parse function or string into PrimFunc or IRModule. If possible, pass the TVM script in as a function so that line numbers and filename will be accurate. Parameters ---------- input_module : Union[str, Callable] The python function to be parsed. tir_prefix : Optional[List[str]] The tir prefix list. Only works for str input, default by "tir" and "T". Returns ------- output : Union[Function, Module] The Function or Module in IR. """ if isinstance(input_func, str): tir_prefix = ["T", "tir"] if tir_prefix is None else tir_prefix return to_ast(input_func, TVMDiagnosticCtx(), TVMScriptParser(0, tir_prefix, {})) elif inspect.isfunction(input_func): _, start_line = inspect.getsourcelines(input_func) env: Dict[str, Any] = input_func.__globals__ namespace = [key for key in env.keys() if env[key] is tir] _closure_vars = inspect.getclosurevars(input_func) closure_vars = {**_closure_vars.nonlocals, **_closure_vars.globals} parser = TVMScriptParser(start_line, namespace, closure_vars) result = to_ast(input_func, TVMDiagnosticCtx(), parser) return result else: raise TypeError("Only function definitions are supported.") def ir_module(input_module: type) -> IRModule: """Decorate a python class as tvm IRModule. Parameters ---------- input_module : type The python class to be parsed. Returns ------- output : IRModule The result IRModule. """ if inspect.isclass(input_module): func_dict = { name: f for name, f in input_module.__dict__.items() if isinstance(f, BaseFunc) } return IRModule(func_dict) raise TypeError("Only class definitions are supported.")
https://github.com/zk-ml/tachikoma
python/tvm/script/parser_v1/registry.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """TVM Script Parser Function Registry """ # pylint: disable=inconsistent-return-statements, relative-beyond-top-level, import-outside-toplevel import types from typing import Union, Callable, Dict, Optional, Any class Registry(object): """Registration map All these maps are static """ registrations: Dict[str, type] = dict() @staticmethod def lookup(name: str) -> Optional[Any]: if name in Registry.registrations: # every time we create a new handler # since we may want to keep some local info inside it return Registry.registrations[name]() return None def register(inputs: Union[Callable, type]) -> type: """Register Intrin/ScopeHandler/SpecialStmt""" registration: type if isinstance(inputs, types.FunctionType): # is function from .tir.intrin import Intrin def create_new_intrin(func) -> type: class NewIntrin(Intrin): def __init__(self): super().__init__(func) return NewIntrin registration = create_new_intrin(inputs) elif isinstance(inputs, type): # is class registration = inputs else: raise ValueError() key: str = registration().signature()[0] Registry.registrations[key] = registration return registration
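

# ---------------------------------------------------------------------------
# Usage sketch (editor's addition): registering a plain function wraps it in
# an Intrin subclass and keys it as "tir.<name>". `tir.clz` is an existing
# TIR op that we merely forward to; the registration itself is hypothetical.
import tvm.tir


@register
def clz(x, span):
    # Count leading zeros, forwarded to the built-in tir.clz op.
    return tvm.tir.call_intrin("int32", "tir.clz", x, span=span)


# Each lookup constructs a fresh handler instance, per the comment above.
assert Registry.lookup("tir.clz") is not None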
https://github.com/zk-ml/tachikoma
python/tvm/script/parser_v1/tir/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """TVMScript for TIR""" # Type system from .ty import void, boolean, handle, Ptr, Tuple, Buffer from .ty import bool # pylint: disable=redefined-builtin from .prim_func import prim_func # add all floating point and integer datatypes to the module for _dtype in ["float", "uint", "int"]: for _size in ["8", "16", "32", "64"]: for _lanes in ["", "x4", "x8", "x16", "x32", "x64"]: from . import ty _name = _dtype + _size + _lanes if hasattr(ty, _name): globals()[_name] = getattr(ty, _name)
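
# Editor's note (addition): the loop above only re-exports names that `ty`
# actually defines (hence the hasattr guard), so not every dtype/size/lanes
# combination materializes. The candidate set it scans is easy to reproduce
# standalone:
_candidates = [
    _d + _s + _l
    for _d in ["float", "uint", "int"]
    for _s in ["8", "16", "32", "64"]
    for _l in ["", "x4", "x8", "x16", "x32", "x64"]
]
assert len(_candidates) == 3 * 4 * 6  # 72 candidate names
assert "float32x4" in _candidates and "int8" in _candidates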
https://github.com/zk-ml/tachikoma
python/tvm/script/parser_v1/tir/intrin.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """TVM Script Parser Intrinsic Classes""" # pylint: disable=redefined-builtin, relative-beyond-top-level import builtins from typing import Any, List import tvm.tir from tvm.tir import FloatImm from ....target import codegen from ..registry import register from ..utils import get_param_list, tvm_span_from_synr class Intrin: def __init__(self, intrin, stmt=False): self.intrin = intrin self.stmt = stmt def signature(self): return "tir." + self.intrin.__name__, get_param_list(self.intrin) def handle(self, arg_list: List[Any], span: tvm.ir.Span): return self.intrin(*arg_list, span=tvm_span_from_synr(span)) @register def bool(imm, span): return imm.astype("bool", span) # register all datatypes for _dtype in ["float", "uint", "int"]: for _size in ["8", "16", "32", "64"]: for _lanes in ["", "x4", "x8", "x16", "x32"]: _name = _dtype + _size + _lanes # nest closures so we copy the name string def wrap(name): def f(imm, span): if name.startswith("float"): if imm in {"inf", "-inf", "nan"}: return FloatImm(dtype=name, value=float(imm), span=span) return imm.astype(name, span) f.__name__ = name return f _intrin = wrap(_name) register(_intrin) @register def min_value(dtype, span): return tvm.tir.min_value(dtype, span) @register def max_value(dtype, span): return tvm.tir.max_value(dtype, span) @register def floordiv(x, y, span): return tvm.tir.floordiv(x, y, span) @register def floormod(x, y, span): return tvm.tir.floormod(x, y, span) @register def truncmod(x, y, span): return tvm.tir.truncmod(x, y, span) @register def truncdiv(x, y, span): return tvm.tir.truncdiv(x, y, span) @register def ceildiv(x, y, span): return tvm.tir.ceildiv(x, y, span) @register def abs(x, span): return tvm.tir.abs(x, span) @register def load(dtype, var, index, predicate=None, span=None): return tvm.tir.Load(dtype, var, index, predicate, span) @register def cast(value, dtype, span): return tvm.tir.Cast(dtype, value, span) @register def ramp(base, stride, lanes, span): return tvm.tir.Ramp(base, stride, lanes.value, span) @register def broadcast(value, lanes, span): return tvm.tir.Broadcast(value, lanes.value, span) @register def iter_var(var, dom, iter_type, thread_tag, span): iter_type = getattr(tvm.tir.IterVar, iter_type) return tvm.tir.IterVar(dom, var, iter_type, thread_tag, span) @register def max(a, b, span): # pylint: disable=redefined-builtin return tvm.tir.Max(a, b, span) @register def min(a, b, span): # pylint: disable=redefined-builtin return tvm.tir.Min(a, b, span) def get_axis(begin, end, iter_type, span): ana = tvm.arith.Analyzer() extent = ana.simplify(end - begin) block_var_dom = tvm.ir.Range.from_min_extent(begin, extent) iter_type_dict = {"data_par": 0, "reduce": 2, "scan": 3, "opaque": 4} return tvm.tir.IterVar(block_var_dom, "bv", 
iter_type_dict[iter_type], span=span) @register def range(begin, end, span): return get_axis(begin, end, "data_par", span) @register def reduce_axis(begin, end, span): return get_axis(begin, end, "reduce", span) @register def scan_axis(begin, end, span): return get_axis(begin, end, "scan", span) @register def opaque_axis(begin, end, span): return get_axis(begin, end, "opaque", span) @register def Select(cond, if_body, else_body, span): # pylint: disable=invalid-name return tvm.tir.Select(cond, if_body, else_body, span) @register def Let(var, value, body, span): # pylint: disable=invalid-name return tvm.tir.Let(var, value, body, span) @register class EvaluateIntrin(Intrin): def __init__(self): def evaluate(value, span): return tvm.tir.Evaluate(value, span) super().__init__(evaluate, stmt=True) @register class StoreIntrin(Intrin): def __init__(self): def store(var, index, value, predicate=True, span=None): return tvm.tir.Store(var, value, index, predicate, span) super().__init__(store, stmt=True) @register class AssumeIntrin(Intrin): def __init__(self): def assume(constraint, span): return tvm.tir.Evaluate( tvm.tir.call_intrin("bool", "tir.assume", constraint, span=span) ) super().__init__(assume, stmt=True) @register def comm_reducer(lambda_io, identities, span): """Create a CommReducer from lambda inputs/outputs and the identities""" lambda_input = lambda_io[0] lambda_output = lambda_io[1] num_args = len(lambda_input) num_arg_per_group = num_args // 2 x = [lambda_input[i] for i in builtins.range(0, num_arg_per_group)] y = [lambda_input[i] for i in builtins.range(num_arg_per_group, num_args)] if not isinstance(lambda_output, tuple): lambda_output = (lambda_output,) return tvm.tir.CommReducer(x, y, lambda_output, identities, span) @register def llvm_lookup_intrinsic_id(name, span): # pylint: disable=unused-argument return codegen.llvm_lookup_intrinsic_id(name) @register def FloorMod(x, y, span): # pylint: disable=invalid-name return tvm.tir.FloorMod(x, y, span) @register def FloorDiv(x, y, span): # pylint: disable=invalid-name return tvm.tir.FloorDiv(x, y, span) @register def Mul(x, y, span): # pylint: disable=invalid-name return tvm.tir.Mul(x, y, span) @register def Div(x, y, span): # pylint: disable=invalid-name return tvm.tir.Div(x, y, span) @register def Add(x, y, span): # pylint: disable=invalid-name return tvm.tir.Add(x, y, span) @register def Sub(x, y, span): # pylint: disable=invalid-name return tvm.tir.Sub(x, y, span) @register def LT(x, y, span): # pylint: disable=invalid-name return tvm.tir.LT(x, y, span) @register def LE(x, y, span): # pylint: disable=invalid-name return tvm.tir.LE(x, y, span) @register def GT(x, y, span): # pylint: disable=invalid-name return tvm.tir.GT(x, y, span) @register def GE(x, y, span): # pylint: disable=invalid-name return tvm.tir.GE(x, y, span) @register def EQ(x, y, span): # pylint: disable=invalid-name return tvm.tir.EQ(x, y, span) @register def NE(x, y, span): # pylint: disable=invalid-name return tvm.tir.NE(x, y, span) @register def And(x, y, span): # pylint: disable=invalid-name return tvm.tir.And(x, y, span) @register def Or(x, y, span): # pylint: disable=invalid-name return tvm.tir.Or(x, y, span) @register def Cast(dtype, value, span): # pylint: disable=invalid-name return tvm.tir.Cast(dtype, value, span)
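

# ---------------------------------------------------------------------------
# Usage sketch (editor's addition): a few of the intrinsics registered above
# as they appear inside TVMScript, assuming `tvm.script.tir` resolves to this
# v1 parser in this snapshot.
import tvm
from tvm.script import tir as T


@T.prim_func
def _intrin_demo(a: T.handle) -> None:
    A = T.match_buffer(a, (8,), "int32")
    for i in T.serial(0, 8):
        # floordiv and max route through the @register functions above.
        A[i] = T.max(T.floordiv(A[i], 2), 0)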
https://github.com/zk-ml/tachikoma
python/tvm/script/parser_v1/tir/node.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=redefined-builtin """TVM Script nodes.""" from typing import Optional, Union, List, Callable import synr from tvm.arith import Analyzer from tvm.runtime import ObjectGeneric, convert from tvm.tir import PrimExpr, Buffer, BufferLoad, IntImm, Ramp, BufferRegion from tvm.ir import Span, Range class Slice: """A helper class to present slice information for BufferSlice Parameters ---------- start : Union[PrimExpr, int] The start index. stop : Optional[Union[PrimExpr, int]] The stop index, None means the Slice is an element-wise index step : int The slice step span : Optional[Span] The location of the slice in the source. """ start: Union[PrimExpr, int] stop: Optional[Union[PrimExpr, int]] step: int span: Optional[Span] def __init__( self, start: Union[PrimExpr, int], stop: Optional[Union[PrimExpr, int]] = None, step: int = 1, span: Optional[Span] = None, ): self.start = start self.stop = stop self.step = step self.span = span def as_index_expr(self, report_error: Callable[[str, Union[Span, synr.ast.Span]], None]): """Helper to create index PrimExpr from slice object Parameters ---------- report_error: Callable[[str, Union[Span, synr.ast.Span]], None] The error report func """ if self.stop is None: # scalar index return self.start if self.step < 1: report_error("Slice's step should be positive integer", self.span) lanes = Analyzer().simplify((self.stop - self.start + self.step - 1) // self.step) if not isinstance(lanes, (int, IntImm)): report_error("Slice's lanes should be constant for buffer indices", self.span) if lanes == 1: return self.start return Ramp(self.start, self.step, int(lanes), self.span) class BufferSlice(ObjectGeneric): """A generic object for representing general buffer access. Following cases are supported: - element wise access buffer[i, j], which can be converted to BufferLoad if necessary - slice access buffer[i: i + 1, j : j + 2] - union of element and slice buffer[i, j: j + 2] This node is used in TVMScript to parse BufferLoad, BufferRegion and Realize Parameters ---------- buffer : Buffer The buffer. indices : List[Union[Slice, PrimExpr, int]] The access indexes can be slice, PrimExpr or int. report_error: Callable[[str, Union[Span, synr.ast.Span]], None] The error report func span : Optional[Span] The location of the buffer access in the source. 
""" buffer: Buffer slices: List[Slice] report_error: Callable[[str, Union[Span, synr.ast.Span]], None] span: Optional[Span] def __init__( self, buffer: Buffer, indices: List[Union[Slice, PrimExpr, int]], report_error: Callable[[str, Union[Span, synr.ast.Span]], None], span: Optional[Span] = None, ): def check_index(index: Union[int, PrimExpr]): """Check input index is non-negative integer or PrimExpr""" if isinstance(index, int): if index < 0: report_error("Negative index is not allowed during buffer access", span) elif isinstance(index, PrimExpr): element_dtype = index.dtype.split("x", maxsplit=1)[0] if element_dtype[:3] != "int": report_error( "index expected an integer type PrimExpr but got " + str(index.dtype), index.span, ) else: report_error( "Unsupported index type, expected int or tvm.tir.PrimExpr, but got " + str(type(index)), span, ) slices: List[Union[Slice, BufferSlice]] = [] for index in indices: if isinstance(index, Slice): index.start, index.stop = [convert(_) for _ in [index.start, index.stop]] check_index(index.start) check_index(index.stop) slices.append(index) elif isinstance(index, (PrimExpr, int)): check_index(index) slices.append(Slice(index)) elif isinstance(index, BufferSlice): buffer_load = index.asobject() check_index(buffer_load) slices.append(Slice(buffer_load)) else: report_error( "Unsupported index type for BufferSlice, " + "expected int, tvm.tir.PrimExpr, tvm.tir.Slice, but got " + str(type(index)), span, ) self.buffer = buffer self.slices = slices self.report_error = report_error self.span = span def __str__(self): regions: List[str] = [] for s in self.slices: if s.stop is None: regions.append(str(s.start)) else: regions.append(str(s.start) + ": " + str(s.stop)) return self.buffer.name + "[" + ", ".join(regions) + "]" def asobject(self) -> BufferLoad: """Convert object.""" indices = [s.as_index_expr(self.report_error) for s in self.slices] return BufferLoad(self.buffer, indices, span=self.span) def as_buffer_region(self, analyzer: Optional[Analyzer] = None) -> BufferRegion: """Construct BufferRegion from BufferSlice Parameters ---------- analyzer : Optional[tvm.arith.Analyzer] The analyzer for simplifying. If not provided, the method will construct a new one Returns ------- buffer_region : BufferRegion The constructed BufferRegion. """ region: List[Range] = [] for s in self.slices: start = s.start if isinstance(s.start, PrimExpr) else IntImm("int32", s.start) extent = IntImm(start.dtype, 1) if s.stop is None else s.stop - s.start if not analyzer: analyzer = Analyzer() if isinstance(extent, PrimExpr): extent = analyzer.simplify(extent) if s.step != 1: self.report_error("BufferRegion do not support non-trivial stride", s.span) region.append(Range.from_min_extent(start, extent, span=s.span)) return BufferRegion(self.buffer, region) def astype(self, dtype: str, span: Optional[Span] = None) -> PrimExpr: return self.asobject().astype(dtype, span) @property def dtype(self) -> str: """Return the dtype referenced by the slice. Implemented as a property so that ``slice.dtype`` has the same calling convention as ``primexpr.dtype``. This allows a BufferSlice object can be assigned to a variable without requiring a type annotation on the variable, similar to other expressions. """ return self.asobject().dtype
https://github.com/zk-ml/tachikoma
python/tvm/script/parser_v1/tir/prim_func.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """TVM Script Interface for PrimFunc""" import inspect from typing import Callable from tvm.tir.function import PrimFunc from ..parser import from_source def prim_func(input_func: Callable) -> PrimFunc: """Decorate a python function as tvm script. Parameters ---------- func : input_func The function to be parsed. Returns ------- output : PrimFunc The result functions. """ if inspect.isfunction(input_func): result = from_source(input_func) result.__name__ = input_func.__name__ result.__qualname__ = input_func.__qualname__ return result raise TypeError("Only function definitions are supported.")
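

# ---------------------------------------------------------------------------
# Usage sketch (editor's addition): the decorator parses eagerly and keeps the
# python name, which is what `ir_module` relies on when collecting several
# decorated functions into one IRModule. Illustrative only.
import tvm
from tvm.script import tir as T


@T.prim_func
def _fill(a: T.handle) -> None:
    A = T.match_buffer(a, (4,), "float32")
    for i in T.serial(0, 4):
        A[i] = 1.0


assert isinstance(_fill, tvm.tir.PrimFunc) and _fill.__name__ == "_fill"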
https://github.com/zk-ml/tachikoma
python/tvm/script/parser_v1/tir/scope_handler.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """TVM Script Parser Scope Handler Classes""" # pylint: disable=redefined-builtin, unused-argument, invalid-name, relative-beyond-top-level from typing import Tuple, Any, Callable, Optional, List, Union, Mapping import synr import numpy as np import tvm.tir from tvm.runtime import Object, String, convert from tvm.ir import Span, Range from tvm.tir import Stmt, PrimExpr, IterVar, Var, Buffer, BufferRegion, ForKind from .node import BufferSlice from ..context_maintainer import ContextMaintainer from ..registry import register from ..utils import ( get_param_list, tvm_span_from_synr, call_with_error_reporting, ) class ScopeHandler: """Base class for all scope handlers""" def __init__(self, func: Callable): self.func: Callable = func self.body: Optional[Stmt] = None self.node: Optional[synr.ast.Node] = None self.context: Optional[ContextMaintainer] = None def signature(self) -> Tuple[str, Tuple[list, list, Any]]: return "tir." + self.func.__name__, get_param_list(self.func) def enter_scope( self, node: synr.ast.Node, context: ContextMaintainer, arg_list: List[Any], span: synr.ast.Span, ): pass def exit_scope( self, node: synr.ast.Node, context: ContextMaintainer, arg_list: List[Any], span: synr.ast.Span, ): self.node = node self.context = context return call_with_error_reporting( context.report_error, span, self.func, *arg_list, span=tvm_span_from_synr(span) ) class WithScopeHandler(ScopeHandler): """Base class for all with scope handlers""" def __init__(self, func, concise_scope, def_symbol): super().__init__(func) self.concise_scope = concise_scope self.def_symbol = def_symbol @staticmethod def get_optional_vars(node, context): """Get a list synr.ast.With's optional_vars""" assert isinstance( node, synr.ast.With ), f"WithScopeHandler expected synr.ast.With but got {type(node)}" if isinstance(node.lhs, list): for var in node.lhs: if not isinstance(var, synr.ast.Var): context.report_error( f"Invalid optional var definition, expected Var but got {type(var)}", node.span, ) vars = node.lhs else: context.report_error( f"Invalid optional var definition, expected list of Var but got {type(node.lhs)}", node.span, ) return vars @register class Allocate(WithScopeHandler): """With scope handler T.allocate(extents, dtype, scope, condition, annotations)""" def __init__(self): def allocate(extents, dtype, scope, condition=True, annotations=None, span=None): condition = tvm.runtime.convert(condition) scope = tvm.runtime.convert(scope) return tvm.tir.Allocate( self.buffer_var, dtype, extents, condition, self.body, annotations=annotations, span=span, ) super().__init__(allocate, concise_scope=True, def_symbol=True) self.buffer_var = None def enter_scope( self, node: synr.ast.Node, context: ContextMaintainer, arg_list: List[Any], span: 
synr.ast.Span, ): # define buffer vars in symbol table if isinstance(node, synr.ast.With): vars = WithScopeHandler.get_optional_vars(node, context) if len(vars) != 1: context.report_error(f"Unexpected number of vars: 1 vs. {len(vars)}", node.span) name = vars[0].id.name var_span = vars[0].id.span elif isinstance(node, synr.ast.Assign): if len(node.lhs) != 1: context.report_error(f"Unexpected number of vars: 1 vs. {len(node.lhs)}", node.span) name = node.lhs[0].id.name var_span = node.lhs[0].id.span else: raise Exception("Internal Bug") def setup_buffer_var( extents, dtype, scope, condition=True, annotations=None, span: Span = None ): """Setup buffer var for a given type.""" buffer_ptr_type = tvm.ir.PointerType(tvm.ir.PrimType(dtype), scope) self.buffer_var = tvm.tir.Var(name, buffer_ptr_type, span) setup_buffer_var(*arg_list, span=tvm_span_from_synr(var_span)) context.update_symbol(name, self.buffer_var, node) @register class AllocateConst(WithScopeHandler): """With scope handler T.allocate_const(data, extents, dtype, condition) TIR constant node to represent non-scalar constant """ def __init__(self): def allocate_const(raw_data, dtype, shape, annotations=None, span=None): list_data = [] for i in raw_data: list_data.append(i.value) nd_data = tvm.nd.array(np.asarray(list_data, dtype=dtype)) n = tvm.tir.AllocateConst( self.buffer_var, dtype, shape, nd_data, self.body, annotations=annotations, span=span, ) return n super().__init__(allocate_const, concise_scope=True, def_symbol=True) self.buffer_var = None def enter_scope( self, node: synr.ast.Node, context: ContextMaintainer, arg_list: List[Any], span: synr.ast.Span, ): # define buffer vars in symbol table if isinstance(node, synr.ast.With): vars = WithScopeHandler.get_optional_vars(node, context) if len(vars) != 1: context.report_error(f"Unexpected number of vars: 1 vs. {len(vars)}", node.span) name = vars[0].id.name var_span = vars[0].id.span elif isinstance(node, synr.ast.Assign): if len(node.lhs) != 1: context.report_error(f"Unexpected number of vars: 1 vs. {len(node.lhs)}", node.span) name = node.lhs[0].id.name var_span = node.lhs[0].id.span else: raise Exception("Internal Bug") def setup_buffer_var(data, dtype, shape, annotations: dict = None, span: Span = None): """Setup buffer var for a given type.""" buffer_ptr_type = tvm.ir.PointerType(tvm.ir.PrimType(dtype)) self.buffer_var = tvm.tir.Var(name, buffer_ptr_type, span) setup_buffer_var(*arg_list, span=tvm_span_from_synr(var_span)) context.update_symbol(name, self.buffer_var, node) @register class DeclBuffer(WithScopeHandler): """Special Stmt decl_buffer(shape, dtype, data, strides, elem_offset, scope, align, offset_factor, buffer_type, axis_separators) Example ------- .. 
code-block:: python A = T.decl_buffer((128, 128), dtype="float32") """ def __init__(self): def decl_buffer( shape, dtype="float32", data=None, strides=None, elem_offset=None, scope="global", align=-1, offset_factor=0, buffer_type="default", axis_separators=None, span=None, ): decl_buffer = tvm.tir.DeclBuffer(self.buffer, self.body, span=span) if data is None: # when data is not specified, the buffer is implicitly allocated return tvm.tir.Allocate( self.buffer.data, dtype, shape, tvm.runtime.convert(True), decl_buffer, span=span, ) return decl_buffer super().__init__(decl_buffer, concise_scope=True, def_symbol=True) def enter_scope( self, node: synr.ast.Node, context: ContextMaintainer, arg_list: List[Any], span: synr.ast.Span, ): # define buffer vars in symbol table if isinstance(node, synr.ast.With): vars = WithScopeHandler.get_optional_vars(node, context) if len(vars) != 1: context.report_error(f"Unexpected number of vars: 1 vs. {len(vars)}", node.span) name = vars[0].id.name var_span = vars[0].id.span elif isinstance(node, synr.ast.Assign): if len(node.lhs) != 1: context.report_error(f"Unexpected number of vars: 1 vs. {len(node.lhs)}", node.span) name = node.lhs[0].id.name var_span = node.lhs[0].id.span else: raise Exception("Internal Bug") def setup_buffer( shape, dtype, data, strides, elem_offset, scope, align, offset_factor, buffer_type, axis_separators, span: Span = None, ): self.buffer = tvm.tir.decl_buffer( shape=shape, dtype=dtype, data=data, strides=strides, elem_offset=elem_offset, scope=scope, data_alignment=align, offset_factor=offset_factor, buffer_type=buffer_type, axis_separators=axis_separators, name=name, span=span, ) setup_buffer(*arg_list, span=tvm_span_from_synr(var_span)) context.update_symbol(name, self.buffer, node) @register class LaunchThread(WithScopeHandler): """With scope handler T.launch_thread(env_var, extent)""" def __init__(self): def launch_thread(env_var, extent, span): extent = tvm.runtime.convert(extent, span=span) thread_id = self.context.func_var_env_dict[env_var] attr_key = "virtual_thread" if thread_id == "vthread" else "thread_extent" return tvm.tir.AttrStmt( IterVar( (0, extent), env_var, getattr(IterVar, "ThreadIndex"), thread_id, span=span, ), attr_key, extent, self.body, span=span, ) super().__init__(launch_thread, concise_scope=True, def_symbol=False) @register class Realize(WithScopeHandler): """With scope handler T.realize(buffer_bounds, scope, condition)""" def __init__(self): def realize( buffer_slice: BufferSlice, scope: str, condition: bool = True, span: bool = None ): assert self.context, "call 'exit_scope' before 'enter_scope'" buffer: Buffer = buffer_slice.buffer bounds: List[Range] = [] for s in buffer_slice.slices: min: Union[PrimExpr, int] = s.start extent: Union[PrimExpr, int] = 1 if s.stop is None else s.stop - s.start if isinstance(extent, PrimExpr): extent = self.context.analyzer.simplify(extent) bounds.append(Range.from_min_extent(min, extent, span=s.span)) scope = tvm.runtime.convert(scope, span=span) return tvm.tir.AttrStmt( buffer, "realize_scope", scope, tvm.tir.BufferRealize(buffer, bounds, condition, self.body, span=span), span=span, ) super().__init__(realize, concise_scope=True, def_symbol=False) @register class Attr(WithScopeHandler): """With scope handler T.attr(attr_node, attr_key, value)""" def __init__(self): def attr(attr_node, attr_key, value, span): attr_node = tvm.runtime.convert(attr_node, span=span) value = tvm.runtime.convert(value, span=span) return tvm.tir.AttrStmt(attr_node, attr_key, value, self.body, 
span=span) super().__init__(attr, concise_scope=True, def_symbol=False) @register class AssertHandler(WithScopeHandler): """With scope handler T.Assert(condition, message)""" def __init__(self): def Assert(condition, message, span): return tvm.tir.AssertStmt(condition, tvm.runtime.convert(message), self.body, span=span) super().__init__(Assert, concise_scope=True, def_symbol=False) @register class Let(WithScopeHandler): """With scope handler T.let(var, value)""" def __init__(self): def let(var, value, span): return tvm.tir.LetStmt(var, value, self.body, span=span) super().__init__(let, concise_scope=False, def_symbol=False) def __call__(self, var: tvm.tir.Var, value: tvm.tir.PrimExpr, body: tvm.tir.PrimExpr): return tvm.tir.Let(var, value, body) @register class Block(WithScopeHandler): """With scope handler T.block(name)""" def __init__(self): def block(name_hint: str = "", span: Optional[Span] = None): assert ( self.node and self.context and self.body ), "call 'exit_scope' before 'enter_scope'" block_info = self.context.block_info_stack[-1] # create block read/write regions reads: List[BufferRegion] = ( [read.as_buffer_region() for read in block_info.reads] if block_info.reads else [] ) writes: List[BufferRegion] = ( [write.as_buffer_region() for write in block_info.writes] if block_info.writes else [] ) region_detect_mask: int = (block_info.reads is None) | ( (block_info.writes is None) << 1 ) annotations = {} if block_info.annotations is None else block_info.annotations if region_detect_mask != 0: annotations["tir.script_parsing_detect_access"] = region_detect_mask inner = tvm.tir.Block( block_info.iter_vars, reads, writes, name_hint, self.body, block_info.init, block_info.alloc_buffers, block_info.match_buffers, annotations, span, ) assert len(block_info.iter_vars) == len(block_info.iter_values) predicate = ( tvm.tir.const(True, "bool") if block_info.predicate is None else block_info.predicate ) body = tvm.tir.BlockRealize(block_info.iter_values, predicate, inner, span) return body super().__init__(func=block, concise_scope=False, def_symbol=True) self.block_vars = None def enter_scope( self, node: synr.ast.Node, context: ContextMaintainer, arg_list: List[Any], span: synr.ast.Span, ): # define block vars assert isinstance( node, synr.ast.With ), f"BlockScopeHandler expected to work on synr.ast.With but got {type(node)}" optional_vars = [var.id.name for var in WithScopeHandler.get_optional_vars(node, context)] if optional_vars: context.report_error( f"Block expected no optional_vars (e.g., `x` in `with block() as x`), " f"but got {optional_vars}", node.span, ) @register class InitBlock(WithScopeHandler): """With scope handler T.init()""" def __init__(self): def init(span: Span = None): assert self.context, "call 'exit_scope' before 'enter_scope'" if self.context.block_info_stack[-2].init is not None: self.context.report_error("Duplicate init block declaration", span) self.context.block_info_stack[-2].init = self.body super().__init__(func=init, concise_scope=False, def_symbol=True) class LoopInfo: """Helper class for loop information""" loop_var: Var begin: PrimExpr extent: PrimExpr kind: ForKind thread_binding: Optional[str] annotations: Optional[Mapping[str, Object]] def __init__( self, begin: PrimExpr, extent: PrimExpr, kind: ForKind, thread_binding: Optional[str] = None, annotations: Optional[Mapping[str, Object]] = None, ) -> None: self.begin = begin self.extent = extent self.kind = kind self.thread_binding = thread_binding self.annotations = annotations class 
ForScopeHandler(ScopeHandler): """Base class for all for scope handlers""" def __init__(self, func): super().__init__(func) self.loop_vars: List[Var] = [] self.loop_info: List[LoopInfo] = [] def enter_scope( self, node: synr.ast.Node, context: ContextMaintainer, arg_list: List[Any], span: synr.ast.Span, ): assert isinstance( node, synr.ast.For ), f"ForScopeHandler expected synr.ast.For but got {type(node)}" loop_var_names = list() spans = list() if isinstance(node.lhs, synr.ast.Var): loop_var_names.append(node.lhs.id.name) spans.append(tvm_span_from_synr(node.lhs.id.span)) elif isinstance(node.lhs, list): for elt in node.lhs: if not isinstance(elt, synr.ast.Var): context.report_error( f"Invalid loop var. Expected a var, but got {type(elt)}", elt.span ) loop_var_names.append(elt.id.name) spans.append(tvm_span_from_synr(elt.id.span)) else: context.report_error( f"Invalid loop var. Expected var or list of vars as lhs, but got {type(node.lhs)}", span, ) self.node = node self.context = context # collect loop infos by calling self.func call_with_error_reporting(context.report_error, span, self.func, *arg_list) if len(loop_var_names) != len(self.loop_info): self.context.report_error( f"Inconsistent number of vars and loops, got {len(loop_var_names)} " + f"vs {len(self.loop_info)}", self.node.span, ) # generate loop vars self.loop_vars = [] for name, lv_span, li in zip(loop_var_names, spans, self.loop_info): if not li.begin.dtype.startswith("int"): raise NotImplementedError(f"Unsupported dtype in loop begin: {li.begin.dtype}") if not li.extent.dtype.startswith("int"): raise NotImplementedError(f"Unsupported dtype in loop extent: {li.extent.dtype}") dtype = "int64" if "int64" in [li.begin.dtype, li.extent.dtype] else "int32" self.loop_vars.append(tvm.te.var(name, dtype=dtype, span=lv_span)) for loop_var, loop_info in zip(self.loop_vars, self.loop_info): context.update_symbol(loop_var.name, loop_var, node) context.loop_stack[loop_var] = Range.from_min_extent(loop_info.begin, loop_info.extent) def exit_scope( self, node: synr.ast.Node, context: ContextMaintainer, arg_list: List[Any], span: synr.ast.Span, ): assert self.loop_vars, "call 'exit_scope' before 'enter_scope'" for loop_var in self.loop_vars: context.loop_stack.pop(loop_var) # Use assert here since we have check it in `enter_scope` assert len(self.loop_vars) == len(self.loop_info) body = self.body for var, info in zip(reversed(self.loop_vars), reversed(self.loop_info)): body = tvm.tir.For( var, info.begin, info.extent, info.kind, body, info.thread_binding, info.annotations, span=tvm_span_from_synr(span), ) return body def create_loop_info( self, begin: Optional[PrimExpr], end: PrimExpr, kind: ForKind, thread_binding: Optional[str] = None, annotations: Optional[Mapping[str, Object]] = None, ) -> None: """ Helper function for creating For in TVM Script parser. Parameters ---------- begin : Optional[PrimExpr] The beginning value. If None, it will be set to 0. end : PrimExpr The endding value. kind : ForKind The type of the for. thread_binding: Optional[str] The thread this loop binds to. annotations : Optional[Mapping[str, Object]] Additional annotation hints. span : Optional[Span] The location of this for in the source code. Returns ------- for : For The constructed For. 
""" end = convert(end) if begin is None: begin = tvm.tir.const(0, end.dtype) else: begin = convert(begin) assert self.context and self.node, "call 'exit_scope' before 'enter_scope'" extent = ( end if self.context.analyzer.can_prove_equal(begin, 0) else self.context.analyzer.simplify(end - begin) ) self.annotations: Mapping[str, Object] = {} if annotations is not None: self.annotations = { key: String(val) if isinstance(val, str) else val for key, val in annotations.items() } self.loop_info.append(LoopInfo(begin, extent, kind, thread_binding, annotations)) @register class Serial(ForScopeHandler): """For scope handler T.serial(begin, end, annotations)""" def __init__(self): def serial( begin: PrimExpr, end: PrimExpr = None, annotations: Optional[Mapping[str, Object]] = None, ): if end is None: end, begin = begin, end self.create_loop_info(begin, end, ForKind.SERIAL, annotations=annotations) super().__init__(serial) @register class Parallel(ForScopeHandler): """For scope handler T.parallel(begin, end, annotations)""" def __init__(self): def parallel( begin: PrimExpr, end: PrimExpr = None, annotations: Optional[Mapping[str, Object]] = None, ): if end is None: end, begin = begin, end self.create_loop_info(begin, end, ForKind.PARALLEL, annotations=annotations) super().__init__(parallel) @register class Vectorized(ForScopeHandler): """For scope handler T.vectorized(begin, end, annotations)""" def __init__(self): def vectorized( begin: PrimExpr, end: PrimExpr = None, annotations: Optional[Mapping[str, Object]] = None, ): if end is None: end, begin = begin, end self.create_loop_info(begin, end, ForKind.VECTORIZED, annotations=annotations) super().__init__(vectorized) @register class Unroll(ForScopeHandler): """For scope handler T.unroll(begin, end, annotations)""" def __init__(self): def unroll( begin: PrimExpr, end: PrimExpr = None, annotations: Optional[Mapping[str, Object]] = None, ): if end is None: end, begin = begin, end self.create_loop_info(begin, end, ForKind.UNROLLED, annotations=annotations) super().__init__(unroll) @register class ThreadBinding(ForScopeHandler): """For scope handler T.thread_binding(begin, end, thread, annotations)""" def __init__(self): def thread_binding( begin: PrimExpr, end: PrimExpr = None, thread: str = None, annotations: Optional[Mapping[str, Object]] = None, ): if thread is None: if isinstance(end, str): # handle case like thread_binding(128, "threadIdx.x") thread = end end = None else: raise ValueError("Thread cannot be None for thread_binding") if end is None: end, begin = begin, end thread_iter_var = IterVar(None, None, IterVar.ThreadIndex, thread) self.create_loop_info( begin, end, ForKind.THREAD_BINDING, thread_binding=thread_iter_var, annotations=annotations, ) super().__init__(thread_binding) @register class RangeHandler(ForScopeHandler): """For scope handler range(begin, end, annotations) Note that tir.range is totally the same as T.serial """ def __init__(self): def for_range( begin: PrimExpr, end: PrimExpr = None, annotations: Optional[Mapping[str, Object]] = None, ): if end is None: end, begin = begin, end self.create_loop_info(begin, end, ForKind.SERIAL, annotations=annotations) super().__init__(for_range) def signature(self): return "range", get_param_list(self.func) @register class Grid(ForScopeHandler): """For scope handler T.grid(extents)""" def __init__(self): def grid(*extents: List[PrimExpr]): for extent in extents: self.create_loop_info(None, extent, ForKind.SERIAL) super().__init__(grid)
https://github.com/zk-ml/tachikoma
python/tvm/script/parser_v1/tir/special_stmt.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """TVM Script Parser Special Stmt Classes""" # pylint: disable=unused-argument, no-self-argument, inconsistent-return-statements # pylint: disable=relative-beyond-top-level from typing import Callable, List, Optional, Tuple, Any, Mapping, Union import synr from synr import ast from tvm.ir.expr import PrimExpr, Range import tvm.tir from tvm.runtime import Object, String from tvm.target import Target from tvm.ir import Span from tvm.tir import IntImm, IterVar, Var from .node import BufferSlice from ..context_maintainer import BlockInfo, ContextMaintainer from ..registry import register from ..utils import ( get_param_list, tvm_span_from_synr, call_with_error_reporting, ) def convert_to_int( value: Union[IntImm, int], arg_name: str, report_error: Callable, span: Union[Span, synr.ast.Span], ) -> int: """convert a const int or TVM IntImm to Python int. Reports an error when input cannot be converted to int. Parameters ---------- value : Union[tvm.tir.IntImm, int] The input value to be converted. arg_name : str Function argument name for error reporting. report_error: Callable The report error function handle span : Union[synr.ast.Span, tvm.ir.Span] Location of the error """ if isinstance(value, IntImm): return value.value if isinstance(value, int): return value report_error( f"Expected int or IntImm for {arg_name}, but got {str(type(value))}", span, ) class SpecialStmt: """Base class for all Special Stmts""" def __init__(self, func: Callable, def_symbol: bool): self.func: Callable = func self.def_symbol: bool = def_symbol self.node: Optional[synr.ast.Node] = None self.context: Optional[ContextMaintainer] = None def signature(self) -> Tuple[str, Tuple[list, list, Any]]: return "tir." + self.func.__name__, get_param_list(self.func) def handle( self, node: ast.Node, context: ContextMaintainer, arg_list: List[Any], span: synr.ast.Span, ): self.node = node self.context = context return call_with_error_reporting( context.report_error, span, self.func, *arg_list, span=tvm_span_from_synr(span) ) @register class MatchBuffer(SpecialStmt): """Special Stmt match_buffer(param, shape, dtype, data, strides, elem_offset, scope, align, offset_factor, buffer_type, axis_separators) Note ---- This Special Stmt will perform different behavior depends on the type of param. If the param is a var in function parameter, it will create a buffer from DLTensor. Else if the param is a subregion of other buffers, then create a subregion match inside a block. Example ------- Match buffer from function parameter .. code-block:: python A = T.match_buffer(a, (128, 128), dtype="float32") Match buffer from Buffer subregion .. 
code-block:: python A = T.match_buffer(B[0:128, i * 128 : i * 128 + 128], (128, 128), dtype="float32") """ def __init__(self): def match_buffer( param, shape=None, dtype=None, data=None, strides=None, elem_offset=None, scope="global", align=-1, offset_factor=0, buffer_type="default", axis_separators=None, span=None, ): if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) == 1: self.context.report_error( "`match_buffer` must be assigned to a single buffer, " "e.g. A = match_buffer(...)", self.node.span, ) if strides is None: strides = [] align = convert_to_int(align, "align", self.context.report_error, self.node.span) offset_factor = convert_to_int( offset_factor, "offset_factor", self.context.report_error, self.node.span ) buffer_name: str = self.node.lhs[0].id.name if isinstance(param, tvm.tir.Var): if shape is None: self.context.report_error( "Shape must be specified when binding input param", self.node.rhs.span, ) if dtype is None: dtype = "float32" buffer = tvm.tir.decl_buffer( shape, dtype, buffer_name, data, strides, elem_offset, scope, align, offset_factor, buffer_type, axis_separators, span=span, ) if param not in self.context.func_params: self.context.report_error( "Can not bind non-input param to buffer", self.node.rhs.params[0].span ) self.context.func_buffer_map[param] = buffer elif isinstance(param, BufferSlice): buffer_region = param.as_buffer_region() if shape is None: shape = [dim.extent for dim in buffer_region.region] if dtype is None: dtype = buffer_region.buffer.dtype if elem_offset is None and offset_factor == 0: offset_factor = 1 buffer = tvm.tir.decl_buffer( shape, dtype, buffer_name, data, strides, elem_offset, scope, align, offset_factor, buffer_type, axis_separators, span=span, ) self.context.current_block_scope().match_buffers.append( tvm.tir.MatchBufferRegion(buffer, buffer_region) ) else: self.context.report_error( "The source of match_buffer expected Var or BufferSlice, but got " + str(type(param)), self.node.rhs.params[0].span, ) self.context.update_symbol(buffer_name, buffer, self.node) super().__init__(match_buffer, def_symbol=True) @register class BufferDeclare(SpecialStmt): """Special Stmt buffer_decl(shape, dtype, data, strides, elem_offset, scope, align, offset_factor, buffer_type, axis_separators) Example ------- .. code-block:: python A = T.buffer_decl((128, 128), dtype="float32") """ def __init__(self): def buffer_decl( shape, dtype="float32", data=None, strides=None, elem_offset=None, scope="global", align=-1, offset_factor=0, buffer_type="default", axis_separators=None, span=None, ): if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) == 1: self.context.report_error( "`buffer_decl` must be assigned to a single buffer, e.g. A = buffer_decl(...)", self.node.span, ) if strides is None: strides = [] align = convert_to_int(align, "align", self.context.report_error, self.node.span) offset_factor = convert_to_int( offset_factor, "offset_factor", self.context.report_error, self.node.span ) buffer_name: str = self.node.lhs[0].id.name buffer = tvm.tir.decl_buffer( shape, dtype, buffer_name, data, strides, elem_offset, scope, align, offset_factor, buffer_type, axis_separators, span=span, ) self.context.update_symbol(buffer_name, buffer, self.node) return buffer super().__init__(buffer_decl, def_symbol=True) @register class AllocBuffer(SpecialStmt): """Special function alloc_buffer(shape, dtype, data, strides, elem_offset, scope, align, offset_factor, buffer_type, axis_separators) Example ------- .. 
code-block:: python

            A = T.alloc_buffer((128, 128), dtype="float32")
    """

    def __init__(self):
        def alloc_buffer(
            shape,
            dtype="float32",
            data=None,
            strides=None,
            elem_offset=None,
            scope="global",
            align=-1,
            offset_factor=0,
            buffer_type="default",
            axis_separators=None,
            span=None,
        ):
            if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) == 1:
                self.context.report_error(
                    "`alloc_buffer` must be assigned to a single buffer, "
                    "e.g. A = alloc_buffer(...)",
                    self.node.span,
                )
            if strides is None:
                strides = []
            align = convert_to_int(align, "align", self.context.report_error, self.node.span)
            offset_factor = convert_to_int(
                offset_factor, "offset_factor", self.context.report_error, self.node.span
            )
            buffer_name: str = self.node.lhs[0].id.name
            buffer = tvm.tir.decl_buffer(
                shape,
                dtype,
                buffer_name,
                data,
                strides,
                elem_offset,
                scope,
                align,
                offset_factor,
                buffer_type,
                axis_separators,
                span=span,
            )
            if self.context.current_block_scope():
                self.context.current_block_scope().alloc_buffers.append(buffer)
            else:
                # If it is allocated outside all blocks, allocate it under root block.
                self.context.root_alloc_buffers.append(buffer)
            self.context.update_symbol(buffer_name, buffer, self.node)

        super().__init__(alloc_buffer, def_symbol=True)


@register
class BlockReads(SpecialStmt):
    """Special function reads([read_regions], *other_regions)

    Note
    ----
    *other_regions is an unpackable list of BufferSlice to support
    reads syntax sugar like reads(BufferRegion1, BufferRegion2, ...)

    Example
    -------
    .. code-block:: python

        T.reads([A[vi: vi + 4, vk: vk + 4], B[vk: vk + 4, vj]])
    """

    def __init__(self):
        def reads(
            *read_regions: Union[BufferSlice, List[BufferSlice]],
            span: Span = None,
        ):
            assert self.context, "call 'exit_scope' before 'enter_scope'"
            block_scope = self.context.current_block_scope()
            if block_scope is None:
                self.context.report_error(
                    "Expected to declare read regions inside a block.",
                    span,
                )
            if block_scope.reads is not None:
                self.context.report_error(
                    "Duplicate read region declaration, "
                    + "previous one is "
                    + str(", ".join(str(x) for x in block_scope.reads)),
                    span,
                )
            if len(read_regions) > 1:
                for read_region in read_regions:
                    if not isinstance(read_region, BufferSlice):
                        self.context.report_error(
                            "Incorrect input type. Expected *BufferSlice or List[BufferSlice],"
                            + f" but got {type(read_region)}",
                            span,
                        )
            elif len(read_regions) == 1:
                if isinstance(read_regions[0], list):
                    read_regions = read_regions[0]
            block_scope.reads = read_regions

        super().__init__(reads, def_symbol=False)


@register
class BlockWrites(SpecialStmt):
    """Special function writes([write_regions], *other_regions)

    Note
    ----
    *other_regions is an unpackable list of BufferSlice to support
    writes syntax sugar like writes(BufferRegion1, BufferRegion2, ...)

    Example
    -------
    .. code-block:: python

        T.writes([C[vi: vi + 4, vj]])
    """

    def __init__(self):
        def writes(
            *write_regions: Union[BufferSlice, List[BufferSlice]],
            span: Span = None,
        ):
            assert self.context, "call 'exit_scope' before 'enter_scope'"
            block_scope = self.context.current_block_scope()
            if block_scope is None:
                self.context.report_error(
                    "Expected to declare write regions inside a block.",
                    span,
                )
            if block_scope.writes is not None:
                self.context.report_error(
                    "Duplicate write region declaration, "
                    + "previous one is "
                    + str(", ".join(str(x) for x in block_scope.writes)),
                    span,
                )
            if len(write_regions) > 1:
                for write_region in write_regions:
                    if not isinstance(write_region, BufferSlice):
                        self.context.report_error(
                            "Incorrect input type. Expected *BufferSlice or List[BufferSlice],"
                            + f" but got {type(write_region)}",
                            span,
                        )
            elif len(write_regions) == 1:
                if isinstance(write_regions[0], list):
                    write_regions = write_regions[0]
            block_scope.writes = write_regions

        super().__init__(writes, def_symbol=False)


@register
class BlockAttr(SpecialStmt):
    """Special function block_attr({attr_key: attr_value})

    Example
    -------
    .. code-block:: python

        T.block_attr({"double_buffer_scope": 1})
    """

    def __init__(self):
        def block_attr(attrs: Mapping[str, Object], span: Span = None):
            assert self.context, "call 'exit_scope' before 'enter_scope'"
            block_scope = self.context.current_block_scope()
            if block_scope is None:
                self.context.report_error(
                    "Expected to declare block annotations inside a block.",
                    span,
                )
            if block_scope.annotations is not None:
                self.context.report_error(
                    "Duplicate block annotations declaration, "
                    + "previous one is "
                    + str(block_scope.annotations),
                    span,
                )
            attrs = {
                key: String(val) if isinstance(val, str) else val for key, val in attrs.items()
            }
            block_scope.annotations = attrs

        super().__init__(block_attr, def_symbol=False)


class BlockAxis(SpecialStmt):
    """Base class for the special stmts that define block axes.

    Example
    -------
    .. code-block:: python

        vi = T.axis.S(128, i * 4 + j)
    """

    def axis(
        self,
        var_name: str,
        dom: Union[PrimExpr, Range],
        value: PrimExpr,
        iter_type: int,
        span: Optional[Span] = None,
    ) -> None:
        """
        Helper function for creating block axes.

        Parameters
        ----------
        var_name : str
            The name_hint of var
        dom : Union[PrimExpr, Range]
            The iter domain.
        value : PrimExpr
            The binding value
        iter_type : int
            The iteration type.
        span : Optional[Span]
            The location of this axis in the source code.
        """
        assert self.context, "call 'exit_scope' before 'enter_scope'"
        block_scope: BlockInfo = self.context.current_block_scope()
        if block_scope is None:
            self.context.report_error(
                "Expected to declare block axes inside a block.",
                self.node.span,
            )
        if var_name in [iter_var.var.name for iter_var in block_scope.iter_vars]:
            self.context.report_error("Duplicate block axis " + var_name, self.node.span)

        dom = tvm.runtime.convert(dom)
        if isinstance(dom, PrimExpr):
            dom = tvm.ir.Range(dom)
        elif isinstance(dom, tvm.ir.container.Array) and len(dom) == 2:
            dom = tvm.ir.Range(dom[0], dom[1])
        elif not isinstance(dom, tvm.ir.Range):
            self.context.report_error(
                f"Block axis domain expected PrimExpr or Range, but got {type(dom)}",
                self.node.span,
            )
        block_var = tvm.tir.Var(var_name, dtype=dom.extent.dtype)
        value = tvm.runtime.convert(value)
        if not isinstance(value, PrimExpr):
            self.context.report_error(
                f"Block axis value expected PrimExpr, but got {type(value)}",
                self.node.span,
            )
        iter_var = tvm.tir.IterVar(dom, block_var, iter_type)
        block_scope.iter_vars.append(iter_var)
        block_scope.iter_values.append(value)
        self.context.update_symbol(var_name, block_var, self.node)


@register
class BlockAxisSpatial(BlockAxis):
    """Special stmt for defining a spatial block axis axis.spatial(dom, iter_value)

    Example
    -------
    .. code-block:: python

        vi = T.axis.spatial(128, k)
    """

    def __init__(self):
        def axis_spatial(
            dom: Union[PrimExpr, Tuple[PrimExpr, PrimExpr]], value: PrimExpr, span: Span = None
        ):
            if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) == 1:
                self.context.report_error(
                    "`axis.spatial` must be assigned to a var, e.g.
vi = axis.spatial(...)", self.node.span, ) self.axis(self.node.lhs[0].id.name, dom, value, IterVar.DataPar) super().__init__(axis_spatial, def_symbol=True) def signature(self) -> Tuple[str, Tuple[list, list, Any]]: return "tir.axis.spatial", get_param_list(self.func) @register class BlockAxisS(BlockAxis): """The sugar special stmt for defining a spatial block axis axis.S(dom, iter_value) Example ------- .. code-block:: python vi = T.axis.S(128, k) """ def __init__(self): def axis_spatial( dom: Union[PrimExpr, Tuple[PrimExpr, PrimExpr]], value: PrimExpr, span: Span = None ): if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) == 1: self.context.report_error( "`axis.S` must be assigned to a var, e.g. vi = axis.S(...)", self.node.span, ) self.axis(self.node.lhs[0].id.name, dom, value, IterVar.DataPar) super().__init__(axis_spatial, def_symbol=True) def signature(self) -> Tuple[str, Tuple[list, list, Any]]: return "tir.axis.S", get_param_list(self.func) @register class BlockAxisReduce(BlockAxis): """Special stmt for defining a reduce block axis axis.reduce(dom, iter_value) Example ------- .. code-block:: python vi = T.axis.reduce(128, k) """ def __init__(self): def axis_reduce( dom: Union[PrimExpr, Tuple[PrimExpr, PrimExpr]], value: PrimExpr, span: Span = None ): if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) == 1: self.context.report_error( "`axis.reduce` must be assigned` to a var, e.g. vi = axis.reduce(...)", self.node.span, ) self.axis(self.node.lhs[0].id.name, dom, value, IterVar.CommReduce) super().__init__(axis_reduce, def_symbol=True) def signature(self) -> Tuple[str, Tuple[list, list, Any]]: return "tir.axis.reduce", get_param_list(self.func) @register class BlockAxisR(BlockAxis): """The sugar special stmt for defining a reduce block axis axis.R(dom, iter_value) Example ------- .. code-block:: python vi = T.axis.R(128, k) """ def __init__(self): def axis_reduce( dom: Union[PrimExpr, Tuple[PrimExpr, PrimExpr]], value: PrimExpr, span: Span = None ): if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) == 1: self.context.report_error( "`axis.R` must be assigned to a var, e.g. vi = axis.R(...)", self.node.span, ) self.axis(self.node.lhs[0].id.name, dom, value, IterVar.CommReduce) super().__init__(axis_reduce, def_symbol=True) def signature(self) -> Tuple[str, Tuple[list, list, Any]]: return "tir.axis.R", get_param_list(self.func) @register class BlockAxisScan(BlockAxis): """Special stmt for defining a ordered block axis axis.scan(dom, iter_value) Example ------- .. code-block:: python vi = T.axis.scan(128, k) """ def __init__(self): def axis_scan( dom: Union[PrimExpr, Tuple[PrimExpr, PrimExpr]], value: PrimExpr, span: Span = None ): if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) == 1: self.context.report_error( "`axis.scan` must be assigned to a var, e.g. vi = axis.scan(...)", self.node.span, ) self.axis(self.node.lhs[0].id.name, dom, value, IterVar.Ordered) super().__init__(axis_scan, def_symbol=True) def signature(self) -> Tuple[str, Tuple[list, list, Any]]: return "tir.axis.scan", get_param_list(self.func) @register class BlockAxisOpaque(BlockAxis): """Special stmt for defining a opaque block axis axis.opaque(dom, iter_value) Example ------- .. 
code-block:: python vi = T.axis.opaque(128, k) """ def __init__(self): def axis_opaque( dom: Union[PrimExpr, Tuple[PrimExpr, PrimExpr]], value: PrimExpr, span: Span = None ): if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) == 1: self.context.report_error( "`axis.opaque` must be assigned to a var, e.g. vi = axis.opaque(...)", self.node.span, ) self.axis(self.node.lhs[0].id.name, dom, value, IterVar.DimInfo) super().__init__(axis_opaque, def_symbol=True) def signature(self) -> Tuple[str, Tuple[list, list, Any]]: return "tir.axis.opaque", get_param_list(self.func) @register class BlockAxisRemap(BlockAxis): """Special stmt for remapping loops vars to block axes. axis.remap(iter_type, iter_value) Note ---- Iter_type is a string consisting of 'S' and 'R', where 'S' means for spatial and 'R' means for reduce. Example ------- .. code-block:: python vi, vj = T.axis.remap("SS", [i, j]) """ def __init__(self): def axis_remap(iter_types: str, loop_vars: List[tvm.tir.expr.Var], span: Span = None): if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) >= 1: self.context.report_error( "`axis.remap` must be assigned to one or more vars, " "e.g. vi, vj = axis.remap(...)", self.node.span, ) var_num: int = len(self.node.lhs) if var_num != len(iter_types): self.context.report_error( f"`iter_type` expected {var_num} charactor(s), " f"but got {len(iter_types)}: {iter_types}", span, ) if var_num != len(loop_vars): self.context.report_error( f"`iter_type` expected {var_num} loop var(s), " f"but got {len(loop_vars)}: {loop_vars}", span, ) for var, iter_ty, loop_var in zip(self.node.lhs, iter_types, loop_vars): iter_type: int if iter_ty == "S": iter_type = IterVar.DataPar elif iter_ty == "R": iter_type = IterVar.CommReduce else: self.context.report_error( f'`iter_type` only expected "S" (for spatial) or "R" (for reduce), ' f'but got "{iter_ty}"', span, ) if not isinstance(loop_var, tvm.tir.expr.Var): self.context.report_error( f"Values of `axis.remap` expected single loop var, but got {loop_var}", loop_var.span, ) loops = self.context.loop_stack if loop_var not in loops: self.context.report_error( f"Cannot find loop var {loop_var} in loop nesting.", span, ) self.axis(var.id.name, loops[loop_var], loop_var, iter_type) super().__init__(axis_remap, def_symbol=True) def signature(self) -> Tuple[str, Tuple[list, list, Any]]: return "tir.axis.remap", get_param_list(self.func) @register class BlockPredicate(SpecialStmt): """Special function where(predicate) Example ------- .. 
code-block:: python T.where(i < 4) """ def __init__(self): def where(predicate, span=None): assert self.context, "call 'exit_scope' before 'enter_scope'" block_scope = self.context.current_block_scope() if block_scope is None: self.context.report_error( "Expected to declare the predicate inside a block.", span, ) if block_scope.predicate is not None: self.context.report_error( "Duplicate block predicate declaration, " + "previous one is " + str(block_scope.predicate), span, ) block_scope.predicate = predicate super().__init__(where, def_symbol=False) @register class VarDef(SpecialStmt): """Special function for defining a Var""" def __init__(self): def var(dtype, span): assert isinstance( self.node, ast.Assign ), f"VarDef expected ast.Assign but got {type(self.node)}" names = [x.id.name for x in self.node.lhs] if len(names) != 1: self.context.report_error( f"VarDef expected assign to only one var, but got {names}", span ) v = Var(names[0], dtype, span=span) self.context.update_symbol(v.name, v, self.node) super().__init__(var, def_symbol=True) @register class BufferVarDef(SpecialStmt): """Special function for defining a variable of pointer type""" def __init__(self): def buffer_var(dtype, storage_scope, span): assert isinstance( self.node, ast.Assign ), f"BufferVarDef expected ast.Assign but got {type(self.node)}" names = [x.id.name for x in self.node.lhs] if len(names) != 1: self.context.report_error( f"VarDef expected assign to only one var, but got {names}", span ) ptr_type = tvm.ir.PointerType(tvm.ir.PrimType(dtype), storage_scope) v = Var(names[0], ptr_type, span=span) self.context.update_symbol(v.name, v, self.node) super().__init__(buffer_var, def_symbol=True) @register class EnvThread(SpecialStmt): """Bind a var to thread env""" def __init__(self): def env_thread(env_name, span): assert isinstance( self.node, ast.Assign ), f"EnvThread expected ast.Assign but got {type(self.node)}" names = [x.id.name for x in self.node.lhs] if len(names) != 1: self.context.report_error( f"VarDef expected assign to only one var, but got {names}", span ) v = Var(names[0], dtype="int32", span=span) self.context.func_var_env_dict[v] = env_name self.context.update_symbol(v.name, v, self.node) super().__init__(env_thread, def_symbol=True) @register class FuncAttr(SpecialStmt): """Special Stmt for declaring the DictAttr of PrimFunc Example ------- .. code-block:: python T.func_attr({"tir.noalias": True, "global_symbol"}) """ def __init__(self): def func_attr(dict_attr, span): self.context.func_dict_attr = dict_attr super().__init__(func_attr, def_symbol=False) @register class PreflattenedBufferMap(SpecialStmt): """Special Stmt for declaring the PrimFunc::preflattened_buffer_map Example ------- .. code-block:: python A0 = T.match_buffer(A, (48,), dtype="float32") T.preflattened_buffer_map(A, (1, 4, 4, 3), elem_offset=1, align=4, dtype="float32") """ def __init__(self): def preflattened_buffer( postflattened, shape, dtype="float32", data=None, strides=None, elem_offset=None, scope="global", align=-1, offset_factor=0, buffer_type="default", span=None, ): param = None for key, value in self.context.func_buffer_map.items(): if value.same_as(postflattened): param = key break assert ( param is not None ), f"Post-flatten buffer {postflattened.name} does not appear in the buffer map." 
if data is None: data = self.context.func_buffer_map[param].data buffer_name: str = f"{postflattened.name}_preflatten" if align != -1: if isinstance(align, IntImm): align = align.value else: assert isinstance(align, int), f"align: want int or IntImm, got {align!r}" if offset_factor != 0: if isinstance(offset_factor, IntImm): offset_factor = offset_factor.value else: assert isinstance( offset_factor, int ), f"offset_factor: want int or IntImm, got {offset_factor!r}" preflattened = tvm.tir.decl_buffer( shape, dtype, buffer_name, data, strides, elem_offset, scope, align, offset_factor, buffer_type, span=span, ) self.context.func_preflattened_buffer_map[param] = preflattened super().__init__(preflattened_buffer, def_symbol=False) @register class TargetAttrValue(SpecialStmt): """Special Stmt for target attr value. Example ------- .. code-block:: python T.target("llvm") """ def __init__(self): def target(*args, span): self.context.report_error(f"T.target should not appear as a stmt", span) super().__init__(target, def_symbol=False) def __call__(self, target_config): if not isinstance(target_config, (str, dict)): raise ValueError( f"T.target expected a config dict or string, but got {type(target_config)}" ) return Target(target_config)
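# ---------------------------------------------------------------------------
# Editor's note: the short usage sketch below is not part of the original file.
# It shows how the special statements above surface in TVMScript source --
# match_buffer, alloc_buffer, reads/writes, axis.remap and func_attr -- assuming
# a standard TVM build where the TIR dialect is imported as `T`.
from tvm.script import tir as T


@T.prim_func
def scaled_copy(a: T.handle, c: T.handle) -> None:
    T.func_attr({"global_symbol": "scaled_copy", "tir.noalias": True})
    A = T.match_buffer(a, (128, 128), dtype="float32")  # handled by MatchBuffer
    C = T.match_buffer(c, (128, 128), dtype="float32")
    B = T.alloc_buffer((128, 128), dtype="float32")  # handled by AllocBuffer
    for i, j in T.grid(128, 128):
        with T.block("scale"):
            vi, vj = T.axis.remap("SS", [i, j])  # handled by BlockAxisRemap
            T.reads([A[vi, vj]])  # handled by BlockReads
            T.writes([B[vi, vj]])  # handled by BlockWrites
            B[vi, vj] = A[vi, vj] * T.float32(2)
    for i, j in T.grid(128, 128):
        with T.block("copy"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj]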
https://github.com/zk-ml/tachikoma
python/tvm/script/parser_v1/tir/ty.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM Script Parser Typing Class for TIR

This module provides typing class for TVM script type annotation usage, it can be
viewed as a wrapper for uniform Type system in IR
"""
# pylint: disable=invalid-name
from numbers import Integral

import tvm
from .special_stmt import SpecialStmt, convert_to_int


class TypeGeneric:  # pylint: disable=too-few-public-methods
    """Base class for all the TVM script typing class"""

    def evaluate(self):
        """Return an actual ir.Type Object that this Generic class wraps"""
        raise TypeError("Cannot get tvm.Type from a generic type")

    def require_type_generic_at(self, idx):  # pylint: disable=unused-argument
        """If True, the `idx`th type argument must be TypeGeneric"""
        return True

    # This function is added here to avoid a pylint error
    # for T.int/float below not being callable
    def __call__(self):
        raise NotImplementedError()


class ConcreteType(TypeGeneric):  # pylint: disable=too-few-public-methods, abstract-method
    """TVM script typing class for uniform Type objects

    Params
    ------
    vtype: Union[str, tvm.ir.Type]
        The IR type represented by the type annotation. If a string
        (e.g. "float32"), this represents a `ir.PrimType` generated from
        that string. If a `ir.Type` is provided, this represents the type
        provided.
    """

    def __init__(self, vtype):
        if isinstance(vtype, tvm.ir.Type):
            self.type = vtype
        else:
            self.type = tvm.ir.PrimType(vtype)

    def __call__(self, *args):  # pylint: disable=arguments-differ
        pass

    def evaluate(self):
        return self.type


class VoidType(ConcreteType):  # pylint: disable=too-few-public-methods, abstract-method
    """TVM script typing class for void type"""

    def __init__(self):
        super().__init__("")


class GenericPtrType(TypeGeneric):  # pylint: disable=abstract-method
    """TVM script typing class generator for PtrType

    [] operator is overloaded, accepts a ConcreteType and an optional
    storage scope string, returns a ConcreteType wrapping PtrType
    """

    def __getitem__(self, args):
        if isinstance(args, TypeGeneric):
            args = [args]
        if len(args) == 1:
            vtype, scope = args[0], "global"
        elif len(args) == 2:
            vtype, scope = args[0], args[1]
        else:
            raise TypeError("Illegal type argument num for Ptr")
        if not isinstance(vtype, TypeGeneric):
            raise TypeError(f"Ptr expects a type argument, but received {type(vtype).__name__}")
        if not isinstance(scope, str):
            raise TypeError("Ptr expects the storage scope argument to be a string")
        return ConcreteType(tvm.ir.PointerType(vtype.evaluate(), scope))

    def require_type_generic_at(self, idx):
        return idx != 1  # the second argument is storage scope for Ptr


class GenericTupleType(TypeGeneric):  # pylint: disable=abstract-method
    """TVM script typing class generator for TupleType

    [] operator is overloaded, accepts a list of ConcreteType and returns
    a ConcreteType wrapping TupleType
    """

    def __getitem__(self, vtypes):
        if isinstance(vtypes, TypeGeneric):
            vtypes = [vtypes]
        return ConcreteType(tvm.ir.TupleType([vtype.evaluate() for vtype in vtypes]))


class GenericBufferType(SpecialStmt):  # pylint: disable=too-few-public-methods, abstract-method
    """TVM script typing class for uniform Type objects"""

    def __init__(self, vtype):
        def match_buffer_syntax_sugar(
            shape,
            dtype: str = "float32",
            name: str = None,
            data=None,
            strides=None,
            elem_offset=None,
            scope="global",
            align=-1,
            offset_factor=0,
            buffer_type="default",
            axis_separators=None,
            span=None,
        ):
            if strides is None:
                strides = []
            align = convert_to_int(align, "align", self.context.report_error, self.node.span)
            offset_factor = convert_to_int(
                offset_factor, "offset_factor", self.context.report_error, self.node.span
            )
            buffer = tvm.tir.decl_buffer(
                shape,
                dtype,
                name,
                data,
                strides,
                elem_offset,
                scope,
                align,
                offset_factor,
                buffer_type,
                axis_separators,
                span=span,
            )
            return buffer

        self.type = vtype
        super().__init__(match_buffer_syntax_sugar, def_symbol=True)

    def __call__(
        self,
        shape,
        dtype="float32",
        *,
        name: str = None,
        data=None,
        strides=None,
        elem_offset=None,
        scope="global",
        align=-1,
        offset_factor=0,
        buffer_type="default",
        axis_separators=None,
        span=None,
    ):
        """
        This function is for Buffer(...) syntax sugar.
        """
        pass  # pylint: disable=unnecessary-pass

    def __getitem__(self, args):
        """
        This function is for Buffer[...] syntax sugar
        Note that args is the list of all arguments
        """
        if len(args) < 2:
            raise ValueError("T.Buffer[...] needs at least two arguments: shape and dtype.")
        shape = args[0]
        dtype = args[1]
        valid_shape = isinstance(shape, (tvm.ir.PrimExpr, Integral, tuple, list))
        valid_dtype = isinstance(dtype, str)
        if not (valid_shape and valid_dtype):
            raise ValueError(
                "The first argument of T.Buffer[...] needs to be a tuple, "
                "followed by the second argument dtype as a string"
            )


# add all floating point and integer datatypes to the module
for _dtype in ["float", "uint", "int"]:
    for _size in ["8", "16", "32", "64"]:
        for _lanes in ["", "x4", "x8", "x16", "x32", "x64"]:
            _name = _dtype + _size + _lanes
            globals()[_name] = ConcreteType(_name)

# All other DataType annotations are represented with the same string
# as is used by `tvm.runtime.DataType`. This does redefine the Python
# built-in bool, but only within the context of `tvm.script.tir.ty`
# and `tvm.script.tir` modules. The `T.boolean` alias is maintained
# for backwards compatibility.
bool = ConcreteType("bool")  # pylint: disable=redefined-builtin
boolean = bool
handle = ConcreteType("handle")
void = VoidType()
Ptr = GenericPtrType()
Tuple = GenericTupleType()
# we don't have 'buffer' type on the cpp side
# thus 'handle' is used here for convenience's sake
Buffer = GenericBufferType("handle")
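# ---------------------------------------------------------------------------
# Editor's note: a brief sketch (not part of the original file) of how these
# typing helpers are consumed in TVMScript signatures: `T.handle`, `T.float32`
# and the `T.Buffer[...]` sugar below all resolve to the classes defined above.
from tvm.script import tir as T


@T.prim_func
def add_one(A: T.Buffer[(16,), "float32"], B: T.Buffer[(16,), "float32"]) -> None:
    for i in T.serial(16):
        with T.block("add"):
            vi = T.axis.spatial(16, i)
            B[vi] = A[vi] + T.float32(1)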
https://github.com/zk-ml/tachikoma
python/tvm/script/parser_v1/utils.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Helper functions in TVM Script Parser"""

from typing import Callable, List, Any, Optional, Tuple

import inspect
import synr

from tvm.ir import Span, SourceName
from tvm.error import DiagnosticError


def get_param_list(
    func: Callable,
) -> Tuple[List[str], List[Tuple[str, Tuple[Any, ...]]], Optional[str]]:
    """Get the parameter list from definition of function"""
    full_arg_spec: inspect.FullArgSpec = inspect.getfullargspec(func)

    args: List[str]
    defaults: Optional[Tuple[Any, ...]]
    kwonlyargs: List[str]
    args, defaults, kwonlyargs = (
        full_arg_spec.args,
        full_arg_spec.defaults,
        full_arg_spec.kwonlyargs,
    )

    if defaults is None:
        defaults = tuple()

    if full_arg_spec.varkw is not None:
        raise RuntimeError(
            "TVM Script register error : variable keyword argument is not supported now"
        )

    if len(kwonlyargs) == 1 and kwonlyargs[0] == "span":
        pass
    elif not len(kwonlyargs) == 0:
        raise RuntimeError("TVM Script register error : keyword only argument is not supported now")

    pos_only: List[str] = list()
    for arg in args[: len(args) - len(defaults)]:
        if arg != "span":
            pos_only.append(arg)
    kwargs: List[Tuple[str, Tuple[Any, ...]]] = list()
    for default, arg in zip(defaults, args[len(args) - len(defaults) :]):
        if arg != "span":
            kwargs.append((arg, default))

    return pos_only, kwargs, full_arg_spec.varargs


def tvm_span_from_synr(span: synr.ast.Span) -> Span:
    """Convert a synr span to a TVM span"""
    return Span(
        SourceName(span.filename),
        span.start_line,
        span.end_line,
        span.start_column,
        span.end_column,
    )


def synr_span_from_tvm(span: Span) -> synr.ast.Span:
    """Convert a TVM span to a synr span"""
    return synr.ast.Span(
        span.source_name.name,
        span.line,
        span.column,
        span.end_line,
        span.end_column,
    )


def call_with_error_reporting(
    report_error,
    node_span,
    func,
    *args,
    **kwargs,
):
    """Call function with exception handling and report error using node_span"""
    try:
        return func(*args, **kwargs)
    except DiagnosticError:
        raise
    except Exception as err:  # pylint: disable=broad-except
        # printing last non-empty row of error message.
        error_msg = list(filter(None, str(err).split("\n")))[-1]
        report_error(error_msg, node_span)
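# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch, not part of the original file. The
# hypothetical `demo` signature shows how `get_param_list` splits parameters:
# arguments without defaults become positional-only, defaulted ones become
# (name, default) pairs, and a trailing keyword-only `span` is allowed and dropped.
def demo(shape, dtype="float32", *, span=None):
    pass


pos_only, kwargs, varargs = get_param_list(demo)
assert pos_only == ["shape"]
assert kwargs == [("dtype", "float32")]
assert varargs is None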
https://github.com/zk-ml/tachikoma
python/tvm/script/printer/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TVMScript Unified Printer

This package provides a set of APIs to print supported TVM IR into TVMScript
in a roundtrippable way.

https://github.com/apache/tvm-rfcs/blob/main/rfcs/0074-tvmscript-unified-printer.md
"""

from . import _ffi_api
from .entry import script
https://github.com/zk-ml/tachikoma
python/tvm/script/printer/_ffi_api.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.script.printer"""
import tvm._ffi

tvm._ffi._init_api("script.printer", __name__)  # pylint: disable=protected-access
https://github.com/zk-ml/tachikoma
python/tvm/script/printer/doc.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Doc types for TVMScript Unified Printer"""

from enum import IntEnum, unique
from typing import Dict, List, Optional, Sequence, Tuple, Union

from tvm._ffi import register_object
from tvm.runtime import Object, ObjectPath
from tvm.tir import FloatImm, IntImm

from . import _ffi_api


class Doc(Object):
    """Base class of all Docs"""

    @property
    def source_paths(self) -> Sequence[ObjectPath]:
        """
        The list of object paths of the source IR node.

        This is used to trace back to the IR node position where
        this Doc is generated, in order to position the diagnostic message.
        """
        return self.__getattr__("source_paths")  # pylint: disable=unnecessary-dunder-call

    @source_paths.setter
    def source_paths(self, value):
        return _ffi_api.DocSetSourcePaths(self, value)  # type: ignore # pylint: disable=no-member


class ExprDoc(Doc):
    """Base class of all expression Docs"""

    def attr(self, name: str) -> "AttrAccessDoc":
        """
        Create a doc that represents attribute access on self.

        Parameters
        ----------
        name : str
            The attribute name to access

        Returns
        -------
        doc : AttrAccessDoc
        """
        return _ffi_api.ExprDocAttr(self, name)  # type: ignore # pylint: disable=no-member

    def call(self, *args: Tuple["ExprDoc"], **kwargs: Dict[str, "ExprDoc"]) -> "CallDoc":
        """
        Create a doc that represents function call, with self as callee.

        Parameters
        ----------
        *args : ExprDoc
            The positional arguments of the function call.
        **kwargs
            The keyword arguments of the function call.

        Returns
        -------
        doc : CallDoc
        """
        kwargs_keys = list(kwargs.keys())
        kwargs_values = list(kwargs.values())
        return _ffi_api.ExprDocCall(self, args, kwargs_keys, kwargs_values)  # type: ignore # pylint: disable=no-member

    _IndexType = Union["ExprDoc", "SliceDoc"]

    def __getitem__(self, indices: Union[Tuple[_IndexType], _IndexType]) -> "IndexDoc":
        """
        Create a doc that represents index access on self.

        Parameters
        ----------
        indices : Union[Tuple[Union["ExprDoc", "SliceDoc"]], Union["ExprDoc", "SliceDoc"]]
            The indices to access

        Returns
        -------
        doc : IndexDoc
        """
        if not isinstance(indices, tuple):
            indices = (indices,)
        return _ffi_api.ExprDocIndex(self, indices)  # type: ignore # pylint: disable=no-member

    def __iter__(self):
        """
        This is implemented to prevent confusing error message when trying to
        use ExprDoc as iterable. According to PEP-234, An object can be iterated
        over if it implements __iter__() or __getitem__(). If an object has only
        __getitem__ but not __iter__, interpreter will iterate the object by
        calling __getitem__ with 0, 1, 2, ..., until an IndexError is raised.

        https://peps.python.org/pep-0234/#python-api-specification
        """
        raise RuntimeError(f"{self.__class__} cannot be used as iterable.")


class StmtDoc(Doc):
    """Base class of statement doc"""

    @property
    def comment(self) -> Optional[str]:
        """
        The comment of this doc.

        The actual position of the comment depends on the type of Doc
        and also the DocPrinter implementation. It could be on the same line
        as the statement, or the line above, or inside the statement if it
        spans over multiple lines.
        """
        # It has to call the dunder method to avoid infinite recursion
        return self.__getattr__("comment")  # pylint: disable=unnecessary-dunder-call

    @comment.setter
    def comment(self, value):
        return _ffi_api.StmtDocSetComment(self, value)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.StmtBlockDoc")
class StmtBlockDoc(Doc):
    """The container doc that holds a list of StmtDoc.

    Note: `StmtBlockDoc` is never used in the IR, but a temporary container
    that allows holding a list of StmtDoc.
    """

    stmts: Sequence[StmtDoc]

    def __init__(self, stmts: List[StmtDoc]):
        self.__init_handle_by_constructor__(_ffi_api.StmtBlockDoc, stmts)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.LiteralDoc")
class LiteralDoc(ExprDoc):
    """Doc that represents literal value"""

    value: Union[str, IntImm, FloatImm, None]

    def __init__(self, value: Union[str, float, bool, int, None]):
        if value is None:
            self.__init_handle_by_constructor__(_ffi_api.LiteralDocNone)  # type: ignore # pylint: disable=no-member
        elif isinstance(value, str):
            self.__init_handle_by_constructor__(_ffi_api.LiteralDocStr, value)  # type: ignore # pylint: disable=no-member
        elif isinstance(value, float):
            self.__init_handle_by_constructor__(_ffi_api.LiteralDocFloat, value)  # type: ignore # pylint: disable=no-member
        elif isinstance(value, bool):
            self.__init_handle_by_constructor__(_ffi_api.LiteralDocBoolean, value)  # type: ignore # pylint: disable=no-member
        elif isinstance(value, int):
            self.__init_handle_by_constructor__(_ffi_api.LiteralDocInt, value)  # type: ignore # pylint: disable=no-member
        else:
            raise TypeError(f"Unsupported type {type(value)} for LiteralDoc")


@register_object("script.printer.IdDoc")
class IdDoc(ExprDoc):
    """Doc that represents identifier"""

    name: str

    def __init__(self, name: str):
        self.__init_handle_by_constructor__(_ffi_api.IdDoc, name)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.AttrAccessDoc")
class AttrAccessDoc(ExprDoc):
    """Doc that represents attribute access on an expression"""

    value: ExprDoc
    name: str

    def __init__(self, value: ExprDoc, name: str):
        self.__init_handle_by_constructor__(_ffi_api.AttrAccessDoc, value, name)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.IndexDoc")
class IndexDoc(ExprDoc):
    """Doc that represents index access on an expression"""

    value: ExprDoc
    indices: Sequence[Union[ExprDoc, "SliceDoc"]]

    def __init__(self, value: ExprDoc, indices: List[Union[ExprDoc, "SliceDoc"]]):
        self.__init_handle_by_constructor__(_ffi_api.IndexDoc, value, indices)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.CallDoc")
class CallDoc(ExprDoc):
    """Doc that represents function call"""

    callee: ExprDoc
    args: Sequence[ExprDoc]
    kwargs_keys: Sequence[str]
    kwargs_values: Sequence[ExprDoc]

    def __init__(self, callee: ExprDoc, *args: Tuple[ExprDoc], **kwargs: Dict[str, ExprDoc]):
        kwargs_keys = list(kwargs.keys())
        kwargs_values = list(kwargs.values())
        self.__init_handle_by_constructor__(
            _ffi_api.CallDoc,  # type: ignore # pylint: disable=no-member
            callee,
            args,
            kwargs_keys,
            kwargs_values,
        )


@unique
class OperationKind(IntEnum):
    """
    This enum represents the kind of operation (operator) in OperationDoc

    It's mirrored from OperationDocNode::Kind at include/tvm/script/printer/doc.h
    """

    # The name convention follows https://docs.python.org/3/library/ast.html
    # pylint: disable=invalid-name
    _UnaryStart = 0
    USub = 1
    Invert = 2
    Not = 3
    _UnaryEnd = 4
    _BinaryStart = 5
    Add = 6
    Sub = 7
    Mult = 8
    Div = 9
    FloorDiv = 10
    Mod = 11
    Pow = 12
    LShift = 13
    RShift = 14
    BitAnd = 15
    BitOr = 16
    BitXor = 17
    Lt = 18
    LtE = 19
    Eq = 20
    NotEq = 21
    Gt = 22
    GtE = 23
    And = 24
    Or = 25
    _BinaryEnd = 26
    _SpecialStart = 27
    IfThenElse = 28
    _SpecialEnd = 29
    # pylint: enable=invalid-name


@register_object("script.printer.OperationDoc")
class OperationDoc(ExprDoc):
    """
    Doc that represents operation

    It can be unary, binary and other special operators (for example,
    the if-then-else expression).
    """

    kind: OperationKind
    operands: Sequence[ExprDoc]

    def __init__(self, kind: OperationKind, operands: List[ExprDoc]):
        self.__init_handle_by_constructor__(_ffi_api.OperationDoc, kind, operands)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.LambdaDoc")
class LambdaDoc(ExprDoc):
    """Doc that represents lambda function"""

    args: Sequence[IdDoc]
    body: ExprDoc

    def __init__(self, args: List[IdDoc], body: ExprDoc):
        self.__init_handle_by_constructor__(_ffi_api.LambdaDoc, args, body)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.TupleDoc")
class TupleDoc(ExprDoc):
    """Doc that represents tuple literal"""

    elements: Sequence[ExprDoc]

    def __init__(self, elements: List[ExprDoc]):
        self.__init_handle_by_constructor__(_ffi_api.TupleDoc, elements)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.ListDoc")
class ListDoc(ExprDoc):
    """Doc that represents list literal"""

    elements: Sequence[ExprDoc]

    def __init__(self, elements: List[ExprDoc]):
        self.__init_handle_by_constructor__(_ffi_api.ListDoc, elements)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.DictDoc")
class DictDoc(ExprDoc):
    """Doc that represents dict literal"""

    keys: Sequence[ExprDoc]
    values: Sequence[ExprDoc]

    def __init__(self, content: Dict[ExprDoc, ExprDoc]):
        keys = list(content.keys())
        values = list(content.values())
        self.__init_handle_by_constructor__(_ffi_api.DictDoc, keys, values)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.SliceDoc")
class SliceDoc(ExprDoc):
    """
    Doc that represents slice in Index expression

    This doc can only appear in `IndexDoc.indices`.
    """

    start: Optional[ExprDoc]
    stop: Optional[ExprDoc]
    step: Optional[ExprDoc]

    def __init__(
        self,
        start: Optional[ExprDoc] = None,
        stop: Optional[ExprDoc] = None,
        step: Optional[ExprDoc] = None,
    ):
        self.__init_handle_by_constructor__(_ffi_api.SliceDoc, start, stop, step)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.AssignDoc")
class AssignDoc(StmtDoc):
    """Doc that represents assign statement."""

    lhs: ExprDoc
    rhs: Optional[ExprDoc]
    annotation: Optional[ExprDoc]

    def __init__(self, lhs: ExprDoc, rhs: Optional[ExprDoc], annotation: Optional[ExprDoc] = None):
        self.__init_handle_by_constructor__(
            _ffi_api.AssignDoc,  # type: ignore # pylint: disable=no-member
            lhs,
            rhs,
            annotation,
        )


@register_object("script.printer.IfDoc")
class IfDoc(StmtDoc):
    """Doc that represents if-then-else statement."""

    predicate: ExprDoc
    then_branch: Sequence[StmtDoc]
    else_branch: Sequence[StmtDoc]

    def __init__(self, predicate: ExprDoc, then_branch: List[StmtDoc], else_branch: List[StmtDoc]):
        self.__init_handle_by_constructor__(
            _ffi_api.IfDoc,  # type: ignore # pylint: disable=no-member
            predicate,
            then_branch,
            else_branch,
        )


@register_object("script.printer.WhileDoc")
class WhileDoc(StmtDoc):
    """Doc that represents while statement."""

    predicate: ExprDoc
    body: Sequence[StmtDoc]

    def __init__(self, predicate: ExprDoc, body: List[StmtDoc]):
        self.__init_handle_by_constructor__(_ffi_api.WhileDoc, predicate, body)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.ForDoc")
class ForDoc(StmtDoc):
    """Doc that represents for statement."""

    lhs: ExprDoc
    rhs: ExprDoc
    body: Sequence[StmtDoc]

    def __init__(self, lhs: ExprDoc, rhs: ExprDoc, body: List[StmtDoc]):
        self.__init_handle_by_constructor__(_ffi_api.ForDoc, lhs, rhs, body)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.ScopeDoc")
class ScopeDoc(StmtDoc):
    """
    Doc that represents special scopes.

    Specifically, this means the with statement in Python:

    with <rhs> as <lhs>:
        <body...>
    """

    lhs: Optional[ExprDoc]
    rhs: ExprDoc
    body: Sequence[StmtDoc]

    def __init__(self, lhs: Optional[ExprDoc], rhs: ExprDoc, body: List[StmtDoc]):
        self.__init_handle_by_constructor__(_ffi_api.ScopeDoc, lhs, rhs, body)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.ExprStmtDoc")
class ExprStmtDoc(StmtDoc):
    """Doc that represents an expression as statement."""

    expr: ExprDoc

    def __init__(self, expr: ExprDoc):
        self.__init_handle_by_constructor__(_ffi_api.ExprStmtDoc, expr)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.AssertDoc")
class AssertDoc(StmtDoc):
    """Doc that represents assert statement."""

    test: ExprDoc
    msg: Optional[ExprDoc]

    def __init__(self, test: ExprDoc, msg: Optional[ExprDoc] = None):
        self.__init_handle_by_constructor__(_ffi_api.AssertDoc, test, msg)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.ReturnDoc")
class ReturnDoc(StmtDoc):
    """Doc that represents return statement."""

    value: ExprDoc

    def __init__(self, value: ExprDoc):
        self.__init_handle_by_constructor__(_ffi_api.ReturnDoc, value)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.FunctionDoc")
class FunctionDoc(StmtDoc):
    """Doc that represents function definition."""

    name: IdDoc
    args: Sequence[AssignDoc]
    decorators: Sequence[ExprDoc]
    return_type: Optional[ExprDoc]
    body: Sequence[StmtDoc]

    def __init__(
        self,
        name: IdDoc,
        args: List[AssignDoc],
        decorators: List[ExprDoc],
        return_type: Optional[ExprDoc],
        body: List[StmtDoc],
    ):
        self.__init_handle_by_constructor__(
            _ffi_api.FunctionDoc,  # type: ignore # pylint: disable=no-member
            name,
            args,
            decorators,
            return_type,
            body,
        )


@register_object("script.printer.ClassDoc")
class ClassDoc(StmtDoc):
    """Doc that represents class definition."""

    name: IdDoc
    decorators: Sequence[ExprDoc]
    body: Sequence[StmtDoc]

    def __init__(self, name: IdDoc, decorators: List[ExprDoc], body: List[StmtDoc]):
        self.__init_handle_by_constructor__(
            _ffi_api.ClassDoc,  # type: ignore # pylint: disable=no-member
            name,
            decorators,
            body,
        )
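# ---------------------------------------------------------------------------
# Editor's note: a small sketch (not part of the original file) that assembles
# a Doc tree by hand from the classes above; it corresponds to `x: int32 = 1 + 2`.
lhs = IdDoc("x")
rhs = OperationDoc(OperationKind.Add, [LiteralDoc(1), LiteralDoc(2)])
stmt = AssignDoc(lhs, rhs, annotation=IdDoc("int32"))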
https://github.com/zk-ml/tachikoma
python/tvm/script/printer/doc_printer.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Functions to print doc into text format"""

from typing import Optional

from tvm.runtime.object_path import ObjectPath

from . import _ffi_api
from .doc import Doc


def to_python_script(
    doc: Doc,
    indent_spaces: int = 4,
    print_line_numbers: bool = False,
    num_context_lines: Optional[int] = None,
    path_to_underline: Optional[ObjectPath] = None,
) -> str:
    """Convert Doc into Python script.

    Parameters
    ----------
    doc : Doc
        The doc to convert into Python script
    indent_spaces : int
        The number of indent spaces to use in the output
    print_line_numbers : bool
        Whether to print line numbers
    num_context_lines : Optional[int]
        Number of context lines to print around the underlined text
    path_to_underline : Optional[ObjectPath]
        Object path to be underlined

    Returns
    -------
    script : str
        The text representation of Doc in Python syntax
    """
    if num_context_lines is None:
        num_context_lines = -1
    return _ffi_api.DocToPythonScript(  # type: ignore
        doc, indent_spaces, print_line_numbers, num_context_lines, path_to_underline
    )
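# ---------------------------------------------------------------------------
# Editor's note: a usage sketch, not part of the original file. Feeding a
# hand-built Doc into `to_python_script` is expected to yield plain Python text.
from .doc import AssignDoc, IdDoc, LiteralDoc

doc = AssignDoc(IdDoc("x"), LiteralDoc(42), None)
print(to_python_script(doc))  # expected to print: x = 42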
https://github.com/zk-ml/tachikoma
python/tvm/script/printer/entry.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This file contains the entry point of TVMScript Unified Printer.
"""

from typing import Dict, Optional

from tvm.runtime import Object, ObjectPath

from . import _ffi_api


def script(  # pylint: disable=too-many-arguments
    root_node: Object,
    ir_name: str,
    ir_prefix: Dict[str, str],
    indent_spaces: int = 4,
    print_line_numbers: bool = False,
    num_context_lines: int = -1,
    path_to_underline: Optional[ObjectPath] = None,
) -> str:
    """
    Print IR graph as TVMScript code

    Parameters
    ----------
    root_node : Object
        The root node to print.
    ir_name : str
        The dispatch token of the target IR, e.g., "tir", "relax".
    ir_prefix : Dict[str, str]
        The symbol name for TVMScript IR namespaces. For example, {"tir": "T"}.
    indent_spaces : int
        The number of indent spaces to use in the output
    print_line_numbers : bool
        Whether to print line numbers
    num_context_lines : int
        Number of context lines to print around the underlined text
    path_to_underline : Optional[ObjectPath]
        Object path to be underlined

    Returns
    -------
    script : str
        The TVMScript code of the root_node
    """
    return _ffi_api.Script(  # type: ignore # pylint: disable=no-member
        root_node,
        ir_name,
        ir_prefix,
        indent_spaces,
        print_line_numbers,
        num_context_lines,
        path_to_underline,
    )
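# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch, not part of the original file.
# Whether the "tir" dispatch token is fully wired up depends on which printer
# implementations are registered in this snapshot, so treat this purely as a
# sketch under that assumption.
from tvm import te

# Build a small PrimFunc to print.
A = te.placeholder((8,), name="A")
B = te.compute((8,), lambda i: A[i] + 1.0, name="B")
prim_func = te.create_prim_func([A, B])
print(script(prim_func, "tir", {"tir": "T"}, indent_spaces=4))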
https://github.com/zk-ml/tachikoma
python/tvm/script/printer/frame.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Frame is the core data structure for semantic information
when printing IR graph into TVMScript code.
"""

from typing import Callable, Sequence

from tvm._ffi import register_object
from tvm.runtime import Object
from tvm.script.printer.doc import StmtDoc

from . import _ffi_api


class Frame(Object):
    """
    Frame is the core data structure for semantic information
    when printing IR graph into TVMScript code.

    Frame base class manages a list of callbacks to be executed
    when frame goes out of scope.
    """

    def add_exit_callback(self, callback: Callable[[], None]) -> None:
        """
        Adds a callback function to be executed when frame goes out of scope.

        Parameters
        ----------
        callback : Callable[[], None]
            The callback function.
        """
        _ffi_api.FrameAddExitCallback(self, callback)  # type: ignore # pylint: disable=no-member

    def __enter__(self):
        _ffi_api.FrameEnterWithScope(self)  # type: ignore # pylint: disable=no-member
        return self

    def __exit__(self, *exception_info):
        _ffi_api.FrameExitWithScope(self)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.MetadataFrame")
class MetadataFrame(Frame):
    """
    MetadataFrame contains information like constant parameter array.
    """

    metadata: Sequence[Object]

    def __init__(self):
        self.__init_handle_by_constructor__(_ffi_api.MetadataFrame)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.VarDefFrame")
class VarDefFrame(Frame):
    """
    VarDefFrame contains information about the free variables that needs to be
    defined at the beginning of the printed snippet.
    """

    stmts: Sequence[StmtDoc]

    def __init__(self):
        self.__init_handle_by_constructor__(_ffi_api.VarDefFrame)  # type: ignore # pylint: disable=no-member
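# ---------------------------------------------------------------------------
# Editor's note: a small sketch, not part of the original file, showing the
# context-manager protocol and exit callbacks defined above.
frame = MetadataFrame()
frame.add_exit_callback(lambda: print("frame closed"))
with frame:
    pass  # "frame closed" is printed when the `with` block exits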
https://github.com/zk-ml/tachikoma
python/tvm/script/printer/ir_docsifier.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
IRDocsifier is the top-level interface in the process of transforming
IR graph into Doc tree, during printing IR graph as TVMScript code.
"""

import atexit
from contextlib import ExitStack, contextmanager
from typing import Callable, Dict, Generator, Mapping, Optional, Sequence, Set, Tuple, Type, TypeVar

from tvm._ffi import get_object_type_index, register_object
from tvm.runtime import Object, ObjectPath

from . import _ffi_api
from .doc import Doc
from .frame import Frame
from .var_table import VarTable

_REGISTERED_TYPES: Set[Tuple[str, int]] = set()  # {(dispatch_token, type_index)}


def _cleanup_dispatch_function():
    for dispatch_token, type_index in _REGISTERED_TYPES:
        _ffi_api.IRDocsifierRemoveDispatch(dispatch_token, type_index)  # type: ignore # pylint: disable=no-member


_CLEANUP_REGISTERED = False


def _ensure_cleanup_function_registered():
    """
    Add a cleanup function to be called on interpreter termination, to remove
    all dispatch functions registered on the Python side.

    Without cleaning up those dispatch functions, program will segfault on
    termination. It's because dispatch functions are referenced from the
    static memory of libtvm, thus they will be cleaned up at the very end,
    making calls to Py_DecRef after Python interpreter terminates.
    """
    global _CLEANUP_REGISTERED  # pylint: disable=global-statement

    if not _CLEANUP_REGISTERED:
        atexit.register(_cleanup_dispatch_function)
        _CLEANUP_REGISTERED = True


@register_object("script.printer.RootNodeContainer")
class RootNodeContainer(Object):
    """
    A wrapper object to provide injection point for printer of each IR.

    This class shouldn't be used directly.
    `IRDocsifier.set_root_dispatch` should be used instead.
    """

    root_node: Object

    def __init__(self, root_node: Object):
        self.__init_handle_by_constructor__(_ffi_api.RootNodeContainer, root_node)  # type: ignore # pylint: disable=no-member


@register_object("script.printer.IRDocsifier")
class IRDocsifier(Object):
    """
    IRDocsifier is the top-level interface in the IR->Doc process.

    It provides methods to convert IR node object to Doc, operate on
    Frame objects and change dispatch tokens.
    """

    ir_prefix: Mapping[str, str]
    vars: VarTable
    frames: Sequence[Frame]
    dispatch_tokens: Sequence[str]

    def __init__(self, ir_prefix: Dict[str, str]):
        """
        Create a new IRDocsifier.

        Parameters
        ----------
        ir_prefix : Dict[str, str]
            The ir prefix to use. Key is the IR dispatch token and
            value is the name of identifier for this IR's namespace in TVMScript.
        """
        self.__init_handle_by_constructor__(_ffi_api.IRDocsifier, ir_prefix)  # type: ignore # pylint: disable=no-member

    _TObject = TypeVar("_TObject", bound=Object)

    @classmethod
    def set_dispatch(
        cls,
        node_type: Type[_TObject],
        dispatch_function: Callable[[_TObject, ObjectPath, "IRDocsifier"], Doc],
        dispatch_token: str = "",
    ) -> None:
        """
        Set the dispatch function to transform a particular IR node type to Doc

        Parameters
        ----------
        node_type : Type[_TObject]
            The type of object to dispatch on.
        dispatch_function : Callable[[_TObject, ObjectPath, "IRDocsifier"], Doc]
            The dispatch function. It's called to transform IR node object to Doc.
        dispatch_token : str
            Function will only be called when this dispatch_token is the same as
            the one on the top of IRDocsifier's dispatch_tokens stack. An empty
            dispatch token means registering as default dispatch function, which
            will be called when there is no dispatch function registered with the
            current dispatch token.
        """
        type_index = get_object_type_index(node_type)
        if type_index is None:
            raise TypeError(f"{type(node_type)} is not a registered TVM object type.")

        _ensure_cleanup_function_registered()
        _ffi_api.IRDocsifierSetDispatch(  # type: ignore # pylint: disable=no-member
            dispatch_token, type_index, dispatch_function
        )
        _REGISTERED_TYPES.add((dispatch_token, type_index))

    @classmethod
    def set_root_dispatch(
        cls, dispatch_token: str, root_dispatch_function: Callable[[Object, "IRDocsifier"], Doc]
    ) -> None:
        """
        Set the root dispatch function for an IR.

        The root dispatch function will be called with the root node of an IR
        graph that's being transformed to Doc. This provides an injection point
        for each IR's printer implementation to add specialized logic, for
        example, pushing a special Frame to the IRDocsifier before doing the
        actual IR->Doc transformation.

        The simplest root dispatch function is

        .. code-block:: python

            def f(obj, ir_docsifier):
                return ir_docsifier.as_doc(obj, ObjectPath.root())

        Parameters
        ----------
        root_dispatch_function : Callable[[_TObject, "IRDocsifier"], Doc]
            The root dispatch function. It's called with the root node to be printed.
        dispatch_token : str
            The dispatch token of the IR that root_dispatch_function applies to.
        """

        def dispatch_function(obj: RootNodeContainer, _, ir_docsifier):
            return root_dispatch_function(obj.root_node, ir_docsifier)

        cls.set_dispatch(RootNodeContainer, dispatch_function, dispatch_token)

    def as_doc(self, obj: Object, object_path: ObjectPath) -> Doc:
        """
        Transform the input object into Doc.

        Parameters
        ----------
        obj : Object
            The IR node object.
        object_path : ObjectPath
            The object path of this object. It's used for locating diagnostic message.

        Returns
        -------
        doc : Doc
            The doc for this object.
        """
        return _ffi_api.IRDocsifierAsDoc(self, obj, object_path)  # type: ignore # pylint: disable=no-member

    def get_frame(self, frame_type: Type[Frame]) -> Optional[Frame]:
        """
        Get the top frame with type `frame_type`.

        Parameters
        ----------
        frame_type : Type[Frame]
            The target frame type.

        Returns
        -------
        frame : Optional[Frame]
            The frame if found, otherwise None.
        """
        for i in range(len(self.frames) - 1, -1, -1):
            if isinstance(self.frames[i], frame_type):
                return self.frames[i]
        return None

    @contextmanager
    def dispatch_token(self, token: str):
        """
        Push a new dispatch token to the stack.

        Parameters
        ----------
        token : str
            The token to push.

        Returns
        -------
        A context manager that pops this dispatch token when exits.
        """
        with ExitStack() as stack:
            _ffi_api.IRDocsifierPushDispatchToken(self, token)  # type: ignore # pylint: disable=no-member
            stack.callback(_ffi_api.IRDocsifierPopDispatchToken, self)  # type: ignore # pylint: disable=no-member
            yield

    _TFrame = TypeVar("_TFrame", bound=Frame)

    @contextmanager
    def frame(self, frame: _TFrame) -> Generator[_TFrame, None, None]:
        """
        Push a new frame to the stack.

        Parameters
        ----------
        frame : Frame
            The frame to push.

        Returns
        -------
        A context manager that pops this frame when exits.
        """
        with ExitStack() as stack:
            stack.enter_context(frame)
            _ffi_api.IRDocsifierPushFrame(self, frame)  # type: ignore # pylint: disable=no-member
            stack.callback(_ffi_api.IRDocsifierPopFrame, self)  # type: ignore # pylint: disable=no-member
            yield frame
https://github.com/zk-ml/tachikoma
python/tvm/script/printer/var_table.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Variable table that manages the mapping from variable objects to ExprDoc during printing"""

from typing import Callable, Optional

from tvm._ffi import register_object
from tvm.runtime import Object, ObjectPath

from . import _ffi_api
from .doc import ExprDoc, IdDoc
from .frame import Frame


@register_object("script.printer.VarTable")
class VarTable(Object):
    """
    Variable Table manages mapping from variable object to ExprDoc during
    the process of printing TVMScript.
    """

    def __init__(self):
        """
        Create an empty VarTable.
        """
        self.__init_handle_by_constructor__(_ffi_api.VarTable)  # type: ignore # pylint: disable=no-member

    def define(self, obj: Object, name_hint: str, object_path: ObjectPath, frame: Frame) -> IdDoc:
        """
        Define a variable by name.

        Parameters
        ----------
        obj : Object
            The variable object.
        name_hint : str
            The hint for variable name.
        object_path : ObjectPath
            The object path to be associated with the returned ExprDoc.
        frame : Frame
            The frame that this variable is defined in.

        Returns
        -------
        doc : IdDoc
            The doc for this variable.
        """
        return _ffi_api.VarTableDefine(self, obj, name_hint, object_path, frame)  # type: ignore # pylint: disable=no-member

    def define_by_doc(self, obj: Object, doc_factory: Callable[[], ExprDoc], frame: Frame) -> None:
        """
        Define a variable by ExprDoc.

        Parameters
        ----------
        obj : Object
            The variable object.
        doc_factory : Callable[[], ExprDoc]
            The factory function that creates the ExprDoc for this variable.
        frame : Frame
            The frame that this variable is defined in.

        Returns
        -------
        None
        """
        _ffi_api.VarTableDefineByDoc(self, obj, doc_factory, frame)  # type: ignore # pylint: disable=no-member

    def get_var_doc(self, obj: Object, object_path: ObjectPath) -> Optional[ExprDoc]:
        """
        Get the doc for a variable.

        Parameters
        ----------
        obj : Object
            The variable object.
        object_path : ObjectPath
            The object path to be associated with the returned ExprDoc.

        Returns
        -------
        doc : ExprDoc
            The doc for this variable.
        """
        return _ffi_api.VarTableGetVarDoc(self, obj, object_path)  # type: ignore # pylint: disable=no-member

    def is_var_defined(self, obj: Object) -> bool:
        """
        Check whether a variable is defined.

        Parameters
        ----------
        obj : Object
            The variable object.

        Returns
        -------
        is_defined : bool
            Whether the variable is defined.
        """
        return _ffi_api.VarTableIsVarDefined(self, obj)  # type: ignore # pylint: disable=no-member

    def __contains__(self, obj: Object) -> bool:
        return self.is_var_defined(obj)
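# ---------------------------------------------------------------------------
# Editor's note: a usage sketch, not part of the original file. Definitions
# are scoped to the frame passed to `define`, so they disappear once that
# frame exits.
import tvm
from .frame import MetadataFrame  # a Frame subclass used purely as a scope here

var_table = VarTable()
x = tvm.tir.Var("x", "int32")
with MetadataFrame() as frame:
    x_doc = var_table.define(x, "x", ObjectPath.root(), frame)
    assert x in var_table  # __contains__ delegates to is_var_defined
assert x not in var_table  # undefined again after the frame exits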
https://github.com/zk-ml/tachikoma
python/tvm/support.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Support infra of TVM."""
import json
import textwrap
import ctypes
import os
import sys

import tvm
import tvm._ffi
from .runtime.module import Module
from . import get_global_func


def libinfo():
    """Returns a dictionary containing compile-time info, including cmake flags and git commit hash

    Returns
    -------
    info: Dict[str, str]
        The dictionary of compile-time info.
    """
    get_lib_info_func = get_global_func("support.GetLibInfo", allow_missing=True)
    if get_lib_info_func is not None:
        lib_info = get_lib_info_func()
        if lib_info is None:
            return {}
    else:
        return {}
    return dict(lib_info.items())


def describe():
    """
    Print out information about TVM and the current Python environment
    """
    info = list((k, v) for k, v in libinfo().items())
    info = dict(sorted(info, key=lambda x: x[0]))

    print("Python Environment")
    sys_version = sys.version.replace("\n", " ")
    uname = os.uname()
    uname = f"{uname.sysname} {uname.release} {uname.version} {uname.machine}"
    lines = [
        f"TVM version = {tvm.__version__}",
        f"Python version = {sys_version} ({sys.maxsize.bit_length() + 1} bit)",
        f"os.uname() = {uname}",
    ]
    print(textwrap.indent("\n".join(lines), prefix="  "))

    print("CMake Options:")
    print(textwrap.indent(json.dumps(info, indent=2), prefix="  "))


class FrontendTestModule(Module):
    """A tvm.runtime.Module whose member functions are PackedFunc."""

    def __init__(self, entry_name=None):
        underlying_mod = get_global_func("testing.FrontendTestModule")()
        handle = underlying_mod.handle

        # Set handle to NULL to avoid cleanup in c++ runtime, transferring ownership.
        # Both cython and ctypes FFI use c_void_p, so this is safe to assign here.
        underlying_mod.handle = ctypes.c_void_p(0)

        super(FrontendTestModule, self).__init__(handle)
        if entry_name is not None:
            self.entry_name = entry_name

    def add_function(self, name, func):
        self.get_function("__add_function")(name, func)

    def __setitem__(self, key, value):
        self.add_function(key, value)


tvm._ffi._init_api("support", __name__)
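# ---------------------------------------------------------------------------
# Editor's note: a usage sketch, not part of the original file. Which keys
# appear in `libinfo()` depends on how this build of TVM was configured.
if __name__ == "__main__":
    info = libinfo()
    print(info.get("GIT_COMMIT_HASH", "<unknown>"))
    describe()  # prints the Python environment plus the CMake options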
https://github.com/zk-ml/tachikoma
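A quick illustration of the helpers above. The exact keys returned by libinfo() depend on how TVM was built, so USE_LLVM and GIT_COMMIT_HASH below are typical examples rather than guaranteed entries.

import tvm
import tvm.support

# One-shot human-readable dump of versions and CMake options.
tvm.support.describe()

# Programmatic access to individual compile-time flags.
info = tvm.support.libinfo()
print("LLVM:", info.get("USE_LLVM", "unknown"))
print("commit:", info.get("GIT_COMMIT_HASH", "unknown"))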
python/tvm/target/__init__.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Target description and codegen module. TVM's target string is in format ``<target_kind> [-option=value]...``. Note ---- The list of options includes: - **-device=<device name>** The device name. - **-mtriple=<target triple>** Specify the target triple, which is useful for cross compilation. - **-mcpu=<cpuname>** Specify a specific chip in the current architecture to generate code for. By default this is inferred from the target triple and autodetected from the current architecture. - **-mattr=a1,+a2,-a3,...** Override or control specific attributes of the target, such as whether SIMD operations are enabled or not. The default set of attributes is set by the current CPU. - **-mabi=<abi>** Generate code for the specified ABI, for example "lp64d". - **-system-lib** Build TVM system library module. A system lib is a global module that contains functions self-registered at program startup. Users can get the module using :any:`tvm.runtime.system_lib`. It is useful in environments where dynamic loading APIs like dlopen are banned. The system lib will be available as long as the resulting code is linked by the program. We can use :py:func:`tvm.target.Target` to create a tvm.target.Target from the target string. We can also use other specific functions in this module to create specific targets. """ from .target import Target, create, TargetKind from .target import ( cuda, rocm, mali, intel_graphics, arm_cpu, rasp, vta, bifrost, riscv_cpu, hexagon, stm32, ) from .virtual_device import VirtualDevice from .compilation_config import make_compilation_config from .tag import list_tags from .generic_func import GenericFunc from .generic_func import generic_func, get_native_generic_func, override_native_generic_func from . import datatype from . import codegen
https://github.com/zk-ml/tachikoma
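To make the target-string format above concrete, here is a small example showing that a raw string and the helper functions exported by this module build equivalent targets.

import tvm

t1 = tvm.target.Target("cuda -arch=sm_61 -model=1080ti")
t2 = tvm.target.cuda(model="1080ti", arch="sm_61")

# Both forms parse to the same kind and attributes.
assert t1.kind.name == t2.kind.name == "cuda"
assert str(t1.attrs["arch"]) == str(t2.attrs["arch"]) == "sm_61"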
python/tvm/target/_ffi_api.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """FFI APIs for tvm.target""" import tvm._ffi tvm._ffi._init_api("target", __name__)
https://github.com/zk-ml/tachikoma
python/tvm/target/codegen.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Code generation related functions.""" from . import _ffi_api from .target import Target def build_module(mod, target): """Build IRModule into Module. Parameters ---------- mod : tvm.IRModule The ir module. target : str The target module type. Returns ------- module : runtime.Module The corresponding module. """ target = Target(target) if isinstance(target, str) else target return _ffi_api.Build(mod, target) def llvm_lookup_intrinsic_id(name): """Lookup LLVM intrinsic id by name. Parameters ---------- name : str The name of the intrinsic. Returns ------- intrin_id : int The intrinsic id. """ return _ffi_api.llvm_lookup_intrinsic_id(name) def llvm_get_intrinsic_name(intrin_id: int) -> str: """Get the name of an intrinsic for a given id. Parameters ---------- intrin_id : int The id of the intrinsic. Returns ------- name : str The name of the intrinsic. """ return _ffi_api.llvm_get_intrinsic_name(intrin_id) def llvm_version_major(allow_none=False): """Get the major LLVM version. Parameters ---------- allow_none : bool Whether to allow returning None when the LLVM version is unavailable, instead of raising an error. Returns ------- major : int The major LLVM version. """ try: return _ffi_api.llvm_version_major() except AttributeError: if allow_none: return None raise RuntimeError("LLVM version is not available, please check if you built TVM with LLVM")
https://github.com/zk-ml/tachikoma
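A short sketch of probing the LLVM backend with the functions above; because llvm_version_major(allow_none=True) returns None instead of raising, the check is safe on builds without LLVM. The specific intrinsic name used below is illustrative.

from tvm.target import codegen

major = codegen.llvm_version_major(allow_none=True)
if major is None:
    print("TVM was built without LLVM")
else:
    print("built against LLVM", major)
    # Intrinsic id/name lookups become meaningful once LLVM is present.
    intrin_id = codegen.llvm_lookup_intrinsic_id("llvm.donothing")
    print(codegen.llvm_get_intrinsic_name(intrin_id))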
python/tvm/target/compilation_config.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Python bindings for creating CompilationConfigs.""" import tvm from . import _ffi_api def make_compilation_config(ctxt, target, target_host=None): """Returns a CompilationConfig appropriate for target and target_host, using the same representation conventions as for the standard build interfaces. Intended only for unit testing.""" raw_targets = tvm.target.Target.canon_multi_target_and_host(target, target_host) return _ffi_api.MakeCompilationConfig(ctxt, raw_targets)
https://github.com/zk-ml/tachikoma
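A hedged sketch of calling the helper above; it assumes a default PassContext is an acceptable ctxt argument, which matches how TVM's own unit tests drive this code path.

import tvm
from tvm.target import make_compilation_config

ctxt = tvm.transform.PassContext()
config = make_compilation_config(ctxt, "cuda", target_host="llvm")
print(config)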
python/tvm/target/datatype.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Bring Your Own Datatypes custom datatype framework TODO(@gussmith23 @hypercubestart) link to BYODT docs when they exist""" import tvm from tvm.runtime import convert, DataType from tvm.tir.expr import ( Call as _Call, Cast as _Cast, FloatImm as _FloatImm, BinaryOpExpr as _BinaryOpExpr, ) from tvm.tir.op import call_pure_extern from tvm._ffi import register_func as _register_func from tvm.tir import call_intrin def register(type_name, type_code): """Register a custom datatype with the given type name and type code Currently, the type code is manually allocated by the user, and the user must ensure that no two custom types share the same code. Generally, this should be straightforward, as the user will be manually registering all of their custom types. Example: .. code-block:: python # Register a dtype named 'posites2' under type code 130. tvm.target.datatype.register('posites2', 130) Parameters ---------- type_name : str The name of the custom datatype. type_code : int The type's code, which should be >= kCustomBegin. See include/tvm/runtime/data_type.h. """ tvm.runtime._ffi_api._datatype_register(type_name, type_code) def get_type_name(type_code): """Get the type name of a custom datatype from the type code. Note that this only works for custom datatypes registered with tvm.target.datatype.register(). It does not work for TVM-native types. Example: .. code-block:: python tvm.target.datatype.register('posites2', 130) assert tvm.target.datatype.get_type_name(130) == 'posites2' Parameters ---------- type_code : int The type code of the custom datatype. Returns ------- type_name : String The name of the custom datatype. """ return tvm.runtime._ffi_api._datatype_get_type_name(type_code) def get_type_code(type_name): """Get the type code of a custom datatype from its type name Note that this only works for custom datatypes registered with tvm.target.datatype.register(). It does not work for TVM-native types. Example: .. code-block:: python tvm.target.datatype.register('posites2', 130) assert tvm.target.datatype.get_type_code('posites2') == 130 Parameters ---------- type_name : str The type name Returns ------- type_code : int The type code of the custom datatype. """ return tvm.runtime._ffi_api._datatype_get_type_code(type_name) def get_type_registered(type_code): """Returns true if a custom datatype is registered under the given type code Example: .. code-block:: python tvm.target.datatype.register('posites2', 130) assert tvm.target.datatype.get_type_registered(130) Parameters ---------- type_code: int The type code Returns ------- type_registered : bool True if a custom datatype is registered under this type code, and false otherwise. 
""" return tvm.runtime._ffi_api._datatype_get_type_registered(type_code) def register_op( lower_func, op_name, target, src_type_name, dest_type_name=None, intrinsic_name=None ): """Register a lowering function for a specific operator of a custom datatype At build time, Relay must lower operators over custom datatypes into operators it understands how to compile. For each custom datatype operator which Relay finds while lowering custom datatypes, Relay expects to find a user-defined lowering function. Users register their user-defined lowering functions using this function. Users should use create_lower_func to create their lowering function. It should serve most use-cases. Currently, this will work with Casts, intrinsics (e.g. sqrt, sigmoid), and binary expressions (e.g. Add, Sub, Mul, Div). See the LowerCustomDatatypes pass to see how registered functions are used. Lowering Functions ------------------ TODO(@gussmith23) Get the terminology right here. Lowering functions take in a Relay node, and should return a semantically equivalent Relay node which Relay can build. This means that the returned node should not contain any custom datatypes. Users should likely not need to define lowering functions by hand -- see the helper function create_lower_func. Parameters ---------- lower_func : function The lowering function to call. See create_lower_func. op_name : str The name of the operation which the function computes, given by its class name (e.g. Add, LE, Cast, Call). target : str The name of codegen target. src_type_name : str The name of the custom datatype, e.g. posites2 (but not custom[posites2]32). If op_name is not "Cast", then target type is guaranteed to be the same as src_type_name. dest_type_name : str If op_name is "Cast", then this is required and should be set to the dest datatype of the argument to the Cast. If op_name is not "Cast", this is unused. intrinsic_name : str If op_name is "Call" and intrinsic_name is not None, then we assume the op is a Call to an Intrinsic, and intrinsic_name is the intrinsic's name. """ if op_name == "Cast": assert dest_type_name is not None lower_func_name = ( "tvm.datatype.lower." + target + "." + op_name + "." + dest_type_name + "." + src_type_name ) elif op_name == "Call" and intrinsic_name is not None: lower_func_name = ( "tvm.datatype.lower." + target + "." + op_name + ".intrin." + intrinsic_name + "." + src_type_name ) else: lower_func_name = "tvm.datatype.lower." + target + "." + op_name + "." + src_type_name tvm._ffi.register_func(lower_func_name, lower_func) def register_min_func(func, type_name): """Register the function that returns the minimum representable value of type_name. Operators such as max pooling and argmax require the minimum finite value representable by the datatype the op operating on. Users can use this function to register a function that returns a TIR expression node outputting the minimum representable value of their custom data type. Users should use create_min_lower_func to create their lowering function. It should serve most use-cases. Note: for special cases when it is known that the custom datatype is representable by a float, the user can create their own lowering func that returns a FloatImm. The benefits are allowing optimizations such as rewrites to work as expected on custom datatypes. Parameters ---------- func : function Input is an integer num_bits, should return a TIR expression node that represents a scalar tensor of type custom[type_name]num_bits with the minimum representable value. 
type_name : str The name of the custom datatype, e.g. posites2 (but not custom[posites2]32). """ _register_func("tvm.datatype.min." + type_name, func) def create_min_lower_func(extern_func_map, type_name): """Returns a lowering function for getting the minimum value of a custom datatype. Parameters ---------- extern_func_map : map A map from bit lengths to the name of the extern "C" function to lower to. type_name : string The name of the custom datatype, e.g. posites2 (but not custom[posites2]32). """ def lower(num_bits): dtype = f"custom[{type_name}]{num_bits}" if num_bits not in extern_func_map: raise RuntimeError(f"missing minimum function for {dtype}") return call_pure_extern(dtype, extern_func_map[num_bits]) return lower def create_lower_func(extern_func_map): """Returns a function which lowers an operation to a function call. Parameters ---------- extern_func_map : map If lowering a Cast, extern_func_map should be a map from tuples of (src_bit_length, dest_bit_length) to the name of the extern "C" function to lower to. Otherwise, for unary and binary ops, it should simply be a map from bit_length to the name of the extern "C" function to lower to. """ def lower(op): """ Takes an op---either a Cast, Call, or a binary op (e.g. an Add) and returns a call to the specified external function, passing the op's argument or arguments. The return type of the call depends on the type of the op: if it is a custom type, then a uint of the same width as the custom type is returned. Otherwise, the type is unchanged.""" dtype = op.dtype t = DataType(dtype) if get_type_registered(t.type_code): dtype = "uint" + str(t.bits) if t.lanes > 1: dtype += "x" + str(t.lanes) key = t.bits if isinstance(op, _Cast): src_bits = DataType(op.value.dtype).bits key = (src_bits, t.bits) if key not in extern_func_map: raise RuntimeError(f"missing key {key} in extern_func_map for {op.astext()}") if isinstance(op, _Cast): return call_pure_extern(dtype, extern_func_map[key], op.value) if isinstance(op, _FloatImm): return call_pure_extern(dtype, extern_func_map[key], op.value) if isinstance(op, _Call): return call_pure_extern(dtype, extern_func_map[key], *op.args) if isinstance(op, _BinaryOpExpr): return call_pure_extern(dtype, extern_func_map[key], op.a, op.b) raise RuntimeError(f"lowering unsupported op: {op.astext()}") return lower def lower_ite(ite_op): """Lowered if then else function that calls intrinsic if_then_else. Unlike a function lowered by create_lower_func, this function calls the tvm intrinsic if_then_else. Parameters ---------- ite_op : Op Takes an if then else op and returns a call to the tir.if_then_else function, passing the op's arguments. The return type of the call is a uint of the same width as the custom type. """ dtype = ite_op.dtype t = tvm.DataType(dtype) assert get_type_registered(t.type_code) dtype = "uint" + str(t.bits) if t.lanes > 1: dtype += "x" + str(t.lanes) return call_intrin( dtype, "tir.if_then_else", convert(ite_op.args[0]), convert(ite_op.args[1]), convert(ite_op.args[2]), ) def lower_call_pure_extern(op): """Lowered call pure extern function that calls intrinsic call_pure_extern. Unlike a function lowered by create_lower_func, this function calls the tvm intrinsic call_pure_extern. Parameters ---------- op : Op Takes a call_pure_extern op and returns a call to the tir.call_pure_extern function, passing the op's arguments. The return type of the call is a uint of the same width as the custom type.
""" dtype = op.dtype t = tvm.DataType(dtype) assert get_type_registered(t.type_code) dtype = "uint" + str(t.bits) if t.lanes > 1: dtype += "x" + str(t.lanes) return call_intrin(dtype, "tir.call_pure_extern", *op.args)
https://github.com/zk-ml/tachikoma
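Tying the registration helpers above together, a sketch of a typical Bring Your Own Datatypes setup. The 'posites2' name and type code follow the docstring examples; the extern "C" symbols (FloatToPosit32es2 and friends) are hypothetical and must be provided by a library linked into the process.

from tvm.target import datatype

# One-time registration; the code must be >= kCustomBegin.
datatype.register("posites2", 130)
assert datatype.get_type_registered(datatype.get_type_code("posites2"))

# Lower float32 <-> posites2 casts on llvm to (hypothetical) extern "C" calls.
datatype.register_op(
    datatype.create_lower_func({(32, 32): "FloatToPosit32es2"}),
    "Cast", "llvm", "float", dest_type_name="posites2",
)
datatype.register_op(
    datatype.create_lower_func({(32, 32): "Posit32es2ToFloat"}),
    "Cast", "llvm", "posites2", dest_type_name="float",
)

# Minimum-value lowering used by ops such as max pooling and argmax.
datatype.register_min_func(
    datatype.create_min_lower_func({32: "MinPosit32es2"}, "posites2"),
    "posites2",
)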
python/tvm/target/generic_func.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Generic function.""" import tvm._ffi try: from decorator import decorate except ImportError: # Allow decorator to be missing in runtime if not tvm._ffi.base._RUNTIME_ONLY: raise from tvm.runtime import Object from .target import Target from . import _ffi_api @tvm._ffi.register_object class GenericFunc(Object): """GenericFunc node reference. This represents a generic function that may be specialized for different targets. When this object is called, a specialization is chosen based on the current target. Note ---- Do not construct an instance of this object, it should only ever be used as a return value from calling into C++. """ def __call__(self, *args): return _ffi_api.GenericFuncCallFunc(self, *args) def set_default(self, func, allow_override=False): """Set the default function to be used if no specializations match the current target. Parameters ---------- func : function The default function allow_override : bool Whether to allow the current default to be overridden """ _ffi_api.GenericFuncSetDefault(self, func, allow_override) def register(self, func, key_list, allow_override=False): """Register a specialization for this GenericFunc. Parameters ---------- func : function The function to be registered. key : str or list of str The key to be registered. allow_override : bool, optional Whether to allow existing keys to be overridden. """ key_list = [key_list] if isinstance(key_list, str) else key_list _ffi_api.GenericFuncRegisterFunc(self, func, key_list, allow_override) def get_packed_func(self): """Get the packed function specified for the current target. Returns ------- func : PackedFunc The function specified for the current target. Return the default function if no specializations match the current target. """ return _ffi_api.GenericFuncGetPackedFunc(self) def get_native_generic_func(name): """Get a generic function from the global registry. If no function is registered under the given name, a new generic function is created. Parameters ---------- name : string The name of the generic function to get Returns ------- func : GenericFunc The generic function for the given name """ return _ffi_api.GenericFuncGetGlobal(name) def override_native_generic_func(func_name): """Override a generic function defined in C++ Generic function allows registration of further functions that can be dispatched on current target context. If no registered dispatch is matched, the fdefault will be called. Parameters ---------- func_name : string The name of the generic func to be overridden Returns ------- fgeneric : function A wrapped generic function. Example ------- .. 
code-block:: python import tvm # wrap function as target generic @tvm.target.override_native_generic_func("my_func") def my_func(a): return a + 1 # register specialization of my_func under target cuda @my_func.register("cuda") def my_func_cuda(a): return a + 2 # displays 3, because my_func is called print(my_func(2)) # displays 4, because my_func_cuda is called with tvm.target.cuda(): print(my_func(2)) """ generic_func_node = get_native_generic_func(func_name) def fdecorate(fdefault): """Wrap a target generic function, overriding the previous default that was set for the generic function. Parameters ---------- fdefault : function The default function. Returns ------- fgeneric : function A wrapped generic function. """ generic_func_node.set_default(fdefault, allow_override=True) def register(key, func=None, override=True): """Register function to be the dispatch function. Parameters ---------- key : str or list of str The key to be registered. func : function The function to be registered. override : bool, optional Whether to override the existing registration. Returns ------- The register function if func is not specified; otherwise the registered function. """ def _do_reg(myf): generic_func_node.register(myf, key, override) return myf if func: return _do_reg(func) return _do_reg def dispatch_func(func, *args, **kwargs): # pylint: disable=unused-argument """The wrapped dispatch function""" if kwargs: raise RuntimeError( "Keyword arguments cannot be used when invoking generic_func %s" % func_name ) return generic_func_node(*args) fresult = decorate(fdefault, dispatch_func) fresult.fdefault = fdefault fresult.register = register fresult.generic_func_node = generic_func_node return fresult return fdecorate def generic_func(fdefault): """Wrap a target generic function. Generic function allows registration of further functions that can be dispatched on current target context. If no registered dispatch is matched, the fdefault will be called. Parameters ---------- fdefault : function The default function. Returns ------- fgeneric : function A wrapped generic function. Example ------- .. code-block:: python import tvm # wrap function as target generic @tvm.target.generic_func def my_func(a): return a + 1 # register specialization of my_func under target cuda @my_func.register("cuda") def my_func_cuda(a): return a + 2 # displays 3, because my_func is called print(my_func(2)) # displays 4, because my_func_cuda is called with tvm.target.cuda(): print(my_func(2)) """ dispatch_dict = {} func_name = fdefault.__name__ def register(key, func=None, override=False): """Register function to be the dispatch function. Parameters ---------- key : str or list of str The key to be registered. func : function The function to be registered. override : bool Whether to override the existing registration. Returns ------- The register function if func is not specified; otherwise the registered function. 
""" def _do_reg(myf): key_list = [key] if isinstance(key, str) else key for k in key_list: if k in dispatch_dict and not override: raise ValueError("Key is already registered for %s" % func_name) dispatch_dict[k] = myf return myf if func: return _do_reg(func) return _do_reg def dispatch_func(func, *args, **kwargs): """The wrapped dispatch function""" target = Target.current() if target is None: return func(*args, **kwargs) for k in target.keys: if k in dispatch_dict: return dispatch_dict[k](*args, **kwargs) return func(*args, **kwargs) def get_packed_func(): """The wrapped to get dispatched function""" target = Target.current() if target is None: return fdefault for k in target.keys: if k in dispatch_dict: return dispatch_dict[k] return fdefault fdecorate = decorate(fdefault, dispatch_func) fdecorate.register = register fdecorate.fdefault = fdefault fdecorate.dispatch_dict = dispatch_dict fdecorate.get_packed_func = get_packed_func return fdecorate
https://github.com/zk-ml/tachikoma
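The docstrings above already demonstrate single-key registration; one detail worth an extra example is that a specialization may be registered under a list of keys, and dispatch scans the current target's keys in order.

import tvm

@tvm.target.generic_func
def pick_impl():
    return "fallback"

# A single implementation can serve several target keys at once.
@pick_impl.register(["cpu", "arm_cpu"])
def pick_impl_cpu():
    return "cpu impl"

assert pick_impl() == "fallback"   # no target in scope
with tvm.target.Target("llvm"):    # llvm targets carry the "cpu" key
    assert pick_impl() == "cpu impl"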
python/tvm/target/intrin.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Target dependent intrinsic registration.""" from tvm.ir import register_intrin_lowering from tvm.tir import call_pure_extern def _rule_float_suffix(op): """Intrinsic rule: Add float suffix if it is float32. This is an example intrinsic generation rule. Parameters ---------- op : PrimExpr The call expression of original intrinsic. Returns ------- ret : PrimExpr The translated intrinsic rule. Return same op if no translation is possible. See Also -------- register_intrin_lowering : The registration function for intrinsic lowering rule. """ name = op.op.name assert name.startswith("tir.") prefix = name[4:] if op.dtype == "float32": return call_pure_extern(op.dtype, "%sf" % prefix, *op.args) if op.dtype == "float64": return call_pure_extern(op.dtype, prefix, *op.args) return op def _rule_float_direct(op): """Intrinsic rule: Directly call pure extern function for floats. This is an example intrinsic generation rule. Parameters ---------- op : PrimExpr The call expression of original intrinsic. Returns ------- ret : PrimExpr The translated intrinsic rule. Return same op if no translation is possible. See Also -------- register_intrin_lowering : The registration function for intrinsic lowering rule. """ if str(op.dtype).startswith("float"): return call_pure_extern(op.dtype, op.op.name[4:], *op.args) return None # opencl pattern for exp register_intrin_lowering("tir.exp", target="opencl", f=_rule_float_direct, level=99) # default pattern for exp register_intrin_lowering("tir.exp", target="default", f=_rule_float_suffix, level=99)
https://github.com/zk-ml/tachikoma
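Following the two example rules above, registering a rule for another op is mechanical. The sketch below targets tir.log on opencl and uses level=100 so it does not collide with any rule already registered at a lower priority level; the op/target choice here is illustrative.

from tvm.ir import register_intrin_lowering
from tvm.tir import call_pure_extern

def _rule_log_direct(op):
    # Same shape as _rule_float_direct: strip the "tir." prefix and call
    # the extern function directly for floating-point dtypes.
    if str(op.dtype).startswith("float"):
        return call_pure_extern(op.dtype, op.op.name[4:], *op.args)
    return None

register_intrin_lowering("tir.log", target="opencl", f=_rule_log_direct, level=100)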
python/tvm/target/tag.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Target tags""" from typing import Any, Dict, Optional from . import _ffi_api from .target import Target def list_tags() -> Optional[Dict[str, Target]]: """Returns a dict of tags, which maps each tag name to its corresponding target. Returns ------- tag_dict : Optional[Dict[str, Target]] The dict of tags mapping each tag name to its corresponding target. None if TVM is built in runtime-only mode. """ if hasattr(_ffi_api, "TargetTagListTags"): return _ffi_api.TargetTagListTags() return None def register_tag(name: str, config: Dict[str, Any], override: bool = False) -> Optional[Target]: """Add a user-defined tag into the target tag registry. Parameters ---------- name: str Name of the target, e.g. "nvidia/gtx1080ti" config : Dict[str, Any] The config dict used to create the target override: bool A boolean flag indicating if overriding existing tags are allowed. If False and the tag has been registered already, an exception will be thrown. Returns ------- target : Optional[Target] The target corresponding to the tag None if TVM is built in runtime-only mode. Examples -------- .. code-block:: python register_tag("nvidia/gtx1080ti", config={ "kind": "cuda", "arch": "sm_61", }) """ if hasattr(_ffi_api, "TargetTagAddTag"): return _ffi_api.TargetTagAddTag(name, config, override) return None # To check the correctness of all registered tags, the call is made in library loading time. list_tags() # We purposely maintain all tags in the C++ side to support pure C++ use cases, # and the Python API is only used for fast prototyping. register_tag( "nvidia/gtx1080ti", config={ "kind": "cuda", "arch": "sm_61", }, )
https://github.com/zk-ml/tachikoma
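Once registered, a tag name can stand in for a full target string anywhere a target is accepted; the guard below accounts for runtime-only builds, where list_tags() returns None.

import tvm
from tvm.target import list_tags

tags = list_tags()
if tags is not None and "nvidia/gtx1080ti" in tags:
    # A tag expands into a fully configured Target.
    target = tvm.target.Target("nvidia/gtx1080ti")
    assert target.kind.name == "cuda"
    assert str(target.attrs["arch"]) == "sm_61"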
python/tvm/target/target.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Target data structure.""" import json import re import warnings import tvm._ffi from tvm._ffi import register_func as _register_func from tvm.runtime import Object, convert from tvm.runtime.container import String from tvm.ir.container import Map, Array from . import _ffi_api @tvm._ffi.register_object class TargetKind(Object): """Kind of a compilation target""" @property def options(self): """Returns the dict of available option names and types""" return dict(_ffi_api.ListTargetKindOptions(self)) @staticmethod def options_from_name(kind_name: str): """Returns the dict of available option names and types from a name of TargetKind""" return dict(_ffi_api.ListTargetKindOptionsFromName(kind_name)) class TargetFeatures: def __init__(self, target): self.target = target def __getattr__(self, name: str): return _ffi_api.TargetGetFeature(self.target, name) @tvm._ffi.register_object class Target(Object): """Target device information, use through TVM API. Note ---- You can create target using the constructor or the following functions - :py:func:`tvm.target.arm_cpu` create arm_cpu target - :py:func:`tvm.target.cuda` create CUDA target - :py:func:`tvm.target.rocm` create ROCM target - :py:func:`tvm.target.mali` create Mali target - :py:func:`tvm.target.intel_graphics` create Intel Graphics target """ def __init__(self, target, host=None): """Construct a TVM target object from 1) Raw target string 2) Target config dict 3) Target tag Parameters ---------- target : Union[str, Dict[str, Any]] Can be one of a literal target string, a json string describing a configuration, or a dictionary of configuration options. When using a dictionary or json string to configure target, the possible values are: kind : str (required) Which codegen path to use, for example 'llvm' or 'cuda'. keys : List of str (optional) A set of strategies that can be dispatched to. When using "kind=opencl" for example, one could set keys to ["mali", "opencl", "gpu"]. device : str (optional) A single key that corresponds to the actual device being run on. This will be effectively appended to the keys. libs : List of str (optional) The set of external libraries to use. For example ['cblas', 'mkl']. system-lib : bool (optional) If True, build a module that contains self registered functions. Useful for environments where dynamic loading like dlopen is banned. mcpu : str (optional) The specific cpu being run on. Serves only as an annotation. model : str (optional) An annotation indicating what model a workload came from. runtime : str (optional) An annotation indicating which runtime to use with a workload. mtriple : str (optional) The llvm triplet describing the target, for example "arm64-linux-android". 
mattr : List of str (optional) The llvm features to compile with, for example ["+avx512f", "+mmx"]. mfloat-abi : str (optional) An llvm setting that is one of 'hard' or 'soft' indicating whether to use hardware or software floating-point operations. mabi : str (optional) An llvm setting. Generate code for the specified ABI, for example "lp64d". host : Union[str, Dict[str, Any]] (optional) Description for target host. Can be recursive. Similar to target. host : Optional[Union[str, Dict[str, Any]]] Similar to target but for target host. Can be one of a literal target host string, a json string describing a configuration, or a dictionary of configuration options. When using a dictionary or json string to configure target, the possible values are same as target. """ if isinstance(target, str) and "-libs=mkldnn" in target: target = target.replace("mkldnn", "dnnl") warnings.warn( "Legacy support of mkldnn is going to be deprecated. " "Please use -libs=dnnl instead.", ) if isinstance(target, (dict, str)): target = convert(target) if isinstance(host, (dict, str)): host = convert(host) if target is None or not isinstance(target, (Map, String, Target)): raise ValueError("target has to be a string or dictionary.") if host is not None: if not isinstance(host, (Map, String, Target)): raise ValueError("target host has to be a string or dictionary.") self.__init_handle_by_constructor__(_ffi_api.Target, Target(target), Target(host)) else: self.__init_handle_by_constructor__(_ffi_api.Target, target) def __enter__(self): _ffi_api.TargetEnterScope(self) return self def __exit__(self, ptype, value, trace): _ffi_api.TargetExitScope(self) def export(self): return _ffi_api.TargetExport(self) def with_host(self, host=None): return _ffi_api.WithHost(self, Target(host)) @staticmethod def current(allow_none=True): """Returns the current target. Parameters ---------- allow_none : bool Whether allow the current target to be none Raises ------ ValueError if current target is not set. """ return _ffi_api.TargetCurrent(allow_none) @property def arch(self): """Returns the cuda arch from the target if it exists.""" return str(self.attrs.get("arch", "")) @property def max_num_threads(self): """Returns the max_num_threads from the target if it exists.""" return int(self.attrs["max_num_threads"]) @property def thread_warp_size(self): """Returns the thread_warp_size from the target if it exists.""" return int(self.attrs["thread_warp_size"]) @property def max_function_args(self): return int(self.attrs.get("max_function_args", -1)) @property def device_name(self): return str(self.attrs.get("device", "")) @property def model(self): """Returns model from the target if it exists.""" return str(self.attrs.get("model", "unknown")) @property def mcpu(self): """Returns the mcpu from the target if it exists.""" return str(self.attrs.get("mcpu", "")) @property def mattr(self): """Returns the mattr from the target if it exists.""" return list(self.attrs.get("mattr", [])) @property def supports_integer_dot_product(self): if self.attrs.get("supports_integer_dot_product", []): return bool(self.attrs["supports_integer_dot_product"]) if self.kind.name == "cuda": sm_version = int(self.arch.split("_")[1]) if sm_version >= 61: return True return False @property def libs(self): return list(self.attrs.get("libs", [])) @property def features(self): return TargetFeatures(self) def get_kind_attr(self, attr_name): """Get additional attribute about the target kind. Parameters ---------- attr_name : str The attribute name. 
Returns ------- value : object The attribute value """ return _ffi_api.TargetKindGetAttr(self.kind, attr_name) def get_target_device_type(self): """Returns the device_type for this target.""" return _ffi_api.TargetGetDeviceType(self) @staticmethod def list_kinds(): """Returns the list of available target names.""" return list(_ffi_api.ListTargetKinds()) @staticmethod def canon_target(target): """Given a single target-like object, returns the TVM Target object representing it. Can convert from: - None (to None). - An existing TVM Target object. - A string, eg "cuda" or "cuda -arch=sm_80" - A Python dictionary, eg {"kind": "cuda", "arch": "sm_80" } """ if target is None: return None if isinstance(target, Target): return target return Target(target) @staticmethod def canon_target_and_host(target, target_host=None): """Returns a TVM Target capturing target and target_host. Also returns the host in canonical form. The given target can be in any form recognized by Target.canon_target. If given, target_host can be in any form recognized by Target.canon_target. If target_host is given it will be set as the 'host' in the result Target object (and a warning given). Note that this method does not support heterogeneous compilation targets. """ target = Target.canon_target(target) if target is None: assert target_host is None, "Target host is not empty when target is empty." return target, target_host if target.host is None and target_host is not None: warnings.warn( "target_host parameter is going to be deprecated. " "Please pass in tvm.target.Target(target, host=target_host) instead." ) target_host = Target.canon_target(target_host) target = target.with_host(target_host) if target is not None: # In case the target already had a host, extract it here. target_host = target.host return target, target_host @staticmethod def canon_multi_target(multi_targets): """Given a single target-like object, or a collection-like object of target-like objects, returns a TVM Array of TVM Target objects representing then. Can convert from: - None (to None). - A single target-like object in a form recognized by canon_target. - A Python list or TVM Array of target-like objects in a form recognized by canon_target. - A Python dict or TVM Map from TVM IntImm objects representing device types to a target-like object in a form recognized by canon_target. (This is a legacy method to represent heterogeneous targets. The keys are ignored.) """ if multi_targets is None: return None if isinstance(multi_targets, (dict, Map)) and "kind" not in multi_targets: # Convert legacy heterogeneous map representation to ordinary list of targets. return Target.canon_multi_target(list(multi_targets.values())) if isinstance(multi_targets, (list, Array)): # Multiple Target results. return convert([Target.canon_target(tgt) for tgt in multi_targets]) # Single Target result. return convert([Target.canon_target(multi_targets)]) @staticmethod def canon_multi_target_and_host(target, target_host=None): """Returns a TVM Array<Target> capturing target and target_host. The given target can be in any form recognized by Target.canon_multi_target. If given, target_host can be in any form recognized by Target.canon_target. If target_host is given it will be set as the 'host' in each result Target object (and a warning given). """ # Convert target to Array<Target>, but not yet accounting for any host. raw_targets = Target.canon_multi_target(target) assert raw_targets is not None and len(raw_targets) > 0 # Convert host to Target, if given. 
if raw_targets[0].host is None and target_host is not None: warnings.warn( "target_host parameter is going to be deprecated. " "Please pass in tvm.target.Target(target, host=target_host) instead." ) # Make sure the (canonical) host is captured in all the (canonical) targets. target_host = Target.canon_target(target_host) raw_targets = convert([tgt.with_host(target_host) for tgt in raw_targets]) return raw_targets @staticmethod def canon_target_map_and_host(target_map, target_host=None): """Returns target_map as a map from TVM Target's in canonical form to IRModules. The keys of the input target_map can be in any form recognized by Target.canon_target. Similarly, if given, target_host can be in any form recognized by Target.canon_target. The final target_map keys will capture the target_host in canonical form. Also returns the target_host in canonical form.""" new_target_map = {} canonical_target_host = None for tgt, mod in target_map.items(): tgt = Target.canon_target(tgt) assert tgt is not None if canonical_target_host is None: if tgt.host is not None: canonical_target_host = tgt.host elif target_host is not None: # No deprecation warning in this case since host may have been manufactured # behind the scenes in build_module.py build. canonical_target_host = Target.canon_target(target_host) if tgt.host is None and canonical_target_host is not None: tgt = tgt.with_host(canonical_target_host) new_target_map[tgt] = mod return new_target_map, canonical_target_host @staticmethod def target_or_current(target): """Returns target, or the current target in the environment if target is None""" if target is None: target = Target.current() if target is None: raise ValueError("Target is not set in env or passed as argument.") return target # TODO(@tvm-team): Deprecate the helper functions below. Encourage the usage of config dict instead. def _merge_opts(opts, new_opts): """Helper function to merge options""" if isinstance(new_opts, str): new_opts = new_opts.split() if new_opts: opt_set = set(opts) new_opts = [opt for opt in new_opts if opt not in opt_set] return opts + new_opts return opts def cuda(model="unknown", arch=None, options=None): """Returns a cuda target. Parameters ---------- model: str The model of cuda device (e.g. 1080ti) arch: str The cuda architecture (e.g. sm_61) options : str or list of str Additional options """ opts = _merge_opts(["-model=%s" % model], options) if arch: opts = _merge_opts(["-arch=%s" % arch], opts) if not any(["-arch" in opt for opt in opts]): warnings.warn("Try specifying cuda arch by adding 'arch=sm_xx' to your target.") return Target(" ".join(["cuda"] + opts)) def rocm(model="unknown", options=None): """Returns a ROCM target. Parameters ---------- model: str The model of this device options : str or list of str Additional options """ opts = _merge_opts(["-model=%s" % model], options) return Target(" ".join(["rocm"] + opts)) def mali(model="unknown", options=None): """Returns a ARM Mali GPU target. Parameters ---------- model: str The model of this device options : str or list of str Additional options """ opts = ["-device=mali", "-model=%s" % model] opts = _merge_opts(opts, options) return Target(" ".join(["opencl"] + opts)) def intel_graphics(model="unknown", options=None): """Returns an Intel Graphics target. 
Parameters ---------- model: str The model of this device options : str or list of str Additional options """ opts = ["-device=intel_graphics", "-model=%s" % model, "-thread_warp_size=16"] opts = _merge_opts(opts, options) return Target(" ".join(["opencl"] + opts)) MICRO_SUPPORTED_MODELS = { "host": [], "atsamd51": ["-mcpu=cortex-m4"], "cxd5602gg": ["-mcpu=cortex-m4"], "esp32": [], "imxrt10xx": ["-mcpu=cortex-m7"], "mps2_an521": ["-mcpu=cortex-m33"], "mps3_an547": ["-mcpu=cortex-m55"], "nrf52840": ["-mcpu=cortex-m4+nodsp"], "nrf5340dk": ["-mcpu=cortex-m33"], "rp2040": ["-mcpu=cortex-m0"], "sam3x8e": ["-mcpu=cortex-m3"], "stm32f746xx": ["-mcpu=cortex-m7", "-march=armv7e-m"], "stm32h7xx": ["-mcpu=cortex-m7"], "stm32l4r5zi": ["-mcpu=cortex-m4"], "stm32u5xx": ["-mcpu=cortex-m33"], "zynq_mp_r5": ["-mcpu=cortex-r5"], } def micro(model="unknown", options=None): """Returns a microTVM target. Parameters ---------- model : str Canonically identifies the target device. This is typically a device board level name. The allowed values are MICRO_SUPPORTED_MODELS.keys(). options : str or list of str Additional options """ if model not in MICRO_SUPPORTED_MODELS: raise ValueError(f"Model {model} not supported by tvm.target.micro.") opts = _merge_opts( MICRO_SUPPORTED_MODELS[model] + [f"-model={model}"], options, ) # NOTE: in the future, the default micro target will be LLVM except when # external dependencies are present. return Target(" ".join(["c"] + opts)) def arm_cpu(model="unknown", options=None): """Returns a ARM CPU target. This function will also download pre-tuned op parameters when there is none. Parameters ---------- model: str SoC name or phone name of the arm board. options : str or list of str Additional options """ trans_table = { "pixel2": ["-model=snapdragon835", "-mtriple=arm64-linux-android", "-mattr=+neon"], "mate10": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"], "mate10pro": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"], "p20": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"], "p20pro": ["-model=kirin970", "-mtriple=arm64-linux-android", "-mattr=+neon"], "rasp3b": ["-model=bcm2837", "-mtriple=armv7l-linux-gnueabihf", "-mattr=+neon"], "rasp4b": [ "-model=bcm2711", "-mtriple=armv8l-linux-gnueabihf", "-mattr=+neon", "-mcpu=cortex-a72", ], "rasp4b64": [ "-model=bcm2711", "-mtriple=aarch64-linux-gnu", "-mattr=+neon", "-mcpu=cortex-a72", ], "rk3399": ["-model=rk3399", "-mtriple=aarch64-linux-gnu", "-mattr=+neon"], "pynq": ["-model=pynq", "-mtriple=armv7a-linux-eabi", "-mattr=+neon"], "ultra96": ["-model=ultra96", "-mtriple=aarch64-linux-gnu", "-mattr=+neon"], "beagleai": [ "-model=beagleai", "-mtriple=armv7a-linux-gnueabihf", "-mattr=+neon,+vfp4,+thumb2", "-mcpu=cortex-a15", ], "stm32mp1": [ "-model=stm32mp1", "-mtriple=armv7a-linux-gnueabihf", "-mattr=+neon,+vfp4,+thumb2", "-mcpu=cortex-a7", ], "thunderx": [ "-model=thunderx", "-mtriple=aarch64-linux-gnu", "-mattr=+neon,+crc,+lse", "-mcpu=thunderxt88", ], } pre_defined_opt = trans_table.get(model, ["-model=%s" % model]) opts = ["-keys=arm_cpu,cpu", "-device=arm_cpu"] + pre_defined_opt opts = _merge_opts(opts, options) return Target(" ".join(["llvm"] + opts)) def rasp(options=None): """Return a Raspberry 3b target. Parameters ---------- options : str or list of str Additional options """ warnings.warn( "tvm.target.rasp() is going to be deprecated. 
" 'Please use tvm.target.arm_cpu("rasp3b")' ) return arm_cpu("rasp3b", options) def vta(model="unknown", options=None): opts = ["-device=vta", "-keys=vta,cpu", "-model=%s" % model] opts = _merge_opts(opts, options) return Target(" ".join(["ext_dev"] + opts)) def bifrost(model="unknown", options=None): """Return an ARM Mali GPU target (Bifrost architecture). Parameters ---------- options : str or list of str Additional options """ opts = ["-device=bifrost", "-model=%s" % model] opts = _merge_opts(opts, options) return Target(" ".join(["opencl"] + opts)) def riscv_cpu(model="sifive-u54", options=None): """Returns a RISC-V CPU target. Default: sifive-u54 rv64gc Parameters ---------- model: str CPU name. options : str or list of str Additional options """ trans_table = { "sifive-e31": [ "-model=sifive-e31", "-mtriple=riscv32-unknown-linux-gnu", "-mcpu=sifive-e31", "-mabi=ilp32", # cc: riscv64-unknown-linux-gnu-g++ -march=rv32imac -mabi=ilp32 -mcpu=sifive-e31 ], "sifive-e76": [ "-model=sifive-e76", "-mtriple=riscv32-unknown-linux-gnu", "-mcpu=sifive-e76", "-mabi=ilp32", # cc: riscv64-unknown-linux-gnu-g++ -march=rv32imafc -mabi=ilp32 -mcpu=sifive-e76 ], "sifive-u54": [ "-model=sifive-u54", "-mtriple=riscv64-unknown-linux-gnu", "-mcpu=sifive-u54", "-mabi=lp64d", # cc: riscv64-unknown-linux-gnu-g++ -march=rv64gc -mabi=lp64d -mcpu=sifive-u54 ], "sifive-u74": [ "-model=sifive-u74", "-mtriple=riscv64-unknown-linux-gnu", "-mcpu=sifive-u74", "-mabi=lp64d", # cc: riscv64-unknown-linux-gnu-g++ -march=rv64gc -mabi=lp64d -mcpu=sifive-u74 ], } pre_defined_opt = trans_table.get(model, ["-model=%s" % model]) opts = ["-keys=arm_cpu,cpu", "-device=arm_cpu"] + pre_defined_opt opts = _merge_opts(opts, options) return Target(" ".join(["llvm"] + opts)) def hexagon(cpu_ver="v66", **kwargs): """Returns a Hexagon target. Parameters ---------- cpu_ver : str (default: "v66") CPU version used for code generation. Not all allowed cpu str will be valid, LLVM will throw an error. Recognized keyword parameters ----------------------------- hvx : int (default: 128) Size of HVX vector in bytes. Value of 0 disables HVX codegen. llvm_options : str or list of str (default: None) User defined compiler arguments. use_qfloat : bool (default: True for cpu_ver >= v68, False otherwise) Whether to use QFloat HVX instructions. use_ieee_fp : bool (default: False) Whether to use IEEE HVX instructions num_cores : int (default: 4) The number of HVX threads. This attribute is required by meta scheduler. Note: Floating point support in HVX requires LLVM 14+. """ # Some of the target parameters correspond to target kind attributes # listed in src/target/target_kind.cc. For those parameters, their # names follow the attribute names with the exception of '_' being used # in place of '-'. # Example compiler arguments # llvm -mtriple=hexagon -mcpu=hexagonv66 -mattr=+hvxv66,+hvx-length128b def get_arch_version(cpu_ver): m = re.match(r"v([0-9]+).*", cpu_ver) assert m return int(m.group(1)) # Check for valid codegen cpu valid_hex = ["v65", "v66", "v67", "v67t", "v68", "v69"] try: cpu_ver = cpu_ver[cpu_ver.index("v") :].lower() assert cpu_ver in valid_hex except: msg = "{} is not a valid Hexagon version\nvalid versions include {}" raise ValueError(msg.format(cpu_ver, valid_hex)) from None # Target configuration: arch_version = get_arch_version(cpu_ver) config = { "hvx": 128, "llvm_options": None, "use_qfloat": arch_version >= 68, "use_ieee_fp": False, } config.update(kwargs) # Warn about obsolete parameter names. 
if config.get("sim_args") or config.get("sim_options"): msg = ( "Setting simulator options in target is deprecated, set environment variable " "HEXAGON_SIM_ARGS instead" ) warnings.warn(msg, stacklevel=2) if config.get("llvm_args"): msg = "The keyword parameter 'llvm_args' is deprecated, use 'llvm_options' instead" warnings.warn(msg, stacklevel=2) config.update({"llvm_options": config["llvm_args"]}) # LLVM target string def create_llvm_target(cpu_ver, config): """Create LLVM target string.""" target = " -mtriple=hexagon" mcpu = " -mcpu=hexagon" + cpu_ver # Process the options that affect target features and return the # target feature string. def create_target_features(config): features = { "use_qfloat": "hvx-qfloat", "use_ieee_fp": "hvx-ieee-fp", } tfs = [] if config["hvx"] > 0: valid_hvx = [0, 64, 128] if not config["hvx"] in valid_hvx: raise ValueError("Invalid hvx value, should be one of " + str(valid_hvx)) tfs += ["+hvx" + cpu_ver, "+hvx-length" + str(config["hvx"]) + "b"] else: tfs += ["-hvx"] # All the additional features happen to only apply to v68+. # Don't bother applying them (even with '-') to lower versions. if arch_version >= 68: tfs += ["-+"[config[f]] + features[f] for f in features] return "-mattr=" + ",".join(tfs) if tfs else "" return target + mcpu + " " + create_target_features(config) # LLVM options string def create_llvm_options(cpu_ver, config): # pylint: disable=unused-argument """Create LLVM options string.""" llvm_options = config["llvm_options"] # To enable auto-vectorization for v68 target added the below llvm-option by default if arch_version == 68: if not llvm_options: llvm_options = "" llvm_options += " -force-hvx-float" # TVM's option parser doesn't allow '=' in values, but '=' can # appear in LLVM flags. Replace it with '@', since it's unlikely # that '@' will be used in another context. if llvm_options is None or len(llvm_options.strip()) == 0: return "" args = [s.replace("=", "@") for s in llvm_options.split()] return "--llvm-options=" + ",".join(args) target_str = create_llvm_target(cpu_ver, config) llvm_str = create_llvm_options(cpu_ver, config) args_list = target_str.split() + llvm_str.split() num_cores = config["num_cores"] if "num_cores" in kwargs else 4 args_list.append("--num-cores=%d" % num_cores) return Target(" ".join(["hexagon"] + args_list)) STM32_SUPPORTED_SERIES = { # High-Performance "stm32H7xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m7", "-march=armv7e-m"], "stm32F7xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m7"], "stm32F4xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m4"], "stm32F2xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m3"], # Mainstream "stm32G0xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m0+"], "stm32F0xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m0"], "stm32F1xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m3"], "stm32G4xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m4"], "stm32F3xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m4"], # Low-power "stm32U5xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m33"], "stm32L5xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m33"], "stm32L4xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m4"], "stm32L1xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m3"], "stm32L0xx": ["-keys=arm_cpu,cpu", "-device=arm_cpu", "-mcpu=cortex-m0+"], } def stm32(series="unknown", options=None): """Returns a STM32 target. 
Parameters ---------- series: str Series name of a STM32 board series, eg. stm32H7xx or stm32F4xx options : str or list of str Additional options """ if series not in STM32_SUPPORTED_SERIES: raise ValueError(f"Series {series} is not supported by tvm.target.stm32.") opts = _merge_opts(STM32_SUPPORTED_SERIES[series], options) return Target(" ".join(["c"] + opts)) def adreno(model="unknown", options=None): """Returns a Qualcomm GPU target. Parameters ---------- model: str The model of this device options : str or list of str Additional options """ opts = ["-device=adreno", "-model=%s" % model] opts = _merge_opts(opts, options) return Target(" ".join(["opencl"] + opts)) def create(target): """Deprecated. Use the constructor of :py:mod:`tvm.target.Target` directly.""" warnings.warn("tvm.target.create() is being deprecated. Please use tvm.target.Target() instead") return Target(target) @_register_func("target._load_config_dict") def _load_config_dict(config_dict_str): try: config = json.loads(config_dict_str) except json.decoder.JSONDecodeError: return None if not isinstance(config, dict): return None for key in config.keys(): if not isinstance(key, str): return None return config
https://github.com/zk-ml/tachikoma
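Closing out the target module, a short example of the host-handling conventions defined above: passing host= to the constructor is the preferred spelling, while the canon_* helpers are what the build pipeline uses internally.

import tvm

target = tvm.target.Target("cuda", host="llvm")
assert target.kind.name == "cuda"
assert target.host.kind.name == "llvm"

# The canonicalization helpers accept strings, dicts, or Target objects,
# and attach the host to every target in the result.
raw_targets = tvm.target.Target.canon_multi_target_and_host(["cuda", "llvm"], "llvm")
assert len(raw_targets) == 2
assert all(t.host is not None for t in raw_targets)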