gallery/how_to/tune_with_autoscheduler/tune_sparse_x86.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Auto-scheduling Sparse Matrix Multiplication on CPU with Custom Sketch Rule =========================================================================== **Author**: `Chengfan Jia <https://github.com/jcf94/>`_ This is a tutorial on how to use the auto-scheduler to tune a sparse matrix multiplication for CPUs. Auto-scheduler is designed to automatically explore the best-performing schedule for a given computation declaration. Sometimes, however, we may want to try special ops that are not well supported by auto-scheduler's default sketch rules and therefore end up with poor performance. Fortunately, auto-scheduler currently allows users to provide a CustomSketch rule to cover these cases. We use sparse matrix multiplication as an example in this tutorial to demonstrate how to implement a custom sketch rule and plug it into the auto-scheduler's search policy. Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import os import numpy as np import tvm import tvm.testing from tvm import te, auto_scheduler, runtime, topi from tvm.auto_scheduler import _ffi_api from tvm.topi.utils import get_const_tuple from tvm.topi.sparse.utils import random_bsr_matrix ###################################################################### # Define the computation # ^^^^^^^^^^^^^^^^^^^^^^ # To begin with, let us define the computation of a sparse matmul with several relu and bias add operations. # The function should return the list of input/output tensors. # From these tensors, the auto-scheduler can get the whole computational graph. @auto_scheduler.register_workload def sparse_dense(M, N, K, w_data_shape, w_indices_shape, w_indptr_shape, dtype): X = te.placeholder(shape=(M, K), dtype=dtype) W_data = te.placeholder(shape=w_data_shape, dtype=dtype) W_indices = te.placeholder(shape=w_indices_shape, dtype="int32") W_indptr = te.placeholder(shape=w_indptr_shape, dtype="int32") B = te.placeholder(shape=(M, N), dtype=dtype) out = topi.nn.sparse_dense(topi.nn.relu(X), W_data, W_indices, W_indptr) out = te.compute((M, N), lambda i, j: out[i, j] + B[i, j], name="BiasAdd") out = topi.nn.relu(out) return [X, W_data, W_indices, W_indptr, B, out] ###################################################################### # Special step for sparse workload # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # During schedule tuning, auto-scheduler will use random inputs to measure the performance of a # generated schedule. 
However, we cannot directly use a random array as the input of a sparse op, because # the "indices" and "indptr" arrays are meaningful for the computation. # # To solve this problem, we register them as special buffers and load them when the measured program is run. # See `tvm.auto_scheduler.measure.py` for more details. # Define the basic shapes of this sparse computation M = 128 K = 256 N = 512 BS_R = 16 BS_C = 1 density = 0.6 # Generate the test data with numpy X_np = np.random.randn(M, K).astype("float32") X_np = np.maximum(np.zeros((M, K), dtype="float32"), X_np) # Relu W_sp_np = random_bsr_matrix(N, K, BS_R, BS_C, density=density, dtype="float32") W_np = W_sp_np.todense() Y_np = X_np @ W_np.T # Process the matrix multiplication B_np = np.random.randn(M, N).astype("float32") Y_np = Y_np + B_np # Bias add Y_np = np.maximum(np.zeros((M, N), dtype="float32"), Y_np) # Relu ###################################################################### # Create the search task # ^^^^^^^^^^^^^^^^^^^^^^ # We then create a search task with M=128, K=256, N=512 and dtype="float32" # If your machine supports AVX instructions, you can # # - replace "llvm" below with "llvm -mcpu=core-avx2" to enable AVX2 # - replace "llvm" below with "llvm -mcpu=skylake-avx512" to enable AVX-512 target = tvm.target.Target("llvm") # Register the sparse data to task inputs prefix = "sparse_dense_bsr_%d_%d_%d_%d_%d_%d_" % ( N, K, BS_R, BS_C, W_sp_np.indices.shape[0], W_sp_np.indptr.shape[0], ) task = tvm.auto_scheduler.SearchTask( func=sparse_dense, args=(M, N, K, W_sp_np.data.shape, W_sp_np.indices.shape, W_sp_np.indptr.shape, "float32"), target=target, task_inputs={ prefix + "W_data": runtime.ndarray.array(W_sp_np.data), prefix + "W_indices": runtime.ndarray.array(W_sp_np.indices), prefix + "W_indptr": runtime.ndarray.array(W_sp_np.indptr), }, task_inputs_save_to_file=True, ) # Inspect the computational graph print("Computational DAG:") print(task.compute_dag) ###################################################################### # Write the custom sketch for sparse dense op # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # Before tuning, we will need to define the CustomSketchRule for the sparse dense op. # # A CustomSketchRule consists of two parts: the condition function and the apply function. # # - condition function: describes when to apply this sketch rule. For example, we can apply # the rule only to the sparse ops, by matching their name and tag. # - apply function: describes how to generate the initial sketch. You can implement it using # the loop state APIs provided by auto-scheduler. 
def meet_condition_func(search_policy, state, stage_id): state = auto_scheduler.loop_state.State(state, search_policy.search_task.compute_dag) if state.stages[stage_id].op.tag in [ "sparse_dense_sp_rhs_bsrmm", "sparse_dense_sp_rhs_bsrmm_block", ]: return auto_scheduler.PreloadCustomSketchRule.APPLY_AND_SKIP_REST else: return auto_scheduler.PreloadCustomSketchRule.PASS def apply_func(search_policy, state, stage_id): ret = [] s0 = auto_scheduler.loop_state.State(state, search_policy.search_task.compute_dag) if s0.stages[stage_id].op.tag == "sparse_dense_sp_rhs_bsrmm_block": return [s0.state_object, stage_id - 1] sparse_dense = s0.stages[stage_id].op sparse_dense_block = s0.stages[stage_id - 1].op assert sparse_dense.tag == "sparse_dense_sp_rhs_bsrmm" assert sparse_dense_block.tag == "sparse_dense_sp_rhs_bsrmm_block" # Set the default consumer of compute block consumer = sparse_dense # If sparse dense has a single elementwise consumer # We can compute inline the sparse_dense output stage consumers = _ffi_api.SearchPolicyUtilsGetConsumers( search_policy.search_task, s0.state_object, stage_id ) if len(consumers) == 1: consumer_id = int(consumers.items()[0][0]) if _ffi_api.SearchPolicyUtilsIsElementwiseMatch( search_policy.search_task, s0.state_object, stage_id, consumer_id ): consumer = s0.stages[consumer_id].op s0.compute_inline(sparse_dense) i, nb_j, j, row_offset, c = s0[sparse_dense_block].iters m, n = s0[consumer].iters i0, i1, i2 = s0.split(sparse_dense_block, i, [None, None]) m0, m1 = s0.follow_split(consumer, m, len(s0.transform_steps) - 1, 1) j0, j1 = s0.split(sparse_dense_block, nb_j, [None]) n0, n1 = s0.follow_split(consumer, n, len(s0.transform_steps) - 1, 1) s0.reorder(sparse_dense_block, [i0, j0, i1, j1, row_offset, i2, j, c]) s0.reorder(consumer, [m0, n0, m1, n1]) s0.compute_at(sparse_dense_block, consumer, n0) ret.append([s0.state_object, stage_id - 2]) return ret ###################################################################### # Next, we set parameters for the auto-scheduler with the custom sketch plugged in. # # * :code:`num_measure_trials` is the number of measurement trials we can use during the search. # We only make 10 trials in this tutorial for a fast demonstration. In practice, 1000 is a # good value for the search to converge. You can do more trials according to your time budget. # * In addition, we use :code:`RecordToFile` to dump measurement records into a file # `sparse_dense.json`. # The measurement records can be used to query the history best, resume the search, # and do more analyses later. # * see :any:`auto_scheduler.TuningOptions` for more parameters # * Here, we need to create a :code:`auto_scheduler.SketchPolicy` object, and add the custom sketch # rule as a `init_search_callbacks`. log_file = "sparse_dense.json" tune_option = auto_scheduler.TuningOptions( num_measure_trials=10, measure_callbacks=[auto_scheduler.RecordToFile(log_file)], verbose=2, ) search_policy = auto_scheduler.SketchPolicy( task, program_cost_model=auto_scheduler.XGBModel(), init_search_callbacks=[ auto_scheduler.PreloadCustomSketchRule(meet_condition_func, apply_func, "SparseDense") ], ) ###################################################################### # Run the search # ^^^^^^^^^^^^^^ # Now we get all inputs ready. # We can kick off the search and let the auto-scheduler do its magic. # After some measurement trials, we can load the best schedule from the log # file and apply it. 
# Run auto-tuning (search) # Notice: We do not run the tuning in our webpage server since it takes too long. # Uncomment the following line to run it by yourself. task.tune(tune_option, search_policy) # Apply the best schedule sch, args = task.apply_best(log_file) ###################################################################### # We can lower the schedule to see the IR after auto-scheduling. # The auto-scheduler correctly performs optimizations including multi-level tiling, # layout transformation, parallelization, vectorization, unrolling, and operator fusion. print("Lowered TIR:") print(tvm.lower(sch, args, simple_mode=True)) ###################################################################### # Check correctness and evaluate performance # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # We build the binary and check its correctness and performance. func = tvm.build(sch, args, target) dev = tvm.cpu() X_tvm = tvm.nd.array(X_np, device=dev) W_data_tvm = tvm.nd.array(W_sp_np.data, device=dev) W_indices_tvm = tvm.nd.array(W_sp_np.indices, device=dev) W_indptr_tvm = tvm.nd.array(W_sp_np.indptr, device=dev) B_tvm = tvm.nd.array(B_np, device=dev) Y_tvm = tvm.nd.empty(Y_np.shape, device=dev) func(X_tvm, W_data_tvm, W_indices_tvm, W_indptr_tvm, B_tvm, Y_tvm) # Check results tvm.testing.assert_allclose(Y_np, Y_tvm.numpy(), atol=1e-4, rtol=1e-4) # Evaluate execution time. evaluator = func.time_evaluator(func.entry_name, dev, min_repeat_ms=500) print( "Execution time of this operator: %.3f ms" % ( np.median(evaluator(X_tvm, W_data_tvm, W_indices_tvm, W_indptr_tvm, B_tvm, Y_tvm).results) * 1000 ) ) ###################################################################### # .. note:: Tuning result example # # .. code-block:: c # # ---------------------------------------------------------------------- # Lowered TIR: # primfn(placeholder_5: handle, placeholder_6: handle, placeholder_7: handle, placeholder_8: handle, placeholder_9: handle, compute_1: handle) -> () # attr = {"global_symbol": "main", "tir.noalias": True} # buffers = {placeholder_2: Buffer(placeholder_10: Pointer(float32), float32, [9831, 16, 1], []), # placeholder_4: Buffer(placeholder_11: Pointer(int32), int32, [33], []), # placeholder_3: Buffer(placeholder_12: Pointer(float32), float32, [512, 512], []), # compute: Buffer(compute_2: Pointer(float32), float32, [512, 512], []), # placeholder_1: Buffer(placeholder_13: Pointer(float32), float32, [512, 512], []), # placeholder: Buffer(placeholder_14: Pointer(int32), int32, [9831], [])} # buffer_map = {placeholder_7: placeholder, placeholder_9: placeholder_1, placeholder_6: placeholder_2, compute_1: compute, placeholder_5: placeholder_3, placeholder_8: placeholder_4} { # for (i0.outer.i1.outer.fused: int32, 0, 1024) "parallel" { # attr [compute_3: Pointer(float32)] "storage_scope" = "global"; # allocate(compute_3, float32, [256]) { # for (nb_j.inner: int32, 0, 2) { # for (i.inner.init: int32, 0, 8) { # for (j.init: int32, 0, 16) { # compute_3[(((i.inner.init*32) + (nb_j.inner*16)) + j.init)] = 0f32 # } # } # for (elem_idx: int32, 0, ((int32*)placeholder_11[(((floormod(i0.outer.i1.outer.fused, 16)*2) + nb_j.inner) + 1)] - (int32*)placeholder_11[((floormod(i0.outer.i1.outer.fused, 16)*2) + nb_j.inner)])) { # for (i.inner: int32, 0, 8) { # for (j: int32, 0, 16) { # compute_3[(((i.inner*32) + (nb_j.inner*16)) + j)] = ((float32*)compute_3[(((i.inner*32) + (nb_j.inner*16)) + j)] + ((float32*)placeholder_10[((((int32*)placeholder_11[((floormod(i0.outer.i1.outer.fused, 16)*2) + nb_j.inner)]*16) + 
(elem_idx*16)) + j)]*max((float32*)placeholder_12[(((floordiv(i0.outer.i1.outer.fused, 16)*4096) + (i.inner*512)) + (int32*)placeholder_14[((int32*)placeholder_11[((floormod(i0.outer.i1.outer.fused, 16)*2) + nb_j.inner)] + elem_idx)])], 0f32))) # } # } # } # } # for (i0.inner: int32, 0, 8) { # compute_2[ramp((((floordiv(i0.outer.i1.outer.fused, 16)*4096) + (i0.inner*512)) + (floormod(i0.outer.i1.outer.fused, 16)*32)), 1, 32)] = max(((float32x32*)compute_3[ramp((i0.inner*32), 1, 32)] + (float32x32*)placeholder_13[ramp((((floordiv(i0.outer.i1.outer.fused, 16)*4096) + (i0.inner*512)) + (floormod(i0.outer.i1.outer.fused, 16)*32)), 1, 32)]), broadcast(0f32, 32)) # } # } # } # }
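######################################################################
# .. note:: Resuming the search
#
#   As mentioned above, the measurement records in `sparse_dense.json` can also be used to
#   resume a search instead of starting from scratch. The snippet below is only a minimal
#   sketch of one way to do this (the helper name ``resume_search`` is ours, not part of the
#   original tutorial): it preloads the measured states and the cost model from the log file
#   before creating a new search policy with the same custom sketch rule.
#
#   .. code-block:: python
#
#     def resume_search(task, log_file):
#         # train the cost model on the existing records
#         cost_model = auto_scheduler.XGBModel()
#         cost_model.update_from_file(log_file)
#         # preload measured states so the search does not repeat them
#         search_policy = auto_scheduler.SketchPolicy(
#             task,
#             program_cost_model=cost_model,
#             init_search_callbacks=[
#                 auto_scheduler.PreloadMeasuredStates(log_file),
#                 auto_scheduler.PreloadCustomSketchRule(
#                     meet_condition_func, apply_func, "SparseDense"
#                 ),
#             ],
#         )
#         tune_option = auto_scheduler.TuningOptions(
#             num_measure_trials=10, measure_callbacks=[auto_scheduler.RecordToFile(log_file)]
#         )
#         task.tune(tune_option, search_policy=search_policy)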
https://github.com/zk-ml/tachikoma
gallery/how_to/tune_with_autotvm/tune_conv2d_cuda.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Tuning High Performance Convolution on NVIDIA GPUs ========================================================================= **Author**: `Lianmin Zheng <https://github.com/merrymercy>`_ This is an advanced tutorial for writing high performance tunable template for NVIDIA GPU. By running auto-tuner on this template, we can outperform the vendor provided library CuDNN in many cases. Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. """ ###################################################################### # Install dependencies # -------------------- # To use autotvm package in tvm, we need to install some extra dependencies. # (change "3" to "2" if you use python2): # # .. code-block:: bash # # pip3 install --user psutil xgboost tornado cloudpickle # # To make TVM run faster in tuning, it is recommended to use cython # as FFI of tvm. In the root directory of tvm, execute # # .. code-block:: bash # # pip3 install --user cython # sudo make cython3 # # Now return to python code. Import packages. # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import logging import sys import numpy as np import tvm from tvm import te, topi, testing from tvm.topi.testing import conv2d_nchw_python import tvm.testing from tvm import autotvm ###################################################################### # Step 1: Define the search space # -------------------------------- # There are plenty of useful schedule primitives in tvm. You can also find # some tutorials that describe them in more details, such as # (1). :ref:`opt-conv-gpu` # (2). `Optimizing DepthwiseConv on NVIDIA GPU <https://tvm.apache.org/2017/08/22/Optimize-Deep-Learning-GPU-Operators-with-TVM-A-Depthwise-Convolution-Example>`_ # # However, their implementations are manually tuned for some special input # shapes. In this section, we build a large enough space to cover # the techniques used in these tutorials. Then we rely on the efficient auto-tuner # to search through this space and pick some good configurations. # # If you are familiar with writing cuda schedule, you can find the following # template is very general. Actually this template can be easily modified # to tune other operators such as depthwise convolution and GEMM. # In order to fully understand this template, you should be familiar with # the schedule primitives and auto tuning API. 
You can refer to the above # tutorials and :ref:`autotvm tutorial <tutorial-autotvm-matmul-x86>`. # # It is worth noting that the search space for a conv2d operator # can be very large (at the level of 10^9 for some input shapes) # @autotvm.template("tutorial/conv2d_no_batching") def conv2d_no_batching(N, H, W, CO, CI, KH, KW, stride, padding): assert N == 1, "Only consider batch_size = 1 in this template" data = te.placeholder((N, CI, H, W), name="data") kernel = te.placeholder((CO, CI, KH, KW), name="kernel") conv = topi.nn.conv2d_nchw(data, kernel, stride, padding, dilation=1, out_dtype="float32") s = te.create_schedule([conv.op]) ##### space definition begin ##### n, f, y, x = s[conv].op.axis rc, ry, rx = s[conv].op.reduce_axis cfg = autotvm.get_config() cfg.define_split("tile_f", f, num_outputs=4) cfg.define_split("tile_y", y, num_outputs=4) cfg.define_split("tile_x", x, num_outputs=4) cfg.define_split("tile_rc", rc, num_outputs=3) cfg.define_split("tile_ry", ry, num_outputs=3) cfg.define_split("tile_rx", rx, num_outputs=3) cfg.define_knob("auto_unroll_max_step", [0, 512, 1500]) cfg.define_knob("unroll_explicit", [0, 1]) ##### space definition end ##### # inline padding pad_data = s[conv].op.input_tensors[0] s[pad_data].compute_inline() data, raw_data = pad_data, data output = conv OL = s.cache_write(conv, "local") # create cache stage AA = s.cache_read(data, "shared", [OL]) WW = s.cache_read(kernel, "shared", [OL]) AL = s.cache_read(AA, "local", [OL]) WL = s.cache_read(WW, "local", [OL]) # tile and bind spatial axes n, f, y, x = s[output].op.axis bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f) by, vy, ty, yi = cfg["tile_y"].apply(s, output, y) bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x) kernel_scope = n # this is the scope to attach global config inside this kernel s[output].bind(bf, te.thread_axis("blockIdx.z")) s[output].bind(by, te.thread_axis("blockIdx.y")) s[output].bind(bx, te.thread_axis("blockIdx.x")) s[output].bind(vf, te.thread_axis("vthread")) s[output].bind(vy, te.thread_axis("vthread")) s[output].bind(vx, te.thread_axis("vthread")) s[output].bind(tf, te.thread_axis("threadIdx.z")) s[output].bind(ty, te.thread_axis("threadIdx.y")) s[output].bind(tx, te.thread_axis("threadIdx.x")) s[output].reorder(n, bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi) s[OL].compute_at(s[output], tx) # tile reduction axes n, f, y, x = s[OL].op.axis rc, ry, rx = s[OL].op.reduce_axis rco, rcm, rci = cfg["tile_rc"].apply(s, OL, rc) ryo, rym, ryi = cfg["tile_ry"].apply(s, OL, ry) rxo, rxm, rxi = cfg["tile_rx"].apply(s, OL, rx) s[OL].reorder(rco, ryo, rxo, rcm, rym, rxm, rci, ryi, rxi, n, f, y, x) s[AA].compute_at(s[OL], rxo) s[WW].compute_at(s[OL], rxo) s[AL].compute_at(s[OL], rxm) s[WL].compute_at(s[OL], rxm) # cooperative fetching for load in [AA, WW]: n, f, y, x = s[load].op.axis fused = s[load].fuse(n, f, y, x) tz, fused = s[load].split(fused, nparts=cfg["tile_f"].size[2]) ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2]) tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2]) s[load].bind(tz, te.thread_axis("threadIdx.z")) s[load].bind(ty, te.thread_axis("threadIdx.y")) s[load].bind(tx, te.thread_axis("threadIdx.x")) # tune unroll s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val) s[output].pragma(kernel_scope, "unroll_explicit", cfg["unroll_explicit"].val) return s, [raw_data, kernel, conv] ###################################################################### # Step 2: Search through the space # 
--------------------------------- # We pick the last layer of ResNet as the test case. # Since our space is very large, :code:`XGBTuner` is most suitable # for our case. Here we only do 20 trials for demonstration. # In practice, 1000 trials can usually find some good kernels # for this template. # logging config (for printing tuning log to screen) logging.getLogger("autotvm").setLevel(logging.DEBUG) logging.getLogger("autotvm").addHandler(logging.StreamHandler(sys.stdout)) # the last layer in resnet N, H, W, CO, CI, KH, KW, strides, padding = 1, 7, 7, 512, 512, 3, 3, (1, 1), (1, 1) task = autotvm.task.create( "tutorial/conv2d_no_batching", args=(N, H, W, CO, CI, KH, KW, strides, padding), target="cuda" ) print(task.config_space) # Use the local GPU and repeat each measurement 3 times to reduce variance. # The timeout of compiling a program is 10 seconds, the timeout for running is 4 seconds. measure_option = autotvm.measure_option( builder=autotvm.LocalBuilder(), runner=autotvm.LocalRunner(repeat=3, min_repeat_ms=100, timeout=4), ) # Begin tuning, log records to file `conv2d.log` # During tuning we will also try many invalid configs, so you are expected to # see many error reports. As long as you can see non-zero GFLOPS, it is okay. tuner = autotvm.tuner.XGBTuner(task) tuner.tune( n_trial=20, measure_option=measure_option, callbacks=[autotvm.callback.log_to_file("conv2d.log")], ) ######################################################################### # Finally we can inspect the best config from the log file, check correctness, # and measure running time. # inspect the best config dispatch_context = autotvm.apply_history_best("conv2d.log") best_config = dispatch_context.query(task.target, task.workload) print("\nBest config:") print(best_config) # apply history best from log file with autotvm.apply_history_best("conv2d.log"): with tvm.target.Target("cuda"): s, arg_bufs = conv2d_no_batching(N, H, W, CO, CI, KH, KW, strides, padding) func = tvm.build(s, arg_bufs) # check correctness a_np = np.random.uniform(size=(N, CI, H, W)).astype(np.float32) w_np = np.random.uniform(size=(CO, CI, KH, KW)).astype(np.float32) c_np = conv2d_nchw_python(a_np, w_np, strides, padding) dev = tvm.cuda() a_tvm = tvm.nd.array(a_np, device=dev) w_tvm = tvm.nd.array(w_np, device=dev) c_tvm = tvm.nd.empty(c_np.shape, device=dev) func(a_tvm, w_tvm, c_tvm) tvm.testing.assert_allclose(c_np, c_tvm.numpy(), rtol=1e-2) # Evaluate running time. Here we choose a large repeat number (400) to reduce the noise # and the overhead of kernel launch. You can also use nvprof to validate the result. evaluator = func.time_evaluator(func.entry_name, dev, number=400) print("Time cost of this operator: %f" % evaluator(a_tvm, w_tvm, c_tvm).mean)
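#########################################################################
# The records in `conv2d.log` can also be inspected directly. The loop below is only a
# rough sketch, not part of the original tutorial: it prints the achieved GFLOPS of every
# successful measurement, assuming the log file produced by the tuning run above.

for inp, res in autotvm.record.load_from_file("conv2d.log"):
    if res.error_no != 0:
        # skip trials that failed to compile or run
        continue
    gflops = inp.task.flop / np.mean(res.costs) / 1e9
    print("%.2f GFLOPS with config %s" % (gflops, inp.config))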
https://github.com/zk-ml/tachikoma
gallery/how_to/tune_with_autotvm/tune_relay_arm.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tune_relay_arm: Auto-tuning a Convolutional Network for ARM CPU =============================================== **Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, `Zhao Wu <https://github.com/FrozenGene>`_, `Eddie Yan <https://github.com/eqy>`_ Auto-tuning for a specific ARM device is critical for getting the best performance. This is a tutorial about how to tune a whole convolutional network. The operator implementation for ARM CPU in TVM is written in template form. The template has many tunable knobs (tile factor, vectorization, unrolling, etc). We will tune all convolution and depthwise convolution operators in the neural network. After tuning, we produce a log file which stores the best knob values for all required operators. When the TVM compiler compiles these operators, it will query this log file to get the best knob values. We also released pre-tuned parameters for some arm devices. You can go to `ARM CPU Benchmark <https://github.com/apache/tvm/wiki/Benchmark#arm-cpu>`_ to see the results. Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. """ ###################################################################### # Install dependencies # -------------------- # To use the autotvm package in tvm, we need to install some extra dependencies. # (change "3" to "2" if you use python2): # # .. code-block:: bash # # pip3 install --user psutil xgboost tornado cloudpickle # # To make TVM run faster during tuning, it is recommended to use cython # as FFI of TVM. In the root directory of TVM, execute # (change "3" to "2" if you use python2): # # .. code-block:: bash # # pip3 install --user cython # sudo make cython3 # # Now return to python code. Import packages. # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import os import numpy as np import tvm from tvm import relay, autotvm import tvm.relay.testing from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner from tvm.contrib.utils import tempdir import tvm.contrib.graph_executor as runtime ################################################################# # Define network # -------------- # First we need to define the network in relay frontend API. # We can load some pre-defined network from :code:`relay.testing`. # We can also load models from MXNet, ONNX and TensorFlow. 
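################################################################
# For instance, an ONNX model could be converted to Relay roughly as follows. This is only
# an illustrative sketch and not part of the original tutorial; "resnet18.onnx" and the
# input name "data" are placeholders for your own model.
#
# .. code-block:: python
#
#   import onnx
#
#   onnx_model = onnx.load("resnet18.onnx")
#   shape_dict = {"data": (1, 3, 224, 224)}
#   mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)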
def get_network(name, batch_size): """Get the symbol definition and random weight of a network""" input_shape = (batch_size, 3, 224, 224) output_shape = (batch_size, 1000) if "resnet" in name: n_layer = int(name.split("-")[1]) mod, params = relay.testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif "vgg" in name: n_layer = int(name.split("-")[1]) mod, params = relay.testing.vgg.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif name == "mobilenet": mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size) elif name == "squeezenet_v1.1": mod, params = relay.testing.squeezenet.get_workload( batch_size=batch_size, version="1.1", dtype=dtype ) elif name == "inception_v3": input_shape = (batch_size, 3, 299, 299) mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype) elif name == "mxnet": # an example for mxnet model from mxnet.gluon.model_zoo.vision import get_model block = get_model("resnet18_v1", pretrained=True) mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype) net = mod["main"] net = relay.Function( net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs ) mod = tvm.IRModule.from_expr(net) else: raise ValueError("Unsupported network: " + name) return mod, params, input_shape, output_shape ################################################################# # Start RPC Tracker # ----------------- # TVM uses RPC session to communicate with ARM boards. # During tuning, the tuner will send the generated code to the board and # measure the speed of code on the board. # # To scale up the tuning, TVM uses RPC Tracker to manage distributed devices. # The RPC Tracker is a centralized controller node. We can register all devices to # the tracker. For example, if we have 10 phones, we can register all of them # to the tracker, and run 10 measurements in parallel, accelerating the tuning process. # # To start an RPC tracker, run this command on the host machine. The tracker is # required during the whole tuning process, so we need to open a new terminal for # this command: # # .. code-block:: bash # # python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190 # # The expected output is # # .. code-block:: bash # # INFO:RPCTracker:bind to 0.0.0.0:9190 ################################################################# # Register Devices to RPC Tracker # ----------------------------------- # Now we can register our devices to the tracker. The first step is to # build the TVM runtime for the ARM devices. # # * For Linux: # Follow this section :ref:`build-tvm-runtime-on-device` to build # the TVM runtime on the device. Then register the device to tracker by # # .. code-block:: bash # # python -m tvm.exec.rpc_server --tracker=[HOST_IP]:9190 --key=rk3399 # # (replace :code:`[HOST_IP]` with the IP address of your host machine) # # * For Android: # Follow this `readme page <https://github.com/apache/tvm/tree/main/apps/android_rpc>`_ to # install the TVM RPC APK on the android device. Make sure you can pass the android rpc test. # Then you have already registered your device. During tuning, you have to go to developer option # and enable "Keep screen awake during changing" and charge your phone to make it stable. # # After registering devices, we can confirm it by querying rpc_tracker # # .. 
code-block:: bash # # python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190 # # For example, if we have 2 Huawei mate10 pro, 11 Raspberry Pi 3B and 2 rk3399, # the output can be # # .. code-block:: bash # # Queue Status # ---------------------------------- # key total free pending # ---------------------------------- # mate10pro 2 2 0 # rk3399 2 2 0 # rpi3b 11 11 0 # ---------------------------------- # # You can register multiple devices to the tracker to accelerate the measurement in tuning. ########################################### # Set Tuning Options # ------------------ # Before tuning, we should apply some configurations. Here I use an RK3399 board # as example. In your setting, you should modify the target and device_key accordingly. # set :code:`use_android` to True if you use android phone. #### DEVICE CONFIG #### # Replace "aarch64-linux-gnu" with the correct target of your board. # This target is used for cross compilation. You can query it by :code:`gcc -v` on your device. target = tvm.target.Target("llvm -device=arm_cpu -mtriple=aarch64-linux-gnu") # Also replace this with the device key in your tracker device_key = "rk3399" # Set this to True if you use android phone use_android = False #### TUNING OPTION #### network = "resnet-18" log_file = "%s.%s.log" % (device_key, network) dtype = "float32" tuning_option = { "log_filename": log_file, "tuner": "xgb", "n_trial": 1500, "early_stopping": 800, "measure_option": autotvm.measure_option( builder=autotvm.LocalBuilder(build_func="ndk" if use_android else "default"), runner=autotvm.RPCRunner( device_key, host="127.0.0.1", port=9190, number=5, timeout=10, ), ), } #################################################################### # # .. note:: How to set tuning options # # In general, the default values provided here work well. # If you have enough time budget, you can set :code:`n_trial`, :code:`early_stopping` larger, # which makes the tuning run longer. # If your device runs very slow or your conv2d operators have many GFLOPs, considering to # set timeout larger. # # If your model has depthwise convolution, you could consider setting # :code:`try_spatial_pack_depthwise` be :code:`True`, which perform better than default # optimization in general. For example, on ARM CPU A53 2.0GHz, we find it could boost 1.6x # performance of depthwise convolution on Mobilenet V1 model. ################################################################### # Begin Tuning # ------------ # Now we can extract tuning tasks from the network and begin tuning. # Here, we provide a simple utility function to tune a list of tasks. # This function is just an initial implementation which tunes them in sequential order. # We will introduce a more sophisticated tuning scheduler in the future. # You can skip the implementation of this function for this tutorial. 
def tune_tasks( tasks, measure_option, tuner="xgb", n_trial=1000, early_stopping=None, log_filename="tuning.log", use_transfer_learning=True, ): # create tmp log file tmp_log_file = log_filename + ".tmp" if os.path.exists(tmp_log_file): os.remove(tmp_log_file) for i, tsk in enumerate(reversed(tasks)): prefix = "[Task %2d/%2d] " % (i + 1, len(tasks)) # create tuner if tuner == "xgb" or tuner == "xgb-rank": tuner_obj = XGBTuner(tsk, loss_type="rank") elif tuner == "xgb_knob": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="knob") elif tuner == "xgb_itervar": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="itervar") elif tuner == "xgb_curve": tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="curve") elif tuner == "ga": tuner_obj = GATuner(tsk, pop_size=50) elif tuner == "random": tuner_obj = RandomTuner(tsk) elif tuner == "gridsearch": tuner_obj = GridSearchTuner(tsk) else: raise ValueError("Invalid tuner: " + tuner) if use_transfer_learning: if os.path.isfile(tmp_log_file): tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file)) # process tuning tsk_trial = min(n_trial, len(tsk.config_space)) tuner_obj.tune( n_trial=tsk_trial, early_stopping=early_stopping, measure_option=measure_option, callbacks=[ autotvm.callback.progress_bar(tsk_trial, prefix=prefix), autotvm.callback.log_to_file(tmp_log_file), ], ) # pick best records to a cache file autotvm.record.pick_best(tmp_log_file, log_filename) os.remove(tmp_log_file) ######################################################################## # Finally, we launch tuning jobs and evaluate the end-to-end performance. def tune_and_evaluate(tuning_opt): # extract workloads from relay program print("Extract tasks...") mod, params, input_shape, _ = get_network(network, batch_size=1) tasks = autotvm.task.extract_from_program( mod["main"], target=target, params=params, ops=(relay.op.get("nn.conv2d"),) ) # run tuning tasks print("Tuning...") tune_tasks(tasks, **tuning_opt) # compile kernels with history best records with autotvm.apply_history_best(log_file): print("Compile...") with tvm.transform.PassContext(opt_level=3): lib = relay.build_module.build(mod, target=target, params=params) # export library tmp = tempdir() if use_android: from tvm.contrib import ndk filename = "net.so" lib.export_library(tmp.relpath(filename), ndk.create_shared) else: filename = "net.tar" lib.export_library(tmp.relpath(filename)) # upload module to device print("Upload...") remote = autotvm.measure.request_remote(device_key, "127.0.0.1", 9190, timeout=10000) remote.upload(tmp.relpath(filename)) rlib = remote.load_module(filename) # upload parameters to device dev = remote.device(str(target), 0) module = runtime.GraphModule(rlib["default"](dev)) data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype)) module.set_input("data", data_tvm) # evaluate print("Evaluate inference time cost...") print(module.benchmark(dev, number=1, repeat=10)) # We do not run the tuning in our webpage server since it takes too long. # Uncomment the following line to run it by yourself. # tune_and_evaluate(tuning_option) ###################################################################### # Sample Output # ------------- # The tuning needs to compile many programs and extract feature from them. # So a high performance CPU is recommended. # One sample output is listed below. # It takes about 2 hours on a 32T AMD Ryzen Threadripper. # # .. code-block:: bash # # Extract tasks... # Tuning... 
# [Task 1/12] Current/Best: 22.37/ 52.19 GFLOPS | Progress: (544/1000) | 406.59 s Done. # [Task 2/12] Current/Best: 6.51/ 18.77 GFLOPS | Progress: (608/1000) | 325.05 s Done. # [Task 3/12] Current/Best: 4.67/ 24.87 GFLOPS | Progress: (480/1000) | 372.31 s Done. # [Task 4/12] Current/Best: 11.35/ 46.83 GFLOPS | Progress: (736/1000) | 602.39 s Done. # [Task 5/12] Current/Best: 1.01/ 19.80 GFLOPS | Progress: (448/1000) | 262.16 s Done. # [Task 6/12] Current/Best: 2.47/ 23.76 GFLOPS | Progress: (672/1000) | 563.85 s Done. # [Task 7/12] Current/Best: 14.57/ 33.97 GFLOPS | Progress: (544/1000) | 465.15 s Done. # [Task 8/12] Current/Best: 1.13/ 17.65 GFLOPS | Progress: (576/1000) | 365.08 s Done. # [Task 9/12] Current/Best: 14.45/ 22.66 GFLOPS | Progress: (928/1000) | 724.25 s Done. # [Task 10/12] Current/Best: 3.22/ 15.36 GFLOPS | Progress: (864/1000) | 564.27 s Done. # [Task 11/12] Current/Best: 11.03/ 32.23 GFLOPS | Progress: (736/1000) | 635.15 s Done. # [Task 12/12] Current/Best: 8.00/ 21.65 GFLOPS | Progress: (1000/1000) | 1111.81 s Done. # Compile... # Upload... # Evaluate inference time cost... # Mean inference time (std dev): 162.59 ms (0.06 ms) ###################################################################### # # .. note:: **Experiencing Difficulties?** # # The auto tuning module is error-prone. If you always see " 0.00/ 0.00 GFLOPS", # then there must be something wrong. # # First, make sure you set the correct configuration of your device. # Then, you can print debug information by adding these lines in the beginning # of the script. It will print every measurement result, where you can find useful # error messages. # # .. code-block:: python # # import logging # logging.getLogger('autotvm').setLevel(logging.DEBUG) # # Finally, always feel free to ask our community for help on https://discuss.tvm.apache.org
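######################################################################
# Before launching a long tuning run, it can also help to verify the RPC setup from Python.
# The snippet below is a rough sketch (not part of the original tutorial) that requests a
# session from the tracker started above, using the :code:`device_key` and tracker address
# configured earlier, and checks that the remote CPU is reachable.
#
# .. code-block:: python
#
#   from tvm import rpc
#
#   tracker = rpc.connect_tracker("127.0.0.1", 9190)
#   remote = tracker.request(device_key, priority=0, session_timeout=60)
#   dev = remote.cpu(0)
#   print("Connected to %s, device exists: %s" % (device_key, dev.exist))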
https://github.com/zk-ml/tachikoma
gallery/how_to/tune_with_autotvm/tune_relay_cuda.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Auto-tuning a Convolutional Network for NVIDIA GPU ================================================== **Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, `Eddie Yan <https://github.com/eqy/>`_ Auto-tuning for specific devices and workloads is critical for getting the best performance. This is a tutorial on how to tune a whole convolutional network for NVIDIA GPU. The operator implementation for NVIDIA GPU in TVM is written in template form. The template has many tunable knobs (tile factor, unrolling, etc). We will tune all convolution and depthwise convolution operators in the neural network. After tuning, we produce a log file which stores the best knob values for all required operators. When the TVM compiler compiles these operators, it will query this log file to get the best knob values. We also released pre-tuned parameters for some NVIDIA GPUs. You can go to `NVIDIA GPU Benchmark <https://github.com/apache/tvm/wiki/Benchmark#nvidia-gpu>`_ to see the results. Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. """ ###################################################################### # Install dependencies # -------------------- # To use the autotvm package in tvm, we need to install some extra dependencies. # (change "3" to "2" if you use python2): # # .. code-block:: bash # # pip3 install --user psutil xgboost tornado cloudpickle # # To make TVM run faster during tuning, it is recommended to use cython # as FFI of tvm. In the root directory of tvm, execute: # # .. code-block:: bash # # pip3 install --user cython # sudo make cython3 # # Now return to python code. Import packages. # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import os import numpy as np import tvm from tvm import relay, autotvm import tvm.relay.testing from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner import tvm.contrib.graph_executor as runtime ################################################################# # Define Network # -------------- # First we need to define the network in relay frontend API. # We can load some pre-defined network from :code:`tvm.relay.testing`. # We can also load models from MXNet, ONNX and TensorFlow. 
def get_network(name, batch_size): """Get the symbol definition and random weight of a network""" input_shape = (batch_size, 3, 224, 224) output_shape = (batch_size, 1000) if "resnet" in name: n_layer = int(name.split("-")[1]) mod, params = relay.testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif "vgg" in name: n_layer = int(name.split("-")[1]) mod, params = relay.testing.vgg.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif name == "mobilenet": mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype) elif name == "squeezenet_v1.1": mod, params = relay.testing.squeezenet.get_workload( batch_size=batch_size, version="1.1", dtype=dtype ) elif name == "inception_v3": input_shape = (batch_size, 3, 299, 299) mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype) elif name == "mxnet": # an example for mxnet model from mxnet.gluon.model_zoo.vision import get_model block = get_model("resnet18_v1", pretrained=True) mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype) net = mod["main"] net = relay.Function( net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs ) mod = tvm.IRModule.from_expr(net) else: raise ValueError("Unsupported network: " + name) return mod, params, input_shape, output_shape ########################################### # Set Tuning Options # ------------------ # Before tuning, we apply some configurations. #### DEVICE CONFIG #### target = tvm.target.cuda() #### TUNING OPTION #### network = "resnet-18" log_file = "%s.log" % network dtype = "float32" tuning_option = { "log_filename": log_file, "tuner": "xgb", "n_trial": 2000, "early_stopping": 600, "measure_option": autotvm.measure_option( builder=autotvm.LocalBuilder(timeout=10), runner=autotvm.LocalRunner(number=20, repeat=3, timeout=4, min_repeat_ms=150), ), } #################################################################### # # .. note:: How to set tuning options # # In general, the default value provided here works well. # # If you have large time budget, you can set :code:`n_trial`, :code:`early_stopping` larger, # which makes the tuning runs longer. # # If you have multiple devices, you can use all of them for measurement to # accelerate the tuning process. (see the 'Scale up measurement` section below). # ################################################################### # Begin Tuning # ------------ # Now we can extract tuning tasks from the network and begin tuning. # Here, we provide a simple utility function to tune a list of tasks. # This function is just an initial implementation which tunes them in sequential order. # We will introduce a more sophisticated tuning scheduler in the future. # You can skip the implementation of this function for this tutorial. 
def tune_tasks( tasks, measure_option, tuner="xgb", n_trial=1000, early_stopping=None, log_filename="tuning.log", use_transfer_learning=True, ): # create tmp log file tmp_log_file = log_filename + ".tmp" if os.path.exists(tmp_log_file): os.remove(tmp_log_file) for i, tsk in enumerate(reversed(tasks)): prefix = "[Task %2d/%2d] " % (i + 1, len(tasks)) # create tuner if tuner == "xgb" or tuner == "xgb-rank": tuner_obj = XGBTuner(tsk, loss_type="rank") elif tuner == "ga": tuner_obj = GATuner(tsk, pop_size=100) elif tuner == "random": tuner_obj = RandomTuner(tsk) elif tuner == "gridsearch": tuner_obj = GridSearchTuner(tsk) else: raise ValueError("Invalid tuner: " + tuner) if use_transfer_learning: if os.path.isfile(tmp_log_file): tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file)) # do tuning tsk_trial = min(n_trial, len(tsk.config_space)) tuner_obj.tune( n_trial=tsk_trial, early_stopping=early_stopping, measure_option=measure_option, callbacks=[ autotvm.callback.progress_bar(tsk_trial, prefix=prefix), autotvm.callback.log_to_file(tmp_log_file), ], ) # pick best records to a cache file autotvm.record.pick_best(tmp_log_file, log_filename) os.remove(tmp_log_file) ######################################################################## # Finally, we launch tuning jobs and evaluate the end-to-end performance. def tune_and_evaluate(tuning_opt): # extract workloads from relay program print("Extract tasks...") mod, params, input_shape, out_shape = get_network(network, batch_size=1) tasks = autotvm.task.extract_from_program( mod["main"], target=target, params=params, ops=(relay.op.get("nn.conv2d"),) ) # run tuning tasks print("Tuning...") tune_tasks(tasks, **tuning_opt) # compile kernels with history best records with autotvm.apply_history_best(log_file): print("Compile...") with tvm.transform.PassContext(opt_level=3): lib = relay.build_module.build(mod, target=target, params=params) # load parameters dev = tvm.device(str(target), 0) module = runtime.GraphModule(lib["default"](dev)) data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype)) module.set_input("data", data_tvm) # evaluate print("Evaluate inference time cost...") print(module.benchmark(dev, number=1, repeat=600)) # We do not run the tuning in our webpage server since it takes too long. # Uncomment the following line to run it by yourself. # tune_and_evaluate(tuning_option) ###################################################################### # Sample Output # ------------- # The tuning needs to compile many programs and extract feature from them. # So a high performance CPU is recommended. One sample output is listed below. # It takes about 4 hours to get the following output on a 32T AMD Ryzen Threadripper. # The tuning target is NVIDIA 1080 Ti. # (You can see some errors during compilation. If the tuning is not stuck, it is okay.) # # .. code-block:: bash # # Extract tasks... # Tuning... # [Task 1/12] Current/Best: 541.83/3570.66 GFLOPS | Progress: (960/2000) | 1001.31 s Done. # [Task 2/12] Current/Best: 0.56/ 803.33 GFLOPS | Progress: (704/2000) | 608.08 s Done. # [Task 3/12] Current/Best: 103.69/1141.25 GFLOPS | Progress: (768/2000) | 702.13 s Done. # [Task 4/12] Current/Best: 2905.03/3925.15 GFLOPS | Progress: (864/2000) | 745.94 sterminate called without an active exception # [Task 4/12] Current/Best: 2789.36/3925.15 GFLOPS | Progress: (1056/2000) | 929.40 s Done. # [Task 5/12] Current/Best: 89.06/1076.24 GFLOPS | Progress: (704/2000) | 601.73 s Done. 
# [Task 6/12] Current/Best: 40.39/2129.02 GFLOPS | Progress: (1088/2000) | 1125.76 s Done. # [Task 7/12] Current/Best: 4090.53/5007.02 GFLOPS | Progress: (800/2000) | 903.90 s Done. # [Task 8/12] Current/Best: 4.78/1272.28 GFLOPS | Progress: (768/2000) | 749.14 s Done. # [Task 9/12] Current/Best: 1391.45/2325.08 GFLOPS | Progress: (992/2000) | 1084.87 s Done. # [Task 10/12] Current/Best: 1995.44/2383.59 GFLOPS | Progress: (864/2000) | 862.60 s Done. # [Task 11/12] Current/Best: 4093.94/4899.80 GFLOPS | Progress: (224/2000) | 240.92 sterminate called without an active exception # [Task 11/12] Current/Best: 3487.98/4909.91 GFLOPS | Progress: (480/2000) | 534.96 sterminate called without an active exception # [Task 11/12] Current/Best: 4636.84/4912.17 GFLOPS | Progress: (1184/2000) | 1381.16 sterminate called without an active exception # [Task 11/12] Current/Best: 50.12/4912.17 GFLOPS | Progress: (1344/2000) | 1602.81 s Done. # [Task 12/12] Current/Best: 3581.31/4286.30 GFLOPS | Progress: (736/2000) | 943.52 s Done. # Compile... # Evaluate inference time cost... # Mean inference time (std dev): 1.07 ms (0.05 ms) # # As a reference baseline, the time cost of MXNet + TensorRT on resnet-18 is 1.30ms. So we are a little faster. ###################################################################### # # .. note:: **Experiencing Difficulties?** # # The auto tuning module is error-prone. If you always see " 0.00/ 0.00 GFLOPS", # then there must be something wrong. # # First, make sure you set the correct configuration of your device. # Then, you can print debug information by adding these lines in the beginning # of the script. It will print every measurement result, where you can find useful # error messages. # # .. code-block:: python # # import logging # logging.getLogger('autotvm').setLevel(logging.DEBUG) # # Finally, always feel free to ask our community for help on https://discuss.tvm.apache.org ################################################################# # .. _tutorials-autotvm-scale-up-rpc-tracker: ################################################################# # Scale up measurement by using multiple devices # ---------------------------------------------- # If you have multiple devices, you can use all of them for measurement. # TVM uses the RPC Tracker to manage distributed devices. # The RPC Tracker is a centralized controller node. We can register all devices to # the tracker. For example, if we have 10 GPU cards, we can register all of them # to the tracker, and run 10 measurements in parallel, accelerating the tuning process. # # To start an RPC tracker, run this command on the host machine. The tracker is # required during the whole tuning process, so we need to open a new terminal for # this command: # # .. code-block:: bash # # python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190 # # The expected output is # # .. code-block:: bash # # INFO:RPCTracker:bind to 0.0.0.0:9190 # # Then open another new terminal for the RPC server. We need to start one dedicated server # for each device. We use a string key to distinguish the types of devices. # You can pick a name you like. # (Note: For rocm backend, there are some internal errors with the compiler, # we need to add `--no-fork` to the argument list.) # # .. code-block:: bash # # python -m tvm.exec.rpc_server --tracker=127.0.0.1:9190 --key=1080ti # # After registering devices, we can confirm it by querying rpc_tracker # # .. 
code-block:: bash # # python -m tvm.exec.query_rpc_tracker --host=127.0.0.1 --port=9190 # # For example, if we have four 1080ti, two titanx and one gfx900, the output can be # # .. code-block:: bash # # Queue Status # ---------------------------------- # key total free pending # ---------------------------------- # 1080ti 4 4 0 # titanx 2 2 0 # gfx900 1 1 0 # ---------------------------------- # # Finally, we need to change the tuning option to use RPCRunner. Use the code below # to replace the corresponding part above. tuning_option = { "log_filename": log_file, "tuner": "xgb", "n_trial": 2000, "early_stopping": 600, "measure_option": autotvm.measure_option( builder=autotvm.LocalBuilder(timeout=10), runner=autotvm.RPCRunner( "1080ti", # change the device key to your key "127.0.0.1", 9190, number=20, repeat=3, timeout=4, min_repeat_ms=150, ), ), }
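######################################################################
# After tuning, the compiled library returned by :code:`relay.build_module.build` can be
# saved and reloaded later without re-running the tuner. This is a brief sketch, not part
# of the original tutorial; the file name "resnet18_cuda_tuned.so" is a placeholder, and
# the imports used in this script (:code:`tvm`, :code:`graph_executor as runtime`) are assumed.
#
# .. code-block:: python
#
#   # inside tune_and_evaluate, after `lib = relay.build_module.build(...)`
#   lib.export_library("resnet18_cuda_tuned.so")
#
#   # later, in a separate deployment script
#   loaded_lib = tvm.runtime.load_module("resnet18_cuda_tuned.so")
#   dev = tvm.cuda(0)
#   module = runtime.GraphModule(loaded_lib["default"](dev))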
https://github.com/zk-ml/tachikoma
gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Auto-tuning a Convolutional Network for Mobile GPU ================================================== **Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, `Eddie Yan <https://github.com/eqy>`_ Auto-tuning for a specific device is critical for getting the best performance. This is a tutorial about how to tune a whole convolutional network. The operator implementation for Mobile GPU in TVM is written in template form. The template has many tunable knobs (tile factor, vectorization, unrolling, etc). We will tune all convolution, depthwise convolution and dense operators in the neural network. After tuning, we produce a log file which stores the best knob values for all required operators. When the TVM compiler compiles these operators, it will query this log file to get the best knob values. We also released pre-tuned parameters for some arm devices. You can go to `Mobile GPU Benchmark <https://github.com/apache/tvm/wiki/Benchmark#mobile-gpu>`_ to see the results. Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. """ ###################################################################### # Install dependencies # -------------------- # To use the autotvm package in tvm, we need to install some extra dependencies. # (change "3" to "2" if you use python2): # # .. code-block:: bash # # pip3 install --user psutil xgboost tornado cloudpickle # # To make TVM run faster during tuning, it is recommended to use cython # as FFI of tvm. In the root directory of tvm, execute # (change "3" to "2" if you use python2): # # .. code-block:: bash # # pip3 install --user cython # sudo make cython3 # # Now return to python code. Import packages. # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import os import numpy as np import tvm from tvm import relay, autotvm import tvm.relay.testing from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner from tvm.contrib.utils import tempdir import tvm.contrib.graph_executor as runtime ################################################################# # Define network # -------------- # First we need to define the network in relay frontend API. # We can load some pre-defined network from :code:`relay.testing`. # We can also load models from MXNet, ONNX and TensorFlow. 
def get_network(name, batch_size): """Get the symbol definition and random weight of a network""" input_shape = (batch_size, 3, 224, 224) output_shape = (batch_size, 1000) if "resnet" in name: n_layer = int(name.split("-")[1]) mod, params = relay.testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif "vgg" in name: n_layer = int(name.split("-")[1]) mod, params = relay.testing.vgg.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif name == "mobilenet": mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype) elif name == "squeezenet_v1.1": mod, params = relay.testing.squeezenet.get_workload( batch_size=batch_size, version="1.1", dtype=dtype ) elif name == "inception_v3": input_shape = (batch_size, 3, 299, 299) mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype) elif name == "mxnet": # an example for mxnet model from mxnet.gluon.model_zoo.vision import get_model block = get_model("resnet18_v1", pretrained=True) mod, params = relay.frontend.from_mxnet(block, shape={"data": input_shape}, dtype=dtype) net = mod["main"] net = relay.Function( net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs ) mod = tvm.IRModule.from_expr(net) else: raise ValueError("Unsupported network: " + name) return mod, params, input_shape, output_shape ################################################################# # .. _tutorials-autotvm-start-rpc-tracker: ################################################################# # Start RPC Tracker # ----------------- # TVM uses RPC session to communicate with ARM boards. # During tuning, the tuner will send the generated code to the board and # measure the speed of code on the board. # # To scale up the tuning, TVM uses RPC Tracker to manage distributed devices. # The RPC Tracker is a centralized controller node. We can register all devices to # the tracker. For example, if we have 10 phones, we can register all of them # to the tracker, and run 10 measurements in parallel, accelerating the tuning process. # # To start an RPC tracker, run this command on the host machine. The tracker is # required during the whole tuning process, so we need to open a new terminal for # this command: # # .. code-block:: bash # # python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190 # # The expected output is # # .. code-block:: bash # # INFO:RPCTracker:bind to 0.0.0.0:9190 ################################################################# # Register Devices to RPC Tracker # ----------------------------------- # Now we can register our devices to the tracker. The first step is to # build the TVM runtime for the ARM devices. # # * For Linux: # Follow this section :ref:`build-tvm-runtime-on-device` to build # the TVM runtime on the device. Then register the device to tracker by # # .. code-block:: bash # # python -m tvm.exec.rpc_server --tracker=[HOST_IP]:9190 --key=rk3399 # # (replace :code:`[HOST_IP]` with the IP address of your host machine) # # * For Android: # Follow this `readme page <https://github.com/apache/tvm/tree/main/apps/android_rpc>`_ to # install TVM RPC APK on the android device. Make sure you can pass the android RPC test. # Then you have already registered your device. During tuning, you have to go to developer option # and enable "Keep screen awake during changing" and charge your phone to make it stable. # # After registering devices, we can confirm it by querying rpc_tracker # # .. 
code-block:: bash # # python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190 # # For example, if we have 2 Huawei mate10 pro, 11 Raspberry Pi 3B and 2 rk3399, # the output can be # # .. code-block:: bash # # Queue Status # ---------------------------------- # key total free pending # ---------------------------------- # mate10pro 2 2 0 # rk3399 2 2 0 # rpi3b 11 11 0 # ---------------------------------- # # You can register multiple devices to the tracker to accelerate the measurement in tuning. ########################################### # Set Tuning Options # ------------------ # Before tuning, we should apply some configurations. Here I use an RK3399 board # as example. In your setting, you should modify the target and device_key accordingly. # set :code:`use_android` to True if you use android phone. #### DEVICE CONFIG #### # Replace "aarch64-linux-gnu" with the correct target of your board. # This target host is used for cross compilation. You can query it by :code:`gcc -v` on your device. target = tvm.target.Target("opencl -device=mali", host="llvm -mtriple=aarch64-linux-gnu") # Also replace this with the device key in your tracker device_key = "rk3399" # Set this to True if you use android phone use_android = False #### TUNING OPTION #### network = "resnet-18" log_file = "%s.%s.log" % (device_key, network) dtype = "float32" tuning_option = { "log_filename": log_file, "tuner": "xgb", "n_trial": 1000, "early_stopping": 450, "measure_option": autotvm.measure_option( builder=autotvm.LocalBuilder(build_func="ndk" if use_android else "default"), runner=autotvm.RPCRunner( device_key, host="127.0.0.1", port=9190, number=10, timeout=5, ), ), } #################################################################### # # .. note:: How to set tuning options # # In general, the default values provided here work well. # If you have enough time budget, you can set :code:`n_trial`, :code:`early_stopping` larger, # which makes the tuning run longer. # If your device runs very slow or your conv2d operators have many GFLOPs, considering to # set timeout larger. # ################################################################### # Begin Tuning # ------------ # Now we can extract tuning tasks from the network and begin tuning. # Here, we provide a simple utility function to tune a list of tasks. # This function is just an initial implementation which tunes them in sequential order. # We will introduce a more sophisticated tuning scheduler in the future. # You can skip the implementation of this function for this tutorial. 
def tune_tasks( tasks, measure_option, tuner="xgb", n_trial=1000, early_stopping=None, log_filename="tuning.log", use_transfer_learning=True, ): # create tmp log file tmp_log_file = log_filename + ".tmp" if os.path.exists(tmp_log_file): os.remove(tmp_log_file) for i, tsk in enumerate(reversed(tasks)): prefix = "[Task %2d/%2d] " % (i + 1, len(tasks)) # create tuner if tuner == "xgb" or tuner == "xgb-rank": tuner_obj = XGBTuner(tsk, loss_type="rank") elif tuner == "ga": tuner_obj = GATuner(tsk, pop_size=50) elif tuner == "random": tuner_obj = RandomTuner(tsk) elif tuner == "gridsearch": tuner_obj = GridSearchTuner(tsk) else: raise ValueError("Invalid tuner: " + tuner) if use_transfer_learning: if os.path.isfile(tmp_log_file): tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file)) # do tuning tsk_trial = min(n_trial, len(tsk.config_space)) tuner_obj.tune( n_trial=tsk_trial, early_stopping=early_stopping, measure_option=measure_option, callbacks=[ autotvm.callback.progress_bar(tsk_trial, prefix=prefix), autotvm.callback.log_to_file(tmp_log_file), ], ) # pick best records to a cache file autotvm.record.pick_best(tmp_log_file, log_filename) os.remove(tmp_log_file) ######################################################################## # Finally, we launch tuning jobs and evaluate the end-to-end performance. def tune_and_evaluate(tuning_opt): # extract workloads from relay program print("Extract tasks...") mod, params, input_shape, _ = get_network(network, batch_size=1) tasks = autotvm.task.extract_from_program( mod["main"], target=target, params=params, ops=(relay.op.get("nn.conv2d"),), ) # run tuning tasks print("Tuning...") tune_tasks(tasks, **tuning_opt) # compile kernels with history best records with autotvm.apply_history_best(log_file): print("Compile...") with tvm.transform.PassContext(opt_level=3): lib = relay.build_module.build(mod, target=target, params=params) # export library tmp = tempdir() if use_android: from tvm.contrib import ndk filename = "net.so" lib.export_library(tmp.relpath(filename), ndk.create_shared) else: filename = "net.tar" lib.export_library(tmp.relpath(filename)) # upload module to device print("Upload...") remote = autotvm.measure.request_remote(device_key, "127.0.0.1", 9190, timeout=10000) remote.upload(tmp.relpath(filename)) rlib = remote.load_module(filename) # upload parameters to device dev = remote.device(str(target), 0) module = runtime.GraphModule(rlib["default"](dev)) data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype)) module.set_input("data", data_tvm) # evaluate print("Evaluate inference time cost...") print(module.benchmark(dev, number=1, repeat=30)) # We do not run the tuning in our webpage server since it takes too long. # Uncomment the following line to run it by yourself. # tune_and_evaluate(tuning_option) ###################################################################### # Sample Output # ------------- # The tuning needs to compile many programs and extract feature from them. # So a high performance CPU is recommended. # One sample output is listed below. It takes about 3 hours on a 32T AMD Ryzen Threadripper. # # .. code-block:: bash # # Extract tasks... # Tuning... # [Task 1/17] Current/Best: 25.30/ 39.12 GFLOPS | Progress: (992/1000) | 751.22 s Done. # [Task 2/17] Current/Best: 40.70/ 45.50 GFLOPS | Progress: (736/1000) | 545.46 s Done. # [Task 3/17] Current/Best: 38.83/ 42.35 GFLOPS | Progress: (992/1000) | 1549.85 s Done. 
# [Task 4/17] Current/Best: 23.31/ 31.02 GFLOPS | Progress: (640/1000) | 1059.31 s Done. # [Task 5/17] Current/Best: 0.06/ 2.34 GFLOPS | Progress: (544/1000) | 305.45 s Done. # [Task 6/17] Current/Best: 10.97/ 17.20 GFLOPS | Progress: (992/1000) | 1050.00 s Done. # [Task 7/17] Current/Best: 8.98/ 10.94 GFLOPS | Progress: (928/1000) | 421.36 s Done. # [Task 8/17] Current/Best: 4.48/ 14.86 GFLOPS | Progress: (704/1000) | 582.60 s Done. # [Task 9/17] Current/Best: 10.30/ 25.99 GFLOPS | Progress: (864/1000) | 899.85 s Done. # [Task 10/17] Current/Best: 11.73/ 12.52 GFLOPS | Progress: (608/1000) | 304.85 s Done. # [Task 11/17] Current/Best: 15.26/ 18.68 GFLOPS | Progress: (800/1000) | 747.52 s Done. # [Task 12/17] Current/Best: 17.48/ 26.71 GFLOPS | Progress: (1000/1000) | 1166.40 s Done. # [Task 13/17] Current/Best: 0.96/ 11.43 GFLOPS | Progress: (960/1000) | 611.65 s Done. # [Task 14/17] Current/Best: 17.88/ 20.22 GFLOPS | Progress: (672/1000) | 670.29 s Done. # [Task 15/17] Current/Best: 11.62/ 13.98 GFLOPS | Progress: (736/1000) | 449.25 s Done. # [Task 16/17] Current/Best: 19.90/ 23.83 GFLOPS | Progress: (608/1000) | 708.64 s Done. # [Task 17/17] Current/Best: 17.98/ 22.75 GFLOPS | Progress: (736/1000) | 1122.60 s Done. # Compile... # Upload... # Evaluate inference time cost... # Mean inference time (std dev): 128.05 ms (7.74 ms) # ###################################################################### # # .. note:: **Experiencing Difficulties?** # # The auto tuning module is error-prone. If you always see " 0.00/ 0.00 GFLOPS", # then there must be something wrong. # # First, make sure you set the correct configuration of your device. # Then, you can print debug information by adding these lines in the beginning # of the script. It will print every measurement result, where you can find useful # error messages. # # .. code-block:: python # # import logging # logging.getLogger('autotvm').setLevel(logging.DEBUG) # # Finally, always feel free to ask our community for help on https://discuss.tvm.apache.org
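######################################################################
# Before launching a long tuning run, it can also help to confirm from Python that the
# tracker can actually hand out a session for your ``device_key``. The helper below is a
# small sketch added for illustration (it is not part of the original flow); it assumes
# the tracker started earlier is listening on 127.0.0.1:9190.


def check_tracker_connection(key, host="127.0.0.1", port=9190, timeout=30):
    """Request a remote session from the RPC tracker and report whether it succeeded."""
    try:
        autotvm.measure.request_remote(key, host, port, timeout=timeout)
        print("Device with key '%s' is available." % key)
        return True
    except Exception as err:
        print("Could not reach a device with key '%s': %s" % (key, err))
        return False


# Uncomment the following line to verify your tracker setup before tuning.
# check_tracker_connection(device_key)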
https://github.com/zk-ml/tachikoma
gallery/how_to/tune_with_autotvm/tune_relay_x86.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tune_relay_x86: Auto-tuning a Convolutional Network for x86 CPU =============================================== **Author**: `Yao Wang <https://github.com/kevinthesun>`_, `Eddie Yan <https://github.com/eqy>`_ This is a tutorial about how to tune convolution neural network for x86 CPU. Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import os import numpy as np import tvm from tvm import relay, autotvm from tvm.relay import testing from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner from tvm.autotvm.graph_tuner import DPTuner, PBQPTuner import tvm.contrib.graph_executor as runtime ################################################################# # Define network # -------------- # First we need to define the network in relay frontend API. # We can either load some pre-defined network from :code:`relay.testing` # or building :any:`relay.testing.resnet` with relay. # We can also load models from MXNet, ONNX and TensorFlow. # # In this tutorial, we choose resnet-18 as tuning example. def get_network(name, batch_size): """Get the symbol definition and random weight of a network""" input_shape = (batch_size, 3, 224, 224) output_shape = (batch_size, 1000) if "resnet" in name: n_layer = int(name.split("-")[1]) mod, params = relay.testing.resnet.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif "vgg" in name: n_layer = int(name.split("-")[1]) mod, params = relay.testing.vgg.get_workload( num_layers=n_layer, batch_size=batch_size, dtype=dtype ) elif name == "mobilenet": mod, params = relay.testing.mobilenet.get_workload(batch_size=batch_size, dtype=dtype) elif name == "squeezenet_v1.1": mod, params = relay.testing.squeezenet.get_workload( batch_size=batch_size, version="1.1", dtype=dtype ) elif name == "inception_v3": input_shape = (batch_size, 3, 299, 299) mod, params = relay.testing.inception_v3.get_workload(batch_size=batch_size, dtype=dtype) elif name == "mxnet": # an example for mxnet model from mxnet.gluon.model_zoo.vision import get_model block = get_model("resnet18_v1", pretrained=True) mod, params = relay.frontend.from_mxnet(block, shape={input_name: input_shape}, dtype=dtype) net = mod["main"] net = relay.Function( net.params, relay.nn.softmax(net.body), None, net.type_params, net.attrs ) mod = tvm.IRModule.from_expr(net) else: raise ValueError("Unsupported network: " + name) return mod, params, input_shape, output_shape # Replace "llvm" with the correct target of your CPU. 
# For example, for AWS EC2 c5 instance with Intel Xeon # Platinum 8000 series, the target should be "llvm -mcpu=skylake-avx512". # For AWS EC2 c4 instance with Intel Xeon E5-2666 v3, it should be # "llvm -mcpu=core-avx2". target = "llvm" batch_size = 1 dtype = "float32" model_name = "resnet-18" log_file = "%s.log" % model_name graph_opt_sch_file = "%s_graph_opt.log" % model_name # Set the input name of the graph # For ONNX models, it is typically "0". input_name = "data" # Set number of threads used for tuning based on the number of # physical CPU cores on your machine. num_threads = 1 os.environ["TVM_NUM_THREADS"] = str(num_threads) ################################################################# # Configure tensor tuning settings and create tasks # ------------------------------------------------- # To get better kernel execution performance on x86 CPU, # we need to change data layout of convolution kernel from # "NCHW" to "NCHWc". To deal with this situation, we define # conv2d_NCHWc operator in topi. We will tune this operator # instead of plain conv2d. # # We will use local mode for tuning configuration. RPC tracker # mode can be setup similarly to the approach in # :ref:`tune_relay_arm` tutorial. # # To perform a precise measurement, we should repeat the measurement several # times and use the average of results. In addition, we need to flush the cache # for the weight tensors between repeated measurements. This can make the measured # latency of one operator closer to its actual latency during end-to-end inference. tuning_option = { "log_filename": log_file, "tuner": "random", "early_stopping": None, "measure_option": autotvm.measure_option( builder=autotvm.LocalBuilder(), runner=autotvm.LocalRunner( number=1, repeat=10, min_repeat_ms=0, enable_cpu_cache_flush=True ), ), } # You can skip the implementation of this function for this tutorial. def tune_kernels( tasks, measure_option, tuner="gridsearch", early_stopping=None, log_filename="tuning.log" ): for i, task in enumerate(tasks): prefix = "[Task %2d/%2d] " % (i + 1, len(tasks)) # create tuner if tuner == "xgb" or tuner == "xgb-rank": tuner_obj = XGBTuner(task, loss_type="rank") elif tuner == "ga": tuner_obj = GATuner(task, pop_size=50) elif tuner == "random": tuner_obj = RandomTuner(task) elif tuner == "gridsearch": tuner_obj = GridSearchTuner(task) else: raise ValueError("Invalid tuner: " + tuner) # do tuning n_trial = len(task.config_space) tuner_obj.tune( n_trial=n_trial, early_stopping=early_stopping, measure_option=measure_option, callbacks=[ autotvm.callback.progress_bar(n_trial, prefix=prefix), autotvm.callback.log_to_file(log_filename), ], ) # Use graph tuner to achieve graph level optimal schedules # Set use_DP=False if it takes too long to finish. def tune_graph(graph, dshape, records, opt_sch_file, use_DP=True): target_op = [ relay.op.get("nn.conv2d"), ] Tuner = DPTuner if use_DP else PBQPTuner executor = Tuner(graph, {input_name: dshape}, records, target_op, target) executor.benchmark_layout_transform(min_exec_num=2000) executor.run() executor.write_opt_sch2record_file(opt_sch_file) ######################################################################## # Finally, we launch tuning jobs and evaluate the end-to-end performance. 
def evaluate_performance(lib, data_shape): # upload parameters to device dev = tvm.cpu() data_tvm = tvm.nd.array((np.random.uniform(size=data_shape)).astype(dtype)) module = runtime.GraphModule(lib["default"](dev)) module.set_input(input_name, data_tvm) # evaluate print("Evaluate inference time cost...") print(module.benchmark(dev, number=100, repeat=3)) def tune_and_evaluate(tuning_opt): # extract workloads from relay program print("Extract tasks...") mod, params, data_shape, out_shape = get_network(model_name, batch_size) tasks = autotvm.task.extract_from_program( mod["main"], target=target, params=params, ops=(relay.op.get("nn.conv2d"),) ) # run tuning tasks tune_kernels(tasks, **tuning_opt) tune_graph(mod["main"], data_shape, log_file, graph_opt_sch_file) # compile kernels in default mode print("Evaluation of the network compiled in 'default' mode without auto tune:") with tvm.transform.PassContext(opt_level=3): print("Compile...") lib = relay.build(mod, target=target, params=params) evaluate_performance(lib, data_shape) # compile kernels in kernel tuned only mode print("\nEvaluation of the network been tuned on kernel level:") with autotvm.apply_history_best(log_file): print("Compile...") with tvm.transform.PassContext(opt_level=3): lib = relay.build(mod, target=target, params=params) evaluate_performance(lib, data_shape) # compile kernels with graph-level best records print("\nEvaluation of the network been tuned on graph level:") with autotvm.apply_graph_best(graph_opt_sch_file): print("Compile...") with tvm.transform.PassContext(opt_level=3): lib = relay.build_module.build(mod, target=target, params=params) evaluate_performance(lib, data_shape) # We do not run the tuning in our webpage server since it takes too long. # Uncomment the following line to run it by yourself. # tune_and_evaluate(tuning_option) ###################################################################### # Sample Output # ------------- # The tuning needs to compile many programs and extract feature from them. # So a high performance CPU is recommended. # One sample output is listed below. # # .. code-block:: bash # # Extract tasks... # Tuning... # [Task 1/12] Current/Best: 598.05/2497.63 GFLOPS | Progress: (252/252) | 1357.95 s Done. # [Task 2/12] Current/Best: 522.63/2279.24 GFLOPS | Progress: (784/784) | 3989.60 s Done. # [Task 3/12] Current/Best: 447.33/1927.69 GFLOPS | Progress: (784/784) | 3869.14 s Done. # [Task 4/12] Current/Best: 481.11/1912.34 GFLOPS | Progress: (672/672) | 3274.25 s Done. # [Task 5/12] Current/Best: 414.09/1598.45 GFLOPS | Progress: (672/672) | 2720.78 s Done. # [Task 6/12] Current/Best: 508.96/2273.20 GFLOPS | Progress: (768/768) | 3718.75 s Done. # [Task 7/12] Current/Best: 469.14/1955.79 GFLOPS | Progress: (576/576) | 2665.67 s Done. # [Task 8/12] Current/Best: 230.91/1658.97 GFLOPS | Progress: (576/576) | 2435.01 s Done. # [Task 9/12] Current/Best: 487.75/2295.19 GFLOPS | Progress: (648/648) | 3009.95 s Done. # [Task 10/12] Current/Best: 182.33/1734.45 GFLOPS | Progress: (360/360) | 1755.06 s Done. # [Task 11/12] Current/Best: 372.18/1745.15 GFLOPS | Progress: (360/360) | 1684.50 s Done. # [Task 12/12] Current/Best: 215.34/2271.11 GFLOPS | Progress: (400/400) | 2128.74 s Done. # INFO Start to benchmark layout transformation... # INFO Benchmarking layout transformation successful. # INFO Start to run dynamic programming algorithm... # INFO Start forward pass... # INFO Finished forward pass. # INFO Start backward pass... # INFO Finished backward pass... 
# INFO Finished DPExecutor run. # INFO Writing optimal schedules to resnet-18_graph_opt.log successfully. # # Evaluation of the network compiled in 'default' mode without auto tune: # Compile... # Evaluate inference time cost... # Mean inference time (std dev): 4.5 ms (0.03 ms) # # Evaluation of the network been tuned on kernel level: # Compile... # Evaluate inference time cost... # Mean inference time (std dev): 3.2 ms (0.03 ms) # # Evaluation of the network been tuned on graph level: # Compile... # Config for target=llvm -keys=cpu, workload=('dense_nopack.x86', ('TENSOR', (1, 512), 'float32'), ('TENSOR', (1000, 512), 'float32'), None, 'float32') is missing in ApplyGraphBest context. A fallback configuration is used, which may bring great performance regression. # Config for target=llvm -keys=cpu, workload=('dense_pack.x86', ('TENSOR', (1, 512), 'float32'), ('TENSOR', (1000, 512), 'float32'), None, 'float32') is missing in ApplyGraphBest context. A fallback configuration is used, which may bring great performance regression. # Evaluate inference time cost... # Mean inference time (std dev): 3.16 ms (0.03 ms)
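######################################################################
# Reusing the tuning logs
# -----------------------
# Once ``resnet-18.log`` and ``resnet-18_graph_opt.log`` exist, they can be reused to
# compile the network later without re-running the tuner. The function below is a small
# sketch of that flow, assuming the two log files were produced by a previous run of
# this tutorial.


def compile_with_existing_logs():
    mod, params, data_shape, _ = get_network(model_name, batch_size)
    with autotvm.apply_graph_best(graph_opt_sch_file):
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build_module.build(mod, target=target, params=params)
    evaluate_performance(lib, data_shape)


# Uncomment to compile and benchmark using previously generated logs.
# compile_with_existing_logs()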
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_microtvm/micro_aot.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-micro-AoT: microTVM Host-Driven AoT =========================== **Authors**: `Mehrdad Hessar <https://github.com/mehrdadh>`_, `Alan MacDonald <https://github.com/alanmacd>`_ This tutorial is showcasing microTVM host-driven AoT compilation with a TFLite model. AoTExecutor reduces the overhead of parsing graph at runtime compared to GraphExecutor. Also, we can have better memory management using ahead of time compilation. This tutorial can be executed on a x86 CPU using C runtime (CRT) or on Zephyr platform on a microcontroller/board supported by Zephyr. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import numpy as np import pathlib import json import os import tvm from tvm import relay from tvm.relay.backend import Executor, Runtime from tvm.contrib.download import download_testdata ###################################################################### # Import a TFLite model # --------------------- # # To begin with, download and import a Keyword Spotting TFLite model. # This model is originally from `MLPerf Tiny repository <https://github.com/mlcommons/tiny>`_. # To test this model, we use samples from `KWS dataset provided by Google <https://ai.googleblog.com/2017/08/launching-speech-commands-dataset.html>`_. # # **Note:** By default this tutorial runs on x86 CPU using CRT, if you would like to run on Zephyr platform # you need to export `TVM_MICRO_USE_HW` environment variable. # use_physical_hw = bool(os.getenv("TVM_MICRO_USE_HW")) MODEL_URL = "https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/model/keyword_spotting_quant.tflite" MODEL_PATH = download_testdata(MODEL_URL, "keyword_spotting_quant.tflite", module="model") SAMPLE_URL = "https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy" SAMPLE_PATH = download_testdata(SAMPLE_URL, "keyword_spotting_int8_6.pyc.npy", module="data") tflite_model_buf = open(MODEL_PATH, "rb").read() try: import tflite tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0) except AttributeError: import tflite.Model tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0) input_shape = (1, 49, 10, 1) INPUT_NAME = "input_1" relay_mod, params = relay.frontend.from_tflite( tflite_model, shape_dict={INPUT_NAME: input_shape}, dtype_dict={INPUT_NAME: "int8"} ) ###################################################################### # Defining the target # ------------------- # # Now we need to define the target, runtime and executor. In this tutorial, we focused on # using AOT host driven executor. 
We use the host micro target which is for running a model # on x86 CPU using CRT runtime or running a model with Zephyr platform on qemu_x86 simulator # board. In the case of a physical microcontroller, we get the target model for the physical # board (E.g. nucleo_l4r5zi) and pass it to `tvm.target.target.micro` to create a full # micro target. # # Use the C runtime (crt) and enable static linking by setting system-lib to True RUNTIME = Runtime("crt", {"system-lib": True}) # Simulate a microcontroller on the host machine. Uses the main() from `src/runtime/crt/host/main.cc`. # To use physical hardware, replace "host" with something matching your hardware. TARGET = tvm.target.target.micro("host") # Use the AOT executor rather than graph or vm executors. Don't use unpacked API or C calling style. EXECUTOR = Executor("aot") if use_physical_hw: boards_file = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) / "boards.json" with open(boards_file) as f: boards = json.load(f) BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_l4r5zi") TARGET = tvm.target.target.micro(boards[BOARD]["model"]) ###################################################################### # Compile the model # ----------------- # # Now, we compile the model for the target: # with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): module = tvm.relay.build( relay_mod, target=TARGET, params=params, runtime=RUNTIME, executor=EXECUTOR ) ###################################################################### # Create a microTVM project # ------------------------- # # Now that we have the compiled model as an IRModule, we need to create a firmware project # to use the compiled model with microTVM. To do this, we use Project API. We have defined # CRT and Zephyr microTVM template projects which are used for x86 CPU and Zephyr boards # respectively. # template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("crt")) project_options = {} # You can use options to provide platform-specific options through TVM. if use_physical_hw: template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) project_options = {"project_type": "host_driven", "board": BOARD} temp_dir = tvm.contrib.utils.tempdir() generated_project_dir = temp_dir / "project" project = tvm.micro.generate_project( template_project_path, module, generated_project_dir, project_options ) ###################################################################### # Build, flash and execute the model # ---------------------------------- # Next, we build the microTVM project and flash it. Flash step is specific to # physical microcontrollers and it is skipped if it is simulating a microcontroller # via the host main.cc or if a Zephyr emulated board is selected as the target. # Next, we define the labels for the model output and execute the model with a # sample with expected value of 6 (label: left). # project.build() project.flash() labels = [ "_silence_", "_unknown_", "yes", "no", "up", "down", "left", "right", "on", "off", "stop", "go", ] with tvm.micro.Session(project.transport()) as session: aot_executor = tvm.runtime.executor.aot_executor.AotModule(session.create_aot_executor()) sample = np.load(SAMPLE_PATH) aot_executor.get_input(INPUT_NAME).copyfrom(sample) aot_executor.run() result = aot_executor.get_output(0).numpy() print(f"Label is `{labels[np.argmax(result)]}` with index `{np.argmax(result)}`")
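######################################################################
# Inspecting more of the output
# -----------------------------
# Besides the top-1 label, it can be useful to look at the runner-up predictions. The
# snippet below is a small optional sketch that simply reuses the ``result`` array and
# ``labels`` list defined above.

scores = result.flatten()
top_k = 3
for rank, idx in enumerate(np.argsort(scores)[::-1][:top_k], start=1):
    print(f"Top-{rank}: `{labels[idx]}` (score {scores[idx]})")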
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_microtvm/micro_autotune.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-micro-autotune: Autotuning with microTVM ========================= **Authors**: `Andrew Reusch <https://github.com/areusch>`_, `Mehrdad Hessar <https://github.com/mehrdadh>`_ This tutorial explains how to autotune a model using the C runtime. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import os import json import numpy as np import pathlib import tvm from tvm.relay.backend import Runtime use_physical_hw = bool(os.getenv("TVM_MICRO_USE_HW")) #################### # Defining the model #################### # # To begin with, define a model in Relay to be executed on-device. Then create an IRModule from relay model and # fill parameters with random numbers. # data_shape = (1, 3, 10, 10) weight_shape = (6, 3, 5, 5) data = tvm.relay.var("data", tvm.relay.TensorType(data_shape, "float32")) weight = tvm.relay.var("weight", tvm.relay.TensorType(weight_shape, "float32")) y = tvm.relay.nn.conv2d( data, weight, padding=(2, 2), kernel_size=(5, 5), kernel_layout="OIHW", out_dtype="float32", ) f = tvm.relay.Function([data, weight], y) relay_mod = tvm.IRModule.from_expr(f) relay_mod = tvm.relay.transform.InferType()(relay_mod) weight_sample = np.random.rand( weight_shape[0], weight_shape[1], weight_shape[2], weight_shape[3] ).astype("float32") params = {"weight": weight_sample} ####################### # Defining the target ####################### # Now we define the TVM target that describes the execution environment. This looks very similar # to target definitions from other microTVM tutorials. Alongside this we pick the C Runtime to code # generate our model against. # # When running on physical hardware, choose a target and a board that # describe the hardware. There are multiple hardware targets that could be selected from # PLATFORM list in this tutorial. You can chose the platform by passing --platform argument when running # this tutorial. # RUNTIME = Runtime("crt", {"system-lib": True}) TARGET = tvm.target.target.micro("host") # Compiling for physical hardware # -------------------------------------------------------------------------- # When running on physical hardware, choose a TARGET and a BOARD that describe the hardware. The # STM32L4R5ZI Nucleo target and board is chosen in the example below. if use_physical_hw: boards_file = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) / "boards.json" with open(boards_file) as f: boards = json.load(f) BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_l4r5zi") TARGET = tvm.target.target.micro(boards[BOARD]["model"]) ######################### # Extracting tuning tasks ######################### # Not all operators in the Relay program printed above can be tuned. 
Some are so trivial that only
# a single implementation is defined; others don't make sense as tuning tasks. Using
# `extract_from_program`, you can produce a list of tunable tasks.
#
# Because task extraction involves running the compiler, we first configure the compiler's
# transformation passes; we'll apply the same configuration later on during autotuning.
#

pass_context = tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True})
with pass_context:
    tasks = tvm.autotvm.task.extract_from_program(relay_mod["main"], {}, TARGET)
assert len(tasks) > 0

######################
# Configuring microTVM
######################
# Before autotuning, we need to define a module loader and then pass that to
# a `tvm.autotvm.LocalBuilder`. Then we create a `tvm.autotvm.LocalRunner` and use
# both the builder and the runner to generate multiple measurements for the autotuner.
#
# In this tutorial, we have the option to use the x86 host as an example or to use different
# targets from Zephyr RTOS. If you pass `--platform=host` to this tutorial, it will use the
# x86 host. You can choose other options by choosing from the `PLATFORM` list.
#

module_loader = tvm.micro.AutoTvmModuleLoader(
    template_project_dir=pathlib.Path(tvm.micro.get_microtvm_template_projects("crt")),
    project_options={"verbose": False},
)
builder = tvm.autotvm.LocalBuilder(
    n_parallel=1,
    build_kwargs={"build_option": {"tir.disable_vectorize": True}},
    do_fork=True,
    build_func=tvm.micro.autotvm_build_func,
    runtime=RUNTIME,
)
runner = tvm.autotvm.LocalRunner(number=1, repeat=1, timeout=100, module_loader=module_loader)

measure_option = tvm.autotvm.measure_option(builder=builder, runner=runner)

# Compiling for physical hardware
if use_physical_hw:
    module_loader = tvm.micro.AutoTvmModuleLoader(
        template_project_dir=pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")),
        project_options={
            "board": BOARD,
            "west_cmd": "west",
            "verbose": False,
            "project_type": "host_driven",
        },
    )
    builder = tvm.autotvm.LocalBuilder(
        n_parallel=1,
        build_kwargs={"build_option": {"tir.disable_vectorize": True}},
        do_fork=False,
        build_func=tvm.micro.autotvm_build_func,
        runtime=RUNTIME,
    )
    runner = tvm.autotvm.LocalRunner(number=1, repeat=1, timeout=100, module_loader=module_loader)

    measure_option = tvm.autotvm.measure_option(builder=builder, runner=runner)

##########################
# Run Autotuning
##########################
# Now we can run autotuning separately on each extracted task on the microTVM device.
#

autotune_log_file = pathlib.Path("microtvm_autotune.log.txt")
if os.path.exists(autotune_log_file):
    os.remove(autotune_log_file)

num_trials = 10
for task in tasks:
    tuner = tvm.autotvm.tuner.GATuner(task)
    tuner.tune(
        n_trial=num_trials,
        measure_option=measure_option,
        callbacks=[
            tvm.autotvm.callback.log_to_file(str(autotune_log_file)),
            tvm.autotvm.callback.progress_bar(num_trials, si_prefix="M"),
        ],
        si_prefix="M",
    )

############################
# Timing the untuned program
############################
# For comparison, let's compile and run the graph without imposing any autotuning schedules. TVM
# will select a randomly-tuned implementation for each operator, which should not perform as well as
# the tuned operator.
# with pass_context: lowered = tvm.relay.build(relay_mod, target=TARGET, runtime=RUNTIME, params=params) temp_dir = tvm.contrib.utils.tempdir() project = tvm.micro.generate_project( str(tvm.micro.get_microtvm_template_projects("crt")), lowered, temp_dir / "project", {"verbose": False}, ) # Compiling for physical hardware if use_physical_hw: temp_dir = tvm.contrib.utils.tempdir() project = tvm.micro.generate_project( str(tvm.micro.get_microtvm_template_projects("zephyr")), lowered, temp_dir / "project", { "board": BOARD, "west_cmd": "west", "verbose": False, "project_type": "host_driven", }, ) project.build() project.flash() with tvm.micro.Session(project.transport()) as session: debug_module = tvm.micro.create_local_debug_executor( lowered.get_graph_json(), session.get_system_lib(), session.device ) debug_module.set_input(**lowered.get_params()) print("########## Build without Autotuning ##########") debug_module.run() del debug_module ########################## # Timing the tuned program ########################## # Once autotuning completes, you can time execution of the entire program using the Debug Runtime: with tvm.autotvm.apply_history_best(str(autotune_log_file)): with pass_context: lowered_tuned = tvm.relay.build(relay_mod, target=TARGET, runtime=RUNTIME, params=params) temp_dir = tvm.contrib.utils.tempdir() project = tvm.micro.generate_project( str(tvm.micro.get_microtvm_template_projects("crt")), lowered_tuned, temp_dir / "project", {"verbose": False}, ) # Compiling for physical hardware if use_physical_hw: temp_dir = tvm.contrib.utils.tempdir() project = tvm.micro.generate_project( str(tvm.micro.get_microtvm_template_projects("zephyr")), lowered_tuned, temp_dir / "project", { "board": BOARD, "west_cmd": "west", "verbose": False, "project_type": "host_driven", }, ) project.build() project.flash() with tvm.micro.Session(project.transport()) as session: debug_module = tvm.micro.create_local_debug_executor( lowered_tuned.get_graph_json(), session.get_system_lib(), session.device ) debug_module.set_input(**lowered_tuned.get_params()) print("########## Build with Autotuning ##########") debug_module.run() del debug_module
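##########################
# Inspecting the tuning log
##########################
# After tuning finishes, you can also look at the log file directly, for example to see
# which configuration gave the best measured time for each task. The loop below is a
# small optional sketch using ``tvm.autotvm.record.load_from_file`` on the log produced above.

best_by_task = {}
for inp, res in tvm.autotvm.record.load_from_file(str(autotune_log_file)):
    if res.error_no != 0:
        continue  # skip failed measurements
    mean_cost = float(np.mean(res.costs))
    task_name = str(inp.task)
    if task_name not in best_by_task or mean_cost < best_by_task[task_name][0]:
        best_by_task[task_name] = (mean_cost, inp.config)

for task_name, (mean_cost, config) in best_by_task.items():
    print("%s\n  best mean time: %.6f s\n  config: %s" % (task_name, mean_cost, config))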
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_microtvm/micro_ethosu.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Running TVM on bare metal Arm(R) Cortex(R)-M55 CPU and Ethos(TM)-U55 NPU with CMSIS-NN ====================================================================================== **Author**: `Grant Watson <https://github.com/grant-arm>`_ This section contains an example of how to use TVM to run a model on an Arm(R) Cortex(R)-M55 CPU and Ethos(TM)-U55 NPU with CMSIS-NN, using bare metal. The Cortex(R)-M55 is a small, low-power CPU designed for use in embedded devices. CMSIS-NN is a collection of kernels optimized for Arm(R) Cortex(R)-M CPUs. The Ethos(TM)-U55 is a microNPU, specifically designed to accelerate ML inference in resource-constrained embedded devices. In order to run the demo application without having access to a Cortex(R)-M55 and Ethos(TM)-U55 development board, we will be running our sample application on a Fixed Virtual Platform (FVP). The FVP based on Arm(R) Corstone(TM)-300 software, models a hardware system containing a Cortex(R)-M55 and Ethos(TM)-U55. It provides a programmer's view that is suitable for software development. In this tutorial, we will be compiling a MobileNet v1 model and instructing TVM to offload operators to the Ethos(TM)-U55 where possible. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore ################################################################################ # Obtaining TVM # ------------- # # To obtain TVM for you platform, please visit https://tlcpack.ai/ and follow the # instructions. Once TVM has been installed correctly, you should have access to # ``tvmc`` from the command line. # # Typing ``tvmc`` on the command line should display the following: # # .. code-block:: text # # usage: tvmc [-h] [-v] [--version] {tune,compile,run} ... # # TVM compiler driver # # optional arguments: # -h, --help show this help message and exit # -v, --verbose increase verbosity # --version print the version and exit # # commands: # {tune,compile,run} # tune auto-tune a model # compile compile a model. # run run a compiled module # # TVMC - TVM driver command-line interface # ################################################################################ # Installing additional python dependencies # ----------------------------------------- # # In order to run the demo, you will need some additional python packages. # These can be installed by using the requirements.txt file below: # # .. 
code-block:: text # :caption: requirements.txt # :name: requirements.txt # # attrs==21.2.0 # cloudpickle==2.0.0 # decorator==5.1.0 # ethos-u-vela==3.2.0 # flatbuffers==1.12 # lxml==4.6.3 # nose==1.3.7 # numpy==1.19.5 # Pillow==8.3.2 # psutil==5.8.0 # scipy==1.5.4 # synr==0.4 # tflite==2.4.0 # tornado==6.1 # # These packages can be installed by running the following from the command line: # # .. code-block:: bash # # pip install -r requirements.txt # ################################################################################ # Obtaining the Model # ------------------- # # For this tutorial, we will be working with MobileNet v1. # MobileNet v1 is a convolutional neural network designed to classify images, # that has been optimized for edge devices. The model we will be using has been # pre-trained to classify images into one of 1001 different categories. # The network has an input image size of 224x224 so any input images will need # to be resized to those dimensions before being used. # # For this tutorial we will be using the model in Tflite format. # # .. code-block:: bash # # mkdir -p ./build # cd build # wget https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz # gunzip mobilenet_v1_1.0_224_quant.tgz # tar xvf mobilenet_v1_1.0_224_quant.tar # ###################################################################################### # Compiling the model for Arm(R) Cortex(R)-M55 CPU and Ethos(TM)-U55 NPU with CMSIS-NN # ------------------------------------------------------------------------------------ # # Once we've downloaded the MobileNet v1 model, the next step is to compile it. # To accomplish that, we are going to use ``tvmc compile``. The output we get from # the compilation process is a TAR package of the model compiled to the Model # Library Format (MLF) for our target platform. We will be able to run that model # on our target device using the TVM runtime. # # .. code-block:: bash # # tvmc compile --target=ethos-u,cmsis-nn,c \ # --target-ethos-u-accelerator_config=ethos-u55-256 \ # --target-cmsis-nn-mcpu=cortex-m55 \ # --target-c-mcpu=cortex-m55 \ # --runtime=crt \ # --executor=aot \ # --executor-aot-interface-api=c \ # --executor-aot-unpacked-api=1 \ # --pass-config tir.usmp.enable=1 \ # --pass-config tir.usmp.algorithm=hill_climb \ # --pass-config tir.disable_storage_rewrite=1 \ # --pass-config tir.disable_vectorize=1 \ # ./mobilenet_v1_1.0_224_quant.tflite \ # --output-format=mlf # ################################################################################ # .. note:: Explanation of tvmc compile arguments: # # * ``--target=ethos-u,cmsis-nn,c`` : offload operators to the microNPU where possible, falling back to CMSIS-NN and finally generated C code where an operator is not supported on the microNPU.. # # * ``--target-ethos-u-accelerator_config=ethos-u55-256`` : specifies the microNPU configuration # # * ``--target-c-mcpu=cortex-m55`` : Cross-compile for the Cortex(R)-M55. # # * ``--runtime=crt`` : Generate glue code to allow operators to work with C runtime. # # * ``--executor=aot`` : Use Ahead Of Time compiltaion instead of the Graph Executor. # # * ``--executor-aot-interface-api=c`` : Generate a C-style interface with structures designed for integrating into C apps at the boundary. # # * ``--executor-aot-unpacked-api=1`` : Use the unpacked API internally. 
# # * ``--pass-config tir.usmp.enable=1`` : Enable Unified Static Memory Planning # # * ``--pass-config tir.usmp.algorithm=hill_climb`` : Use the hill-climb algorithm for USMP # # * ``--pass-config tir.disable_storage_rewrite=1`` : Disable storage rewrite # # * ``--pass-config tir.disable_vectorize=1`` : Disable vectorize since there are no standard vectorized types in C. # # * ``./mobilenet_v1_1.0_224_quant.tflite`` : The TFLite model that is being compiled. # # * ``--output-format=mlf`` : Output should be generated in the Model Library Format. # ################################################################################ # .. note:: If you don't want to make use of the microNPU and want to offload # operators to CMSIS-NN only: # # * Use ``--target=cmsis-nn,c`` in place of ``--target=ethos-u,cmsis-nn,c`` # # * Remove the microNPU config parameter ``--target-ethos-u-accelerator_config=ethos-u55-256`` # ################################################################################ # Extracting the generated code into the current directory # -------------------------------------------------------- # # .. code-block:: bash # # tar xvf module.tar # ################################################################################ # Getting ImageNet labels # ----------------------- # # When running MobileNet v1 on an image, the result is an index in the range 0 to # 1000. In order to make our application a little more user friendly, instead of # just displaying the category index, we will display the associated label. We # will download these image labels into a text file now and use a python script # to include them in our C application later. # # .. code-block:: bash # # curl -sS https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/lite/java/demo/app/src/main/assets/labels_mobilenet_quant_v1_224.txt \ # -o ./labels_mobilenet_quant_v1_224.txt # ################################################################################ # Getting the input image # ----------------------- # # As input for this tutorial, we will use the image of a cat, but you can # substitute an image of your choosing. # # .. image:: https://s3.amazonaws.com/model-server/inputs/kitten.jpg # :height: 224px # :width: 224px # :align: center # # We download the image into the build directory and we will use a python script # in the next step to convert the image into an array of bytes in a C header file. # # .. code-block:: bash # # curl -sS https://s3.amazonaws.com/model-server/inputs/kitten.jpg -o ./kitten.jpg # ################################################################################ # Pre-processing the image # ------------------------ # # The following script will create 2 C header files in the src directory: # # * ``inputs.h`` - The image supplied as an argument to the script will be converted # to an array of integers for input to our MobileNet v1 model. # * ``outputs.h`` - An integer array of zeroes will reserve 1001 integer values # for the output of inference. # # .. code-block:: python # :caption: convert_image.py # :name: convert_image.py # # #!python ./convert_image.py # import os # import pathlib # import re # import sys # from PIL import Image # import numpy as np # # # def create_header_file(name, section, tensor_name, tensor_data, output_path): # """ # This function generates a header file containing the data from the numpy array provided. 
# """ # file_path = pathlib.Path(f"{output_path}/" + name).resolve() # # Create header file with npy_data as a C array # raw_path = file_path.with_suffix(".h").resolve() # with open(raw_path, "w") as header_file: # header_file.write( # "#include <tvmgen_default.h>\n" # + f"const size_t {tensor_name}_len = {tensor_data.size};\n" # + f'uint8_t {tensor_name}[] __attribute__((section("{section}"), aligned(16))) = "' # ) # data_hexstr = tensor_data.tobytes().hex() # for i in range(0, len(data_hexstr), 2): # header_file.write(f"\\x{data_hexstr[i:i+2]}") # header_file.write('";\n\n') # # # def create_headers(image_name): # """ # This function generates C header files for the input and output arrays required to run inferences # """ # img_path = os.path.join("./", f"{image_name}") # # # Resize image to 224x224 # resized_image = Image.open(img_path).resize((224, 224)) # img_data = np.asarray(resized_image).astype("float32") # # # Convert input to NCHW # img_data = np.transpose(img_data, (2, 0, 1)) # # # Create input header file # input_data = img_data.astype(np.uint8) # create_header_file("inputs", "ethosu_scratch", "input", input_data, "./include") # # Create output header file # output_data = np.zeros([1001], np.uint8) # create_header_file( # "outputs", # "output_data_sec", # "output", # output_data, # "./include", # ) # # # if __name__ == "__main__": # create_headers(sys.argv[1]) # # Run the script from the command line: # # .. code-block:: bash # # python convert_image.py ./kitten.jpg ################################################################################ # Pre-processing the labels # ------------------------- # # The following script will create a ``labels.h`` header file in the src directory. # The labels.txt file that we downloaded previously will be turned # into an array of strings. This array will be used to display the label that # our image has been classified as. # # .. code-block:: python # :caption: convert_labels.py # :name: convert_labels.py # # #!python ./convert_labels.py # import os # import pathlib # import sys # # # def create_labels_header(labels_file, section, output_path): # """ # This function generates a header file containing the ImageNet labels as an array of strings # """ # labels_path = pathlib.Path(labels_file).resolve() # file_path = pathlib.Path(f"{output_path}/labels.h").resolve() # # with open(labels_path) as f: # labels = f.readlines() # # with open(file_path, "w") as header_file: # header_file.write(f'char* labels[] __attribute__((section("{section}"), aligned(16))) = {{') # # for _, label in enumerate(labels): # header_file.write(f'"{label.rstrip()}",') # # header_file.write("};\n") # # # if __name__ == "__main__": # create_labels_header(sys.argv[1], "ethosu_scratch", "./include") # # Run the script from the command line: # # .. code-block:: bash # # python convert_labels.py ################################################################################ # Writing the demo application # ---------------------------- # # The following C application will run a single inference of the MobileNet v1 # model on the image that we downloaded and converted to an array of integers # previously. Since the model was compiled with a target of "ethos-u ...", # operators supported by the Ethos(TM)-U55 NPU will be offloaded for acceleration. # Once the application is built and run, our test image should be correctly # classied as a "tabby" and the result should be displayed on the console. # This file should be placed in ``./src`` # # .. 
code-block:: c # :caption: demo.c # :name: demo.c # # #include <stdio.h> # #include <tvm_runtime.h> # # #include "ethosu_mod.h" # #include "uart.h" # # // Header files generated by convert_image.py and convert_labels.py # #include "inputs.h" # #include "labels.h" # #include "outputs.h" # # int abs(int v) { return v * ((v > 0) - (v < 0)); } # # int main(int argc, char** argv) { # uart_init(); # printf("Starting Demo\n"); # EthosuInit(); # # printf("Allocating memory\n"); # StackMemoryManager_Init(&app_workspace, g_aot_memory, WORKSPACE_SIZE); # # printf("Running inference\n"); # struct tvmgen_default_outputs outputs = { # .output = output, # }; # struct tvmgen_default_inputs inputs = { # .input = input, # }; # struct ethosu_driver* driver = ethosu_reserve_driver(); # struct tvmgen_default_devices devices = { # .ethos_u = driver, # }; # tvmgen_default_run(&inputs, &outputs, &devices); # ethosu_release_driver(driver); # # // Calculate index of max value # uint8_t max_value = 0; # int32_t max_index = -1; # for (unsigned int i = 0; i < output_len; ++i) { # if (output[i] > max_value) { # max_value = output[i]; # max_index = i; # } # } # printf("The image has been classified as '%s'\n", labels[max_index]); # # // The FVP will shut down when it receives "EXITTHESIM" on the UART # printf("EXITTHESIM\n"); # while (1 == 1) # ; # return 0; # } # # # In addition, you will need these header files from github in your ``./include`` directory: # # `include files <https://github.com/apache/tvm/tree/main/apps/microtvm/ethosu/include>`_ ################################################################################ # .. note:: # # If you'd like to use FreeRTOS for task scheduling and queues, a sample application can be found here # `demo_freertos.c <https://github.com/apache/tvm/blob/main/apps/microtvm/ethosu/src/demo_freertos.c>` ################################################################################ # Creating the linker script # -------------------------- # # We need to create a linker script that will be used when we build our application # in the following section. The linker script tells the linker where everything # should be placed in memory. The corstone300.ld linker script below should be # placed in your working directory. # # An example linker script for the FVP can be found here # `corstone300.ld <https://github.com/apache/tvm/blob/main/apps/microtvm/ethosu/corstone300.ld>`_ ################################################################################ # .. note:: # # The code generated by TVM will place the model weights and the Arm(R) # Ethos(TM)-U55 command stream in a section named ``ethosu_scratch``. # For a model the size of MobileNet v1, the weights and command stream will not # fit into the limited SRAM available. For this reason it's important that the # linker script places the ``ethosu_scratch`` section into DRAM (DDR). ################################################################################ # .. note:: # # Before building and running the application, you will need to update your # PATH environment variable to include the path to cmake 3.19.5 and the FVP. 
# For example if you've installed these in ``/opt/arm`` , then you would do # the following: # # ``export PATH=/opt/arm/FVP_Corstone_SSE-300_Ethos-U55/models/Linux64_GCC-6.4:/opt/arm/cmake/bin:$PATH`` # ################################################################################ # Building the demo application using make # ---------------------------------------- # # We can now build the demo application using make. The Makefile should be placed # in your working directory before running ``make`` on the command line: # # An example Makefile can be found here: # `Makefile <https://github.com/apache/tvm/blob/main/apps/microtvm/ethosu/Makefile>`_ ################################################################################ # .. note:: # # If you're using FreeRTOS, the Makefile builds it from the specified FREERTOS_PATH: # ``make FREERTOS_PATH=<FreeRTOS directory>`` # ################################################################################ # Running the demo application # ---------------------------- # # Finally, we can run our demo appliction on the Fixed Virtual Platform (FVP), # by using the following command: # # .. code-block:: bash # # FVP_Corstone_SSE-300_Ethos-U55 -C cpu0.CFGDTCMSZ=15 \ # -C cpu0.CFGITCMSZ=15 -C mps3_board.uart0.out_file=\"-\" -C mps3_board.uart0.shutdown_tag=\"EXITTHESIM\" \ # -C mps3_board.visualisation.disable-visualisation=1 -C mps3_board.telnetterminal0.start_telnet=0 \ # -C mps3_board.telnetterminal1.start_telnet=0 -C mps3_board.telnetterminal2.start_telnet=0 -C mps3_board.telnetterminal5.start_telnet=0 \ # -C ethosu.extra_args="--fast" \ # -C ethosu.num_macs=256 ./build/demo # # You should see the following output displayed in your console window: # # .. code-block:: text # # telnetterminal0: Listening for serial connection on port 5000 # telnetterminal1: Listening for serial connection on port 5001 # telnetterminal2: Listening for serial connection on port 5002 # telnetterminal5: Listening for serial connection on port 5003 # # Ethos-U rev dedfa618 --- Jan 12 2021 23:03:55 # (C) COPYRIGHT 2019-2021 Arm Limited # ALL RIGHTS RESERVED # # Starting Demo # ethosu_init. base_address=0x48102000, fast_memory=0x0, fast_memory_size=0, secure=1, privileged=1 # ethosu_register_driver: New NPU driver at address 0x20000de8 is registered. # CMD=0x00000000 # Soft reset NPU # Allocating memory # Running inference # ethosu_find_and_reserve_driver - Driver 0x20000de8 reserved. # ethosu_invoke # CMD=0x00000004 # QCONFIG=0x00000002 # REGIONCFG0=0x00000003 # REGIONCFG1=0x00000003 # REGIONCFG2=0x00000013 # REGIONCFG3=0x00000053 # REGIONCFG4=0x00000153 # REGIONCFG5=0x00000553 # REGIONCFG6=0x00001553 # REGIONCFG7=0x00005553 # AXI_LIMIT0=0x0f1f0000 # AXI_LIMIT1=0x0f1f0000 # AXI_LIMIT2=0x0f1f0000 # AXI_LIMIT3=0x0f1f0000 # ethosu_invoke OPTIMIZER_CONFIG # handle_optimizer_config: # Optimizer release nbr: 0 patch: 1 # Optimizer config cmd_stream_version: 0 macs_per_cc: 8 shram_size: 48 custom_dma: 0 # Optimizer config Ethos-U version: 1.0.6 # Ethos-U config cmd_stream_version: 0 macs_per_cc: 8 shram_size: 48 custom_dma: 0 # Ethos-U version: 1.0.6 # ethosu_invoke NOP # ethosu_invoke NOP # ethosu_invoke NOP # ethosu_invoke COMMAND_STREAM # handle_command_stream: cmd_stream=0x61025be0, cms_length 1181 # QBASE=0x0000000061025be0, QSIZE=4724, base_pointer_offset=0x00000000 # BASEP0=0x0000000061026e60 # BASEP1=0x0000000060002f10 # BASEP2=0x0000000060002f10 # BASEP3=0x0000000061000fb0 # BASEP4=0x0000000060000fb0 # CMD=0x000Interrupt. 
status=0xffff0022, qread=4724 # CMD=0x00000006 # 00006 # CMD=0x0000000c # ethosu_release_driver - Driver 0x20000de8 released # The image has been classified as 'tabby' # EXITTHESIM # Info: /OSCI/SystemC: Simulation stopped by user. # # You should see near the end of the output that the image has been correctly # classified as 'tabby'.
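################################################################################
# Checking the output automatically (optional)
# --------------------------------------------
#
# If you are scripting this flow (for example in a CI job), you can redirect the FVP's
# UART output to a file and check the classification result programmatically. The
# script below is a small sketch added for illustration; it is not part of the original
# demo, and the log file name passed on the command line is a placeholder.
#
# .. code-block:: python
#    :caption: check_output.py
#    :name: check_output.py
#
#    #!python ./check_output.py
#    import sys
#
#    expected = "The image has been classified as 'tabby'"
#    with open(sys.argv[1]) as log_file:
#        log = log_file.read()
#    print("Demo passed" if expected in log else "Demo failed")
#
# Run the script from the command line after capturing the FVP output to a file, for
# example ``python check_output.py fvp_output.log``.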
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_microtvm/micro_pytorch.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-micro-Pytorch: microTVM PyTorch Tutorial =========================== **Authors**: `Mehrdad Hessar <https://github.com/mehrdadh>`_ This tutorial is showcasing microTVM host-driven AoT compilation with a PyTorch model. This tutorial can be executed on a x86 CPU using C runtime (CRT). **Note:** This tutorial only runs on x86 CPU using CRT and does not run on Zephyr since the model would not fit on our current supported Zephyr boards. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import pathlib import torch import torchvision from torchvision import transforms import numpy as np from PIL import Image import tvm from tvm import relay from tvm.contrib.download import download_testdata from tvm.relay.backend import Executor ################################## # Load a pre-trained PyTorch model # -------------------------------- # # To begin with, load pre-trained MobileNetV2 from torchvision. Then, # download a cat image and preprocess it to use as the model input. # model = torchvision.models.quantization.mobilenet_v2(weights="DEFAULT", quantize=True) model = model.eval() input_shape = [1, 3, 224, 224] input_data = torch.randn(input_shape) scripted_model = torch.jit.trace(model, input_data).eval() img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true" img_path = download_testdata(img_url, "cat.png", module="data") img = Image.open(img_path).resize((224, 224)) # Preprocess the image and convert to tensor my_preprocess = transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ] ) img = my_preprocess(img) img = np.expand_dims(img, 0) input_name = "input0" shape_list = [(input_name, input_shape)] relay_mod, params = relay.frontend.from_pytorch(scripted_model, shape_list) ##################################### # Define Target, Runtime and Executor # ----------------------------------- # # In this tutorial we use AOT host-driven executor. To compile the model # for an emulated embedded environment on an x86 machine we use C runtime (CRT) # and we use `host` micro target. Using this setup, TVM compiles the model # for C runtime which can run on a x86 CPU machine with the same flow that # would run on a physical microcontroller. # # Simulate a microcontroller on the host machine. Uses the main() from `src/runtime/crt/host/main.cc` # To use physical hardware, replace "host" with another physical micro target, e.g. `nrf52840` # or `mps2_an521`. See more more target examples in micro_train.py and micro_tflite.py tutorials. 
target = tvm.target.target.micro("host") # Use the C runtime (crt) and enable static linking by setting system-lib to True runtime = tvm.relay.backend.Runtime("crt", {"system-lib": True}) # Use the AOT executor rather than graph or vm executors. Don't use unpacked API or C calling style. executor = Executor("aot") #################### # Compile the model # ------------------ # # Now, we compile the model for the target: # with tvm.transform.PassContext( opt_level=3, config={"tir.disable_vectorize": True}, ): module = tvm.relay.build( relay_mod, target=target, runtime=runtime, executor=executor, params=params ) ########################### # Create a microTVM project # ------------------------- # # Now that we have the compiled model as an IRModule, we need to create a firmware project # to use the compiled model with microTVM. To do this, we use Project API. # template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("crt")) project_options = {"verbose": False, "memory_size_bytes": 6 * 1024 * 1024} temp_dir = tvm.contrib.utils.tempdir() / "project" project = tvm.micro.generate_project( str(template_project_path), module, temp_dir, project_options, ) #################################### # Build, flash and execute the model # ---------------------------------- # Next, we build the microTVM project and flash it. Flash step is specific to # physical microcontroller and it is skipped if it is simulating a microcontroller # via the host `main.cc`` or if a Zephyr emulated board is selected as the target. # project.build() project.flash() input_data = {input_name: tvm.nd.array(img.astype("float32"))} with tvm.micro.Session(project.transport()) as session: aot_executor = tvm.runtime.executor.aot_executor.AotModule(session.create_aot_executor()) aot_executor.set_input(**input_data) aot_executor.run() result = aot_executor.get_output(0).numpy() ##################### # Look up synset name # ------------------- # Look up prediction top 1 index in 1000 class synset. # synset_url = ( "https://raw.githubusercontent.com/Cadene/" "pretrained-models.pytorch/master/data/" "imagenet_synsets.txt" ) synset_name = "imagenet_synsets.txt" synset_path = download_testdata(synset_url, synset_name, module="data") with open(synset_path) as f: synsets = f.readlines() synsets = [x.strip() for x in synsets] splits = [line.split(" ") for line in synsets] key_to_classname = {spl[0]: " ".join(spl[1:]) for spl in splits} class_url = ( "https://raw.githubusercontent.com/Cadene/" "pretrained-models.pytorch/master/data/" "imagenet_classes.txt" ) class_path = download_testdata(class_url, "imagenet_classes.txt", module="data") with open(class_path) as f: class_id_to_key = f.readlines() class_id_to_key = [x.strip() for x in class_id_to_key] # Get top-1 result for TVM top1_tvm = np.argmax(result) tvm_class_key = class_id_to_key[top1_tvm] # Convert input to PyTorch variable and get PyTorch result for comparison with torch.no_grad(): torch_img = torch.from_numpy(img) output = model(torch_img) # Get top-1 result for PyTorch top1_torch = np.argmax(output.numpy()) torch_class_key = class_id_to_key[top1_torch] print("Relay top-1 id: {}, class name: {}".format(top1_tvm, key_to_classname[tvm_class_key])) print("Torch top-1 id: {}, class name: {}".format(top1_torch, key_to_classname[torch_class_key]))
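######################################################################
# Optional sanity check (not part of the original tutorial): widen the comparison from top-1 to
# top-5 and confirm that the microTVM result tracks PyTorch closely. This is a minimal sketch that
# only reuses objects already defined above (`result`, `output`, `class_id_to_key`,
# `key_to_classname`); the choice of five classes is arbitrary.
top5_tvm = np.argsort(result.flatten())[::-1][:5]
top5_torch = np.argsort(output.numpy().flatten())[::-1][:5]
for rank, (t_id, p_id) in enumerate(zip(top5_tvm, top5_torch), start=1):
    print(
        "rank {}: TVM {} ({}) vs Torch {} ({})".format(
            rank,
            t_id,
            key_to_classname[class_id_to_key[t_id]],
            p_id,
            key_to_classname[class_id_to_key[p_id]],
        )
    )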
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_microtvm/micro_reference_vm.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-micro-reference-vm: =================================== microTVM Reference Virtual Machines =================================== **Author**: `Andrew Reusch <[email protected]>`_ This tutorial explains how to launch microTVM Reference Virtual Machines. You can use these to develop on real physical hardware without needing to individually install the microTVM dependencies. These are also particularly useful when trying to reproduce behavior with microTVM, such as when filing bug reports. microTVM is the effort to allow TVM to build and execute models on bare-metal microcontrollers. microTVM aims to be compatible with a wide variety of SoCs and runtime environments (i.e. bare metal, RTOS, etc). However, some stable software environment is needed to allow developers to share and reproduce bugs and results. The microTVM Reference Virtual Machines are intended to provide that environment. How it works ============ No Virtual Machines are stored in the TVM repository--instead, the files stored in ``apps/microtvm/reference-vm`` describe how to build VMs to the Vagrant_ VM builder tool. The Reference VMs are split into two parts: 1. A Vagrant Base Box, which contains all of the stable dependencies for that platform. Build scripts are stored in ``apps/microtvm/reference-vm/<platform>/base-box``. TVM committers run these when a platform's "stable" dependencies change, and the generated base boxes are stored in `Vagrant Cloud`_. 2. A per-workspace VM, which users normally build using the Base Box as a starting point. Build scripts are stored in ``apps/microtvm/reference-vm/<platform>`` (everything except ``base-box``). .. _Vagrant: https://vagrantup.com .. _Vagrant Cloud: https://app.vagrantup.com/tlcpack Setting up the VM ================= Installing prerequisites ------------------------ A minimal set of prerequisites are needed: 1. `Vagrant <https://vagrantup.com>`__ 2. A supported Virtual Machine hypervisor (**VirtualBox**, **Parallels**, or **VMWare Fusion/Workstation**). `VirtualBox <https://www.virtualbox.org>`__ is a suggested free hypervisor, but please note that the `VirtualBox Extension Pack`_ is required for proper USB forwarding. If using VirtualBox, also consider installing the `vbguest <https://github.com/dotless-de/vagrant-vbguest>`_ plugin. .. _VirtualBox Extension Pack: https://www.virtualbox.org/wiki/Downloads#VirtualBox6.1.16OracleVMVirtualBoxExtensionPack 3. If required for your hypervisor, the `Vagrant provider plugin <https://github.com/hashicorp/vagrant/wiki/Available-Vagrant-Plugins#providers>`__ (or see `here <https://www.vagrantup.com/vmware>`__ for VMWare). First boot ---------- The first time you use a reference VM, you need to create the box locally and then provision it. .. 
code-block:: bash # Replace zephyr with the name of a different platform, if you are not using Zephyr. ~/.../tvm $ cd apps/microtvm/reference-vm/zephyr # Replace <provider_name> with the name of the hypervisor you wish to use (i.e. virtualbox, parallels, vmware_desktop). ~/.../tvm/apps/microtvm/reference-vm/zephyr $ vagrant up --provider=<provider_name> This command will take a couple of minutes to run and will require 4 to 5GB of storage on your machine. It does the following: 1. Downloads the `microTVM base box`_ and clones it to form a new VM specific to this TVM directory. 2. Mounts your TVM directory (and, if using ``git-subtree``, the original ``.git`` repo) into the VM. 3. Builds TVM and installs a Python virtualenv with the dependencies corresponding with your TVM build. .. _microTVM base box: https://app.vagrantup.com/tlcpack/boxes/microtvm Connect Hardware to the VM -------------------------- Next, you need to configure USB passthrough to attach your physical development board to the virtual machine (rather than directly to your laptop's host OS). It's suggested you setup a device filter, rather than doing a one-time forward, because often the device may reboot during the programming process and you may, at that time, need to enable forwarding again. It may not be obvious to the end user when this occurs. Instructions to do that: * `VirtualBox <https://www.virtualbox.org/manual/ch03.html#usb-support>`__ * `Parallels <https://kb.parallels.com/122993>`__ * `VMWare Workstation <https://docs.vmware.com/en/VMware-Workstation-Pro/15.0/com.vmware.ws.using.doc/GUID-E003456F-EB94-4B53-9082-293D9617CB5A.html>`__ Rebuilding TVM inside the Reference VM -------------------------------------- After the first boot, you'll need to ensure you keep the build, in ``$TVM_HOME/build-microtvm-zephyr``, up-to-date when you modify the C++ runtime or checkout a different revision. You can either re-provision the machine (``vagrant provision`` in the same directory you ran ``vagrant up`` before) or manually rebuild TVM yourself. Remember: the TVM ``.so`` built inside the VM is different from the one you may use on your host machine. This is why it's built inside the special directory ``build-microtvm-zephyr``. Logging in to the VM -------------------- The VM should be available to your host only with the hostname ``microtvm``. You can SSH to the VM as follows: .. code-block:: bash $ vagrant ssh Then ``cd`` to the same path used on your host machine for TVM. For example, on Mac: .. code-block:: bash $ cd /Users/yourusername/path/to/tvm Running tests ============= Once the VM has been provisioned, tests can be executed using ``poetry``: .. code-block:: bash $ cd apps/microtvm/reference-vm/zephyr $ poetry run python3 ../../../../tests/micro/zephyr/test_zephyr.py --board=stm32f746g_disco If you do not have physical hardware attached, but wish to run the tests using the local QEMU emulator running within the VM, run the following commands instead: .. code-block:: bash $ cd /Users/yourusername/path/to/tvm $ cd apps/microtvm/reference-vm/zephyr/ $ poetry run pytest ../../../../tests/micro/zephyr/test_zephyr.py --board=qemu_x86 """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore
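######################################################################
# Optional, not part of the original tutorial: a minimal sketch of a check you could run inside
# the VM's poetry environment to confirm that TVM imports and that the Zephyr template project
# used by the microTVM tests is present. The exact board list depends on how the reference VM was
# provisioned.
import json
import pathlib

import tvm.micro

template = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr"))
print("Zephyr template project:", template)
with open(template / "boards.json") as f:
    print("Known boards:", ", ".join(sorted(json.load(f))))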
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_microtvm/micro_tflite.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _microTVM-with-TFLite: microTVM with TFLite Models =========================== **Author**: `Tom Gall <https://github.com/tom-gall>`_ This tutorial is an introduction to working with microTVM and a TFLite model with Relay. """ ###################################################################### # .. note:: # If you want to run this tutorial on the microTVM Reference VM, download the Jupyter # notebook using the link at the bottom of this page and save it into the TVM directory. Then: # # #. Login to the reference VM with a modified ``vagrant ssh`` command: # # ``$ vagrant ssh -- -L8888:localhost:8888`` # # #. Install jupyter: ``pip install jupyterlab`` # #. ``cd`` to the TVM directory. # #. Install tflite: poetry install -E importer-tflite # #. Launch Jupyter Notebook: ``jupyter notebook`` # #. Copy the localhost URL displayed, and paste it into your browser. # #. Navigate to saved Jupyter Notebook (``.ipynb`` file). # # # Setup # ----- # # Install TFLite # ^^^^^^^^^^^^^^ # # To get started, TFLite package needs to be installed as prerequisite. You can do this in two ways: # # 1. Install tflite with ``pip`` # # .. code-block:: bash # # pip install tflite=2.1.0 --user # # 2. Generate the TFLite package yourself. The steps are the following: # # Get the flatc compiler. # Please refer to https://github.com/google/flatbuffers for details # and make sure it is properly installed. # # .. code-block:: bash # # flatc --version # # Get the TFLite schema. # # .. code-block:: bash # # wget https://raw.githubusercontent.com/tensorflow/tensorflow/r1.13/tensorflow/lite/schema/schema.fbs # # Generate TFLite package. # # .. code-block:: bash # # flatc --python schema.fbs # # Add the current folder (which contains generated tflite module) to PYTHONPATH. # # .. code-block:: bash # # export PYTHONPATH=${PYTHONPATH:+$PYTHONPATH:}$(pwd) # # To validate that the TFLite package was installed successfully, ``python -c "import tflite"`` # # Install Zephyr (physical hardware only) # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # When running this tutorial with a host simulation (the default), you can use the host ``gcc`` to # build a firmware image that simulates the device. When compiling to run on physical hardware, you # need to install a *toolchain* plus some target-specific dependencies. microTVM allows you to # supply any compiler and runtime that can launch the TVM RPC server, but to get started, this # tutorial relies on the Zephyr RTOS to provide these pieces. # # You can install Zephyr by following the # `Installation Instructions <https://docs.zephyrproject.org/latest/getting_started/index.html>`_. # # Aside: Recreating your own Pre-Trained TFLite model # The tutorial downloads a pretrained TFLite model. 
When working with microcontrollers # you need to be mindful these are highly resource constrained devices as such standard # models like MobileNet may not fit into their modest memory. # # For this tutorial, we'll make use of one of the TF Micro example models. # # If you wish to replicate the training steps see: # https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/micro/examples/hello_world/train # # .. note:: # # If you accidentally download the example pretrained model from: # # ``wget https://storage.googleapis.com/download.tensorflow.org/models/tflite/micro/hello_world_2020_04_13.zip`` # # this will fail due to an unimplemented opcode (114) # # Load and prepare the Pre-Trained Model # -------------------------------------- # # Load the pretrained TFLite model from a file in your current # directory into a buffer # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import os import json import tarfile import pathlib import tempfile import numpy as np import tvm from tvm import relay import tvm.contrib.utils from tvm.contrib.download import download_testdata use_physical_hw = bool(os.getenv("TVM_MICRO_USE_HW")) model_url = "https://people.linaro.org/~tom.gall/sine_model.tflite" model_file = "sine_model.tflite" model_path = download_testdata(model_url, model_file, module="data") tflite_model_buf = open(model_path, "rb").read() ###################################################################### # Using the buffer, transform into a tflite model python object try: import tflite tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0) except AttributeError: import tflite.Model tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0) ###################################################################### # Print out the version of the model version = tflite_model.Version() print("Model Version: " + str(version)) ###################################################################### # Parse the python model object to convert it into a relay module # and weights. # It is important to note that the input tensor name must match what # is contained in the model. # # If you are unsure what that might be, this can be discovered by using # the ``visualize.py`` script within the Tensorflow project. # See `How do I inspect a .tflite file? <https://www.tensorflow.org/lite/guide/faq>`_ input_tensor = "dense_4_input" input_shape = (1,) input_dtype = "float32" mod, params = relay.frontend.from_tflite( tflite_model, shape_dict={input_tensor: input_shape}, dtype_dict={input_tensor: input_dtype} ) ###################################################################### # Defining the target # ------------------- # # Now we create a build config for relay, turning off two options and then calling relay.build which # will result in a C source file for the selected TARGET. When running on a simulated target of the # same architecture as the host (where this Python script is executed) choose "host" below for the # TARGET, the C Runtime as the RUNTIME and a proper board/VM to run it (Zephyr will create the right # QEMU VM based on BOARD. In the example below the x86 arch is selected and a x86 VM is picked up accordingly: # RUNTIME = tvm.relay.backend.Runtime("crt", {"system-lib": True}) TARGET = tvm.target.target.micro("host") # # Compiling for physical hardware # When running on physical hardware, choose a TARGET and a BOARD that describe the hardware. 
The # STM32F746 Nucleo target and board is chosen in the example below. Another option would be to # choose the STM32F746 Discovery board instead. Since that board has the same MCU as the Nucleo # board but a couple of wirings and configs differ, it's necessary to select the "stm32f746g_disco" # board to generated the right firmware image. # if use_physical_hw: boards_file = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) / "boards.json" with open(boards_file) as f: boards = json.load(f) BOARD = os.getenv("TVM_MICRO_BOARD", default="nucleo_f746zg") TARGET = tvm.target.target.micro(boards[BOARD]["model"]) # # For some boards, Zephyr runs them emulated by default, using QEMU. For example, below is the # TARGET and BOARD used to build a microTVM firmware for the mps2-an521 board. Since that board # runs emulated by default on Zephyr the suffix "-qemu" is added to the board name to inform # microTVM that the QEMU transporter must be used to communicate with the board. If the board name # already has the prefix "qemu_", like "qemu_x86", then it's not necessary to add that suffix. # # TARGET = tvm.target.target.micro("mps2_an521") # BOARD = "mps2_an521-qemu" ###################################################################### # Now, compile the model for the target: with tvm.transform.PassContext( opt_level=3, config={"tir.disable_vectorize": True}, disabled_pass=["AlterOpLayout"] ): module = relay.build(mod, target=TARGET, runtime=RUNTIME, params=params) # Inspecting the compilation output # --------------------------------- # # The compilation process has produced some C code implementing the operators in this graph. We # can inspect it by printing the CSourceModule contents (for the purposes of this tutorial, let's # just print the first 10 lines): c_source_module = module.get_lib().imported_modules[0] assert c_source_module.type_key == "c", "tutorial is broken" c_source_code = c_source_module.get_source() first_few_lines = c_source_code.split("\n")[:10] assert any( l.startswith("TVM_DLL int32_t tvmgen_default_") for l in first_few_lines ), f"tutorial is broken: {first_few_lines!r}" print("\n".join(first_few_lines)) # Compiling the generated code # ---------------------------- # # Now we need to incorporate the generated C code into a project that allows us to run inference on the # device. The simplest way to do this is to integrate it yourself, using microTVM's standard output format # (:doc:`Model Library Format` </dev/model_library_format>`). This is a tarball with a standard layout: # Get a temporary path where we can store the tarball (since this is running as a tutorial). fd, model_library_format_tar_path = tempfile.mkstemp() os.close(fd) os.unlink(model_library_format_tar_path) tvm.micro.export_model_library_format(module, model_library_format_tar_path) with tarfile.open(model_library_format_tar_path, "r:*") as tar_f: print("\n".join(f" - {m.name}" for m in tar_f.getmembers())) # Cleanup for tutorial: os.unlink(model_library_format_tar_path) # TVM also provides a standard way for embedded platforms to automatically generate a standalone # project, compile and flash it to a target, and communicate with it using the standard TVM RPC # protocol. The Model Library Format serves as the model input to this process. When embedded # platforms provide such an integration, they can be used directly by TVM for both host-driven # inference and autotuning . 
This integration is provided by the # `microTVM Project API` <https://github.com/apache/tvm-rfcs/blob/main/rfcs/0008-microtvm-project-api.md>_, # # Embedded platforms need to provide a Template Project containing a microTVM API Server (typically, # this lives in a file ``microtvm_api_server.py`` in the root directory). Let's use the example ``host`` # project in this tutorial, which simulates the device using a POSIX subprocess and pipes: template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("crt")) project_options = {} # You can use options to provide platform-specific options through TVM. # Compiling for physical hardware (or an emulated board, like the mps_an521) # -------------------------------------------------------------------------- # For physical hardware, you can try out the Zephyr platform by using a different template project # and options: # if use_physical_hw: template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) project_options = {"project_type": "host_driven", "board": BOARD} # Create a temporary directory temp_dir = tvm.contrib.utils.tempdir() generated_project_dir = temp_dir / "generated-project" generated_project = tvm.micro.generate_project( template_project_path, module, generated_project_dir, project_options ) # Build and flash the project generated_project.build() generated_project.flash() ###################################################################### # Next, establish a session with the simulated device and run the # computation. The `with session` line would typically flash an attached # microcontroller, but in this tutorial, it simply launches a subprocess # to stand in for an attached microcontroller. with tvm.micro.Session(transport_context_manager=generated_project.transport()) as session: graph_mod = tvm.micro.create_local_graph_executor( module.get_graph_json(), session.get_system_lib(), session.device ) # Set the model parameters using the lowered parameters produced by `relay.build`. graph_mod.set_input(**module.get_params()) # The model consumes a single float32 value and returns a predicted sine value. To pass the # input value we construct a tvm.nd.array object with a single contrived number as input. For # this model values of 0 to 2Pi are acceptable. graph_mod.set_input(input_tensor, tvm.nd.array(np.array([0.5], dtype="float32"))) graph_mod.run() tvm_output = graph_mod.get_output(0).numpy() print("result is: " + str(tvm_output))
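######################################################################
# Optional extension (not part of the original tutorial): sweep a few inputs across the model's
# valid range of 0 to 2*pi and compare the predictions against NumPy's sine. This is a minimal
# sketch that opens a fresh session on the same generated project; the loose print-only check and
# the assumption that a second session can be opened this way are ours, not the tutorial's.
with tvm.micro.Session(transport_context_manager=generated_project.transport()) as session:
    graph_mod = tvm.micro.create_local_graph_executor(
        module.get_graph_json(), session.get_system_lib(), session.device
    )
    graph_mod.set_input(**module.get_params())
    for x in np.linspace(0.0, 2 * np.pi, num=5, dtype="float32"):
        graph_mod.set_input(input_tensor, tvm.nd.array(np.array([x], dtype="float32")))
        graph_mod.run()
        y = float(graph_mod.get_output(0).numpy().flatten()[0])
        print("sin(%.2f) predicted as %.3f (numpy: %.3f)" % (x, y, np.sin(x)))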
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_microtvm/micro_train.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _microtvm-train-arduino: Training Vision Models for microTVM on Arduino ============================================== **Author**: `Gavin Uberti <https://github.com/guberti>`_ This tutorial shows how MobileNetV1 models can be trained to fit on embedded devices, and how those models can be deployed to Arduino using TVM. """ ###################################################################### # .. note:: # # This tutorial is best viewed as a Jupyter Notebook. You can download and run it locally # using the link at the bottom of this page, or open it online for free using Google Colab. # Click the icon below to open in Google Colab. # # .. image:: https://raw.githubusercontent.com/tlc-pack/web-data/main/images/utilities/colab_button.png # :align: center # :target: https://colab.research.google.com/github/apache/tvm-site/blob/asf-site/docs/_downloads/a7c7ea4b5017ae70db1f51dd8e6dcd82/micro_train.ipynb # :width: 300px # # Motivation # ---------- # When building IOT devices, we often want them to **see and understand** the world around them. # This can take many forms, but often times a device will want to know if a certain **kind of # object** is in its field of vision. # # For example, a security camera might look for **people**, so it can decide whether to save a video # to memory. A traffic light might look for **cars**, so it can judge which lights should change # first. Or a forest camera might look for a **kind of animal**, so they can estimate how large # the animal population is. # # To make these devices affordable, we would like them to need only a low-cost processor like the # `nRF52840 <https://www.nordicsemi.com/Products/nRF52840>`_ (costing five dollars each on Mouser) or the `RP2040 <https://www.raspberrypi.com/products/rp2040/>`_ (just $1.45 each!). # # These devices have very little memory (~250 KB RAM), meaning that no conventional edge AI # vision model (like MobileNet or EfficientNet) will be able to run. In this tutorial, we will # show how these models can be modified to work around this requirement. Then, we will use TVM # to compile and deploy it for an Arduino that uses one of these processors. # # Installing the Prerequisites # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # This tutorial will use TensorFlow to train the model - a widely used machine learning library # created by Google. TensorFlow is a very low-level library, however, so we will the Keras # interface to talk to TensorFlow. We will also use TensorFlow Lite to perform quantization on # our model, as TensorFlow by itself does not support this. # # Once we have our generated model, we will use TVM to compile and test it. To avoid having to # build from source, we'll install ``tlcpack`` - a community build of TVM. 
Lastly, we'll also # install ``imagemagick`` and ``curl`` to preprocess data: # # .. code-block:: bash # # %%bash # pip install -q tensorflow tflite # pip install -q tlcpack-nightly -f https://tlcpack.ai/wheels # apt-get -qq install imagemagick curl # # # Install Arduino CLI and library for Nano 33 BLE # curl -fsSL https://raw.githubusercontent.com/arduino/arduino-cli/master/install.sh | sh # /content/bin/arduino-cli core update-index # /content/bin/arduino-cli core install arduino:mbed_nano # # Using the GPU # ^^^^^^^^^^^^^ # # This tutorial demonstrates training a neural network, which is requires a lot of computing power # and will go much faster if you have a GPU. If you are viewing this tutorial on Google Colab, you # can enable a GPU by going to **Runtime->Change runtime type** and selecting "GPU" as our hardware # accelerator. If you are running locally, you can `follow TensorFlow's guide <https://www.tensorflow.org/guide/gpu>`_ instead. # # We can test our GPU installation with the following code: import tensorflow as tf if not tf.test.gpu_device_name(): print("No GPU was detected!") print("Model training will take much longer (~30 minutes instead of ~5)") else: print("GPU detected - you're good to go.") ###################################################################### # Choosing Our Work Dir # ^^^^^^^^^^^^^^^^^^^^^ # We need to pick a directory where our image datasets, trained model, and eventual Arduino sketch # will all live. If running on Google Colab, we'll save everything in ``/root`` (aka ``~``) but you'll # probably want to store it elsewhere if running locally. Note that this variable only affects Python # scripts - you'll have to adjust the Bash commands too. import os FOLDER = "/root" # sphinx_gallery_start_ignore import tempfile FOLDER = tempfile.mkdtemp() # sphinx_gallery_end_ignore ###################################################################### # Downloading the Data # -------------------- # Convolutional neural networks usually learn by looking at many images, along with labels telling # the network what those images are. To get these images, we'll need a publicly available dataset # with thousands of images of all sorts of objects and labels of what's in each image. We'll also # need a bunch of images that **aren't** of cars, as we're trying to distinguish these two classes. # # In this tutorial, we'll create a model to detect if an image contains a **car**, but you can use # whatever category you like! Just change the source URL below to one containing images of another # type of object. # # To get our car images, we'll be downloading the `Stanford Cars dataset <http://ai.stanford.edu/~jkrause/cars/car_dataset.html>`_, # which contains 16,185 full color images of cars. We'll also need images of random things that # aren't cars, so we'll use the `COCO 2017 <https://cocodataset.org/#home>`_ validation set (it's # smaller, and thus faster to download than the full training set. Training on the full data set # would yield better results). Note that there are some cars in the COCO 2017 data set, but it's # a small enough fraction not to matter - just keep in mind that this will drive down our percieved # accuracy slightly. # # We could use the TensorFlow dataloader utilities, but we'll instead do it manually to make sure # it's easy to change the datasets being used. We'll end up with the following file hierarchy: # # .. code-block:: # # /root # β”œβ”€β”€ images # β”‚ β”œβ”€β”€ object # β”‚ β”‚ β”œβ”€β”€ 000001.jpg # β”‚ β”‚ β”‚ ... 
# β”‚ β”‚ └── 016185.jpg # β”‚ β”œβ”€β”€ object.tgz # β”‚ β”œβ”€β”€ random # β”‚ β”‚ β”œβ”€β”€ 000000000139.jpg # β”‚ β”‚ β”‚ ... # β”‚ β”‚ └── 000000581781.jpg # β”‚ └── random.zip # # We should also note that Stanford cars has 8k images, while the COCO 2017 validation set is 5k # images - it is not a 50/50 split! If we wanted to, we could weight these classes differently # during training to correct for this, but training will still work if we ignore it. It should # take about **2 minutes** to download the Stanford Cars, while COCO 2017 validation will take # **1 minute**. import os import shutil import urllib.request # Download datasets os.makedirs(f"{FOLDER}/downloads") os.makedirs(f"{FOLDER}/images") urllib.request.urlretrieve( "https://data.deepai.org/stanfordcars.zip", f"{FOLDER}/downloads/target.zip" ) urllib.request.urlretrieve( "http://images.cocodataset.org/zips/val2017.zip", f"{FOLDER}/downloads/random.zip" ) # Extract them and rename their folders shutil.unpack_archive(f"{FOLDER}/downloads/target.zip", f"{FOLDER}/downloads") shutil.unpack_archive(f"{FOLDER}/downloads/random.zip", f"{FOLDER}/downloads") shutil.move(f"{FOLDER}/downloads/cars_train/cars_train", f"{FOLDER}/images/target") shutil.move(f"{FOLDER}/downloads/val2017", f"{FOLDER}/images/random") ###################################################################### # Loading the Data # ---------------- # Currently, our data is stored on-disk as JPG files of various sizes. To train with it, we'll have # to load the images into memory, resize them to be 64x64, and convert them to raw, uncompressed # data. Keras's ``image_dataset_from_directory`` will take care of most of this, though it loads # images such that each pixel value is a float from 0 to 255. # # We'll also need to load labels, though Keras will help with this. From our subdirectory structure, # it knows the images in ``/objects`` are one class, and those in ``/random`` another. Setting # ``label_mode='categorical'`` tells Keras to convert these into **categorical labels** - a 2x1 vector # that's either ``[1, 0]`` for an object of our target class, or ``[0, 1]`` vector for anything else. # We'll also set ``shuffle=True`` to randomize the order of our examples. # # We will also **batch** the data - grouping samples into clumps to make our training go faster. # Setting ``batch_size = 32`` is a decent number. # # Lastly, in machine learning we generally want our inputs to be small numbers. We'll thus use a # ``Rescaling`` layer to change our images such that each pixel is a float between ``0.0`` and ``1.0``, # instead of ``0`` to ``255``. We need to be careful not to rescale our categorical labels though, so # we'll use a ``lambda`` function. IMAGE_SIZE = (64, 64, 3) unscaled_dataset = tf.keras.utils.image_dataset_from_directory( f"{FOLDER}/images", batch_size=32, shuffle=True, label_mode="categorical", image_size=IMAGE_SIZE[0:2], ) rescale = tf.keras.layers.Rescaling(scale=1.0 / 255) full_dataset = unscaled_dataset.map(lambda im, lbl: (rescale(im), lbl)) ###################################################################### # What's Inside Our Dataset? # ^^^^^^^^^^^^^^^^^^^^^^^^^^ # Before giving this data set to our neural network, we ought to give it a quick visual inspection. # Does the data look properly transformed? Do the labels seem appropriate? And what's our ratio of # objects to other stuff? 
We can display some examples from our datasets using ``matplotlib``: import matplotlib.pyplot as plt num_target_class = len(os.listdir(f"{FOLDER}/images/target/")) num_random_class = len(os.listdir(f"{FOLDER}/images/random/")) print(f"{FOLDER}/images/target contains {num_target_class} images") print(f"{FOLDER}/images/random contains {num_random_class} images") # Show some samples and their labels SAMPLES_TO_SHOW = 10 plt.figure(figsize=(20, 10)) for i, (image, label) in enumerate(unscaled_dataset.unbatch()): if i >= SAMPLES_TO_SHOW: break ax = plt.subplot(1, SAMPLES_TO_SHOW, i + 1) plt.imshow(image.numpy().astype("uint8")) plt.title(list(label.numpy())) plt.axis("off") ###################################################################### # Validating our Accuracy # ^^^^^^^^^^^^^^^^^^^^^^^ # While developing our model, we'll often want to check how accurate it is (e.g. to see if it # improves during training). How do we do this? We could just train it on *all* of the data, and # then ask it to classify that same data. However, our model could cheat by just memorizing all of # the samples, which would make it *appear* to have very high accuracy, but perform very badly in # reality. In practice, this "memorizing" is called **overfitting**. # # To prevent this, we will set aside some of the data (we'll use 20%) as a **validation set**. Our # model will never be trained on validation data - we'll only use it to check our model's accuracy. num_batches = len(full_dataset) train_dataset = full_dataset.take(int(num_batches * 0.8)) validation_dataset = full_dataset.skip(len(train_dataset)) ###################################################################### # Loading the Data # ---------------- # In the past decade, `convolutional neural networks <https://en.wikipedia.org/wiki/Convolutional_neural_network>`_ have been widely # adopted for image classification tasks. State-of-the-art models like `EfficientNet V2 <https://arxiv.org/abs/2104.00298>`_ are able # to perform image classification better than even humans! Unfortunately, these models have tens of # millions of parameters, and thus won't fit on cheap security camera computers. # # Our applications generally don't need perfect accuracy - 90% is good enough. We can thus use the # older and smaller MobileNet V1 architecture. But this *still* won't be small enough - by default, # MobileNet V1 with 224x224 inputs and alpha 1.0 takes ~50 MB to just **store**. To reduce the size # of the model, there are three knobs we can turn. First, we can reduce the size of the input images # from 224x224 to 96x96 or 64x64, and Keras makes it easy to do this. We can also reduce the **alpha** # of the model, from 1.0 to 0.25, which downscales the width of the network (and the number of # filters) by a factor of four. And if we were really strapped for space, we could reduce the # number of **channels** by making our model take grayscale images instead of RGB ones. # # In this tutorial, we will use an RGB 64x64 input image and alpha 0.25. This is not quite # ideal, but it allows the finished model to fit in 192 KB of RAM, while still letting us perform # transfer learning using the official TensorFlow source models (if we used alpha <0.25 or a # grayscale input, we wouldn't be able to do this). # # What is Transfer Learning? # ^^^^^^^^^^^^^^^^^^^^^^^^^^ # Deep learning has `dominated image classification <https://paperswithcode.com/sota/image-classification-on-imagenet>`_ for a long time, # but training neural networks takes a lot of time. 
When a neural network is trained "from scratch", # its parameters start out randomly initialized, forcing it to learn very slowly how to tell images # apart. # # With transfer learning, we instead start with a neural network that's **already** good at a # specific task. In this example, that task is classifying images from `the ImageNet database <https://www.image-net.org/>`_. This # means the network already has some object detection capabilities, and is likely closer to what you # want then a random model would be. # # This works especially well with image processing neural networks like MobileNet. In practice, it # turns out the convolutional layers of the model (i.e. the first 90% of the layers) are used for # identifying low-level features like lines and shapes - only the last few fully connected layers # are used to determine how those shapes make up the objects the network is trying to detect. # # We can take advantage of this by starting training with a MobileNet model that was trained on # ImageNet, and already knows how to identify those lines and shapes. We can then just remove the # last few layers from this pretrained model, and add our own final layers. We'll then train this # conglomerate model for a few epochs on our cars vs non-cars dataset, to adjust the first layers # and train from scratch the last layers. This process of training an already-partially-trained # model is called *fine-tuning*. # # Source MobileNets for transfer learning have been `pretrained by the TensorFlow folks <https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md>`_, so we # can just download the one closest to what we want (the 128x128 input model with 0.25 depth scale). os.makedirs(f"{FOLDER}/models") WEIGHTS_PATH = f"{FOLDER}/models/mobilenet_2_5_128_tf.h5" urllib.request.urlretrieve( "https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/mobilenet_2_5_128_tf.h5", WEIGHTS_PATH, ) pretrained = tf.keras.applications.MobileNet( input_shape=IMAGE_SIZE, weights=WEIGHTS_PATH, alpha=0.25 ) ###################################################################### # Modifying Our Network # ^^^^^^^^^^^^^^^^^^^^^ # As mentioned above, our pretrained model is designed to classify the 1,000 ImageNet categories, # but we want to convert it to classify cars. Since only the bottom few layers are task-specific, # we'll **cut off the last five layers** of our original model. In their place we'll build our own # "tail" to the model by performing respape, dropout, flatten, and softmax operations. model = tf.keras.models.Sequential() model.add(tf.keras.layers.InputLayer(input_shape=IMAGE_SIZE)) model.add(tf.keras.Model(inputs=pretrained.inputs, outputs=pretrained.layers[-5].output)) model.add(tf.keras.layers.Reshape((-1,))) model.add(tf.keras.layers.Dropout(0.1)) model.add(tf.keras.layers.Flatten()) model.add(tf.keras.layers.Dense(2, activation="softmax")) ###################################################################### # Fine Tuning Our Network # ^^^^^^^^^^^^^^^^^^^^^^^ # When training neural networks, we must set a parameter called the **learning rate** that controls # how fast our network learns. It must be set carefully - too slow, and our network will take # forever to train; too fast, and our network won't be able to learn some fine details. Generally # for Adam (the optimizer we're using), ``0.001`` is a pretty good learning rate (and is what's # recommended in the `original paper <https://arxiv.org/abs/1412.6980>`_). 
However, in this case # ``0.0005`` seems to work a little better. # # We'll also pass the validation set from earlier to ``model.fit``. This will evaluate how good our # model is each time we train it, and let us track how our model is improving. Once training is # finished, the model should have a validation accuracy around ``0.98`` (meaning it was right 98% of # the time on our validation set). model.compile( optimizer=tf.keras.optimizers.Adam(learning_rate=0.0005), loss="categorical_crossentropy", metrics=["accuracy"], ) model.fit(train_dataset, validation_data=validation_dataset, epochs=3, verbose=2) ###################################################################### # Quantization # ------------ # We've done a decent job of reducing our model's size so far - changing the input dimension, # along with removing the bottom layers reduced the model to just 219k parameters. However, each of # these parameters is a ``float32`` that takes four bytes, so our model will take up almost one MB! # # Additionally, it might be the case that our hardware doesn't have built-in support for floating # point numbers. While most high-memory Arduinos (like the Nano 33 BLE) do have hardware support, # some others (like the Arduino Due) do not. On any boards *without* dedicated hardware support, # floating point multiplication will be extremely slow. # # To address both issues we will **quantize** the model - representing the weights as eight bit # integers. It's more complex than just rounding, though - to get the best performance, TensorFlow # tracks how each neuron in our model activates, so we can figure out how most accurately simulate # the neuron's original activations with integer operations. # # We will help TensorFlow do this by creating a representative dataset - a subset of the original # that is used for tracking how those neurons activate. We'll then pass this into a ``TFLiteConverter`` # (Keras itself does not have quantization support) with an ``Optimize`` flag to tell TFLite to perform # the conversion. By default, TFLite keeps the inputs and outputs of our model as floats, so we must # explicitly tell it to avoid this behavior. def representative_dataset(): for image_batch, label_batch in full_dataset.take(10): yield [image_batch] converter = tf.lite.TFLiteConverter.from_keras_model(model) converter.optimizations = [tf.lite.Optimize.DEFAULT] converter.representative_dataset = representative_dataset converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] converter.inference_input_type = tf.uint8 converter.inference_output_type = tf.uint8 quantized_model = converter.convert() ###################################################################### # Download the Model if Desired # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # We've now got a finished model that you can use locally or in other tutorials (try autotuning # this model or viewing it on `https://netron.app/ <https://netron.app/>`_). But before we do # those things, we'll have to write it to a file (``quantized.tflite``). If you're running this # tutorial on Google Colab, you'll have to uncomment the last two lines to download the file # after writing it. 
QUANTIZED_MODEL_PATH = f"{FOLDER}/models/quantized.tflite" with open(QUANTIZED_MODEL_PATH, "wb") as f: f.write(quantized_model) # from google.colab import files # files.download(QUANTIZED_MODEL_PATH) ###################################################################### # Compiling With TVM For Arduino # ------------------------------ # TensorFlow has a built-in framework for deploying to microcontrollers - `TFLite Micro <https://www.tensorflow.org/lite/microcontrollers>`_. However, # it's poorly supported by development boards and does not support autotuning. We will use Apache # TVM instead. # # TVM can be used either with its command line interface (``tvmc``) or with its Python interface. The # Python interface is fully-featured and more stable, so we'll use it here. # # TVM is an optimizing compiler, and optimizations to our model are performed in stages via # **intermediate representations**. The first of these is `Relay <https://arxiv.org/abs/1810.00952>`_ a high-level intermediate # representation emphasizing portability. The conversion from ``.tflite`` to Relay is done without any # knowledge of our "end goal" - the fact we intend to run this model on an Arduino. # # Choosing an Arduino Board # ^^^^^^^^^^^^^^^^^^^^^^^^^ # Next, we'll have to decide exactly which Arduino board to use. The Arduino sketch that we # ultimately generate should be compatible with any board, but knowing which board we are using in # advance allows TVM to adjust its compilation strategy to get better performance. # # There is one catch - we need enough **memory** (flash and RAM) to be able to run our model. We # won't ever be able to run a complex vision model like a MobileNet on an Arduino Uno - that board # only has 2 kB of RAM and 32 kB of flash! Our model has ~200,000 parameters, so there is just no # way it could fit. # # For this tutorial, we will use the Nano 33 BLE, which has 1 MB of flash memory and 256 KB of RAM. # However, any other Arduino with those specs or better should also work. # # Generating our project # ^^^^^^^^^^^^^^^^^^^^^^ # Next, we'll compile the model to TVM's MLF (model library format) intermediate representation, # which consists of C/C++ code and is designed for autotuning. To improve performance, we'll tell # TVM that we're compiling for the ``nrf52840`` microprocessor (the one the Nano 33 BLE uses). We'll # also tell it to use the C runtime (abbreviated ``crt``) and to use ahead-of-time memory allocation # (abbreviated ``aot``, which helps reduce the model's memory footprint). Lastly, we will disable # vectorization with ``"tir.disable_vectorize": True``, as C has no native vectorized types. # # Once we have set these configuration parameters, we will call ``tvm.relay.build`` to compile our # Relay model into the MLF intermediate representation. From here, we just need to call # ``tvm.micro.generate_project`` and pass in the Arduino template project to finish compilation. 
import shutil import tflite import tvm # Method to load model is different in TFLite 1 vs 2 try: # TFLite 2.1 and above tflite_model = tflite.Model.GetRootAsModel(quantized_model, 0) except AttributeError: # Fall back to TFLite 1.14 method tflite_model = tflite.Model.Model.GetRootAsModel(quantized_model, 0) # Convert to the Relay intermediate representation mod, params = tvm.relay.frontend.from_tflite(tflite_model) # Set configuration flags to improve performance target = tvm.target.target.micro("nrf52840") runtime = tvm.relay.backend.Runtime("crt") executor = tvm.relay.backend.Executor("aot", {"unpacked-api": True}) # Convert to the MLF intermediate representation with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): mod = tvm.relay.build(mod, target, runtime=runtime, executor=executor, params=params) # Generate an Arduino project from the MLF intermediate representation shutil.rmtree(f"{FOLDER}/models/project", ignore_errors=True) arduino_project = tvm.micro.generate_project( tvm.micro.get_microtvm_template_projects("arduino"), mod, f"{FOLDER}/models/project", { "board": "nano33ble", "arduino_cli_cmd": "/content/bin/arduino-cli", "project_type": "example_project", }, ) ###################################################################### # Testing our Arduino Project # --------------------------- # Consider the following two 224x224 images from the author's camera roll - one of a car, one not. # We will test our Arduino project by loading both of these images and executing the compiled model # on them. # # .. image:: https://raw.githubusercontent.com/tlc-pack/web-data/main/testdata/microTVM/data/model_train_images_combined.png # :align: center # :height: 200px # :width: 600px # # Currently, these are 224x224 PNG images we can download from Imgur. Before we can feed in these # images, we'll need to resize and convert them to raw data, which can be done with ``imagemagick``. # # It's also challenging to load raw data onto an Arduino, as only C/CPP files (and similar) are # compiled. We can work around this by embedding our raw data in a hard-coded C array with the # built-in utility ``bin2c`` that will output a file like below: # # .. code-block:: c # # static const unsigned char CAR_IMAGE[] = { # 0x22,0x23,0x14,0x22, # ... # 0x07,0x0e,0x08,0x08 # }; # # We can do both of these things with a few lines of Bash code: # # .. code-block:: bash # # %%bash # mkdir -p ~/tests # curl "https://i.imgur.com/JBbEhxN.png" -o ~/tests/car_224.png # convert ~/tests/car_224.png -resize 64 ~/tests/car_64.png # stream ~/tests/car_64.png ~/tests/car.raw # bin2c -c -st ~/tests/car.raw --name CAR_IMAGE > ~/models/project/car.c # # curl "https://i.imgur.com/wkh7Dx2.png" -o ~/tests/catan_224.png # convert ~/tests/catan_224.png -resize 64 ~/tests/catan_64.png # stream ~/tests/catan_64.png ~/tests/catan.raw # bin2c -c -st ~/tests/catan.raw --name CATAN_IMAGE > ~/models/project/catan.c ###################################################################### # Writing our Arduino Script # -------------------------- # We now need a little bit of Arduino code to read the two binary arrays we just generated, run the # model on them, and log the output to the serial monitor. This file will replace ``arduino_sketch.ino`` # as the main file of our sketch. You'll have to copy this code in manually.. # # .. 
code-block:: c # # %%writefile /root/models/project.ino # #include "src/model.h" # #include "car.c" # #include "catan.c" # # void setup() { # Serial.begin(9600); # TVMInitialize(); # } # # void loop() { # uint8_t result_data[2]; # Serial.println("Car results:"); # TVMExecute(const_cast<uint8_t*>(CAR_IMAGE), result_data); # Serial.print(result_data[0]); Serial.print(", "); # Serial.print(result_data[1]); Serial.println(); # # Serial.println("Other object results:"); # TVMExecute(const_cast<uint8_t*>(CATAN_IMAGE), result_data); # Serial.print(result_data[0]); Serial.print(", "); # Serial.print(result_data[1]); Serial.println(); # # delay(1000); # } # # Compiling Our Code # ^^^^^^^^^^^^^^^^^^ # Now that our project has been generated, TVM's job is mostly done! We can still call # ``arduino_project.build()`` and ``arduino_project.upload()``, but these just use ``arduino-cli``'s # compile and flash commands underneath. We could also begin autotuning our model, but that's a # subject for a different tutorial. To finish up, we'll verify no compiler errors are thrown # by our project: shutil.rmtree(f"{FOLDER}/models/project/build", ignore_errors=True) # sphinx_gallery_start_ignore from unittest.mock import MagicMock arduino_project = MagicMock() # sphinx_gallery_end_ignore arduino_project.build() print("Compilation succeeded!") ###################################################################### # Uploading to Our Device # ----------------------- # The very last step is uploading our sketch to an Arduino to make sure our code works properly. # Unfortunately, we can't do that from Google Colab, so we'll have to download our sketch. This is # simple enough to do - we'll just turn our project into a `.zip` archive, and call `files.download`. # If you're running on Google Colab, you'll have to uncomment the last two lines to download the file # after writing it. ZIP_FOLDER = f"{FOLDER}/models/project" shutil.make_archive(ZIP_FOLDER, "zip", ZIP_FOLDER) # from google.colab import files # files.download(f"{FOLDER}/models/project.zip") # sphinx_gallery_start_ignore # Run a few unit tests to make sure the Python code worked # Ensure transfer learn model was correctly assembled assert len(model.layers) == 5 assert model.count_params() == 219058 # Only 219,058 of these are trainable assert len(quantized_model) >= 250000 # Quantized model will be 250 KB - 350 KB assert len(quantized_model) <= 350000 # Exact value depends on quantization # Assert .tflite and .zip files were written to disk assert os.path.isfile(f"{FOLDER}/models/quantized.tflite") assert os.path.isfile(f"{FOLDER}/models/project.zip") # Assert MLF file was correctly generated assert mod.executor.name == "aot" # Remove the temporary folder we generated at the beginning shutil.rmtree(FOLDER) # sphinx_gallery_end_ignore ###################################################################### # From here, we'll need to open it in the Arduino IDE. You'll have to download the IDE as well as # the SDK for whichever board you are using. For certain boards like the Sony SPRESENSE, you may # have to change settings to control how much memory you want the board to use. # # Expected Results # ^^^^^^^^^^^^^^^^ # If all works as expected, you should see the following output on a Serial monitor: # # .. code-block:: # # Car results: # 255, 0 # Other object results: # 0, 255 # # The first number represents the model's confidence that the object **is** a car and ranges from # 0-255. 
The second number represents the model's confidence that the object **is not** a car and # is also 0-255. These results mean the model is very sure that the first image is a car, and the # second image is not (which is correct). Hence, our model is working! # # Summary # ------- # In this tutorial, we used transfer learning to quickly train an image recognition model to # identify cars. We modified its input dimensions and last few layers to make it better at this, # and to make it faster and smaller. We then quantized the model and compiled it using TVM to # create an Arduino sketch. Lastly, we tested the model using two static images to prove it works # as intended. # # Next Steps # ^^^^^^^^^^ # From here, we could modify the model to read live images from the camera - we have another # Arduino tutorial for how to do that `on GitHub <https://github.com/guberti/tvm-arduino-demos/tree/master/examples/person_detection>`_. Alternatively, we could also # `use TVM's autotuning capabilities <https://tvm.apache.org/docs/how_to/work_with_microtvm/micro_autotune.html>`_ to dramatically improve the model's performance. #
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_pytorch/using_as_torch.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Wrap Your TVMScript as PyTorch Module ====================== **Author**: `Yaoda Zhou <https://github.com/juda>`_ This article is a tutorial on wrapping the TVMScript code as the PyTorch module. Using the decorator `as_torch`, users can wrap TVMScript code into a PyTorch nn.Module naturally. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore # Import PyTorch, as well as necessary libraries import torch import torch.nn.functional as F import torch.utils.benchmark as benchmark import tvm from tvm.contrib.torch import as_torch from tvm.script import tir as T ###################################################################### # Write your own PyTorch operator by TVMScript # ------------------------------- # PyTorch is a very popular machine learning framework which contains # optimized implementations of most commonly used operators. # Nevertheless, sometimes you might want to write your own operators in PyTorch. # In that case, the performance of such custom operators might not be satisfactory for your needs. # # For example, suppose that we are going to define a 1-d depthwise convolution operator. # Assume the number of in_channel and out_channel are both 70, # the width is 80 and the kernel size is 20, # then the 1-d depthwise conv could be written in PyTorch in one line: in_channel = 70 out_channel = 70 width = 80 kernel_size = 20 def torch_depthwise(inputs, filters): return F.conv1d(inputs, filters.view(out_channel, 1, kernel_size), groups=out_channel) # We can run this function as: inputs = torch.randn(in_channel, width) filters = torch.randn(out_channel, kernel_size) ret_torch = torch_depthwise(inputs, filters) # The `torch_depthwise` function, in a plain Python code, could be written as: def vanilla_depthwise(input, weight): ret = torch.zeros(out_channel, width - kernel_size + 1) for j in range(out_channel): for i in range(width - kernel_size + 1): for k in range(kernel_size): ret[j, i] += weight[j, k] * input[j, i + k] return ret # Then, we plan to optimize the `depthwise` function by leveraging the power of TVM. # TVM community proposes an embedded Domain Specific Language in Python called TVMScript, # which serves as the high-level frontend for TVM's Tensor IR. # The depthwise 1D convolution code above can be translated to TVMScript as follows. # We provide an `as_torch` decorator, which converts the TVMScript code to PyTorch's nn.Module automatically. 
@as_torch @T.prim_func def tvm_depthwise( A: T.Buffer((70, 80), "float32"), B: T.Buffer((70, 20), "float32"), C: T.Buffer((70, 61), "float32"), ) -> None: for j, i, k in T.grid(70, 61, 20): with T.block(): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vj, vi] = T.float32(0) C[vj, vi] += B[vj, vk] * A[vj, vi + vk] # We can build the TVMScript code by calling the `tune` method with the default setting. # Without providing extra information, the model will be tuned for CPU. tvm_depthwise.tune() # We can print out the tuned TVMScript code to see how the program is transformed: print(tvm_depthwise.script()) # We can verify that the two outputs are the same: ret_tvm = torch.zeros(out_channel, width - kernel_size + 1) tvm_depthwise(inputs, filters, ret_tvm) testing.assert_allclose(ret_torch.cpu().numpy(), ret_tvm.cpu().numpy(), atol=1e-5, rtol=1e-5) ###################################################################### # Benchmark # ------------------------------- results = [] for i in range(5): inputs = torch.randn(out_channel, width) filters = torch.randn(out_channel, kernel_size) res = torch.zeros(out_channel, width - kernel_size + 1) sub_label = f"[test {i}]" results.append( benchmark.Timer( stmt="tvm_depthwise(inputs, filters, res)", setup="from __main__ import tvm_depthwise", globals={"inputs": inputs, "filters": filters, "res": res}, sub_label=sub_label, description="TVMScript", ).blocked_autorange() ) results.append( benchmark.Timer( stmt="torch_depthwise(inputs, filters)", setup="from __main__ import torch_depthwise", globals={ "inputs": inputs, "filters": filters, }, sub_label=sub_label, description="PyTorch", ).blocked_autorange() ) compare = benchmark.Compare(results) compare.print() # In the author's environment, the average inference time of `tvm_depthwise` is 120.0 us, # while the average inference time of `torch_depthwise` is 196.0 us (PyTorch version is 1.11.0), # showing a speedup of around 38%.
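######################################################################
# A note on composition (a hedged sketch added for illustration, not part of the
# original tutorial): since `as_torch` wraps the TVMScript function as a PyTorch
# nn.Module that follows the destination-passing convention used above (the output
# buffer is passed as the last argument), the tuned operator can be embedded in an
# ordinary PyTorch module. The `DepthwiseBlock` wrapper below is hypothetical and
# only illustrates that usage.


class DepthwiseBlock(torch.nn.Module):
    def forward(self, inputs, filters):
        # Allocate the destination buffer expected by the TVMScript kernel.
        out = torch.zeros(out_channel, width - kernel_size + 1)
        tvm_depthwise(inputs, filters, out)
        return out


block = DepthwiseBlock()
ret_block = block(inputs, filters)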
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_pytorch/using_optimized_torch.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Compile PyTorch Models ====================== **Author**: `Yaoda Zhou <https://github.com/juda>`_ This article is a tutorial on optimizing PyTorch models with the decorator `optimize_torch`. To follow this tutorial, PyTorch, as well as TorchVision, should be installed. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore # Import PyTorch import torch import torch.nn as nn import torch.nn.functional as F # Import library for profiling import torch.utils.benchmark as benchmark from torchvision.models import resnet18 # Import `optimize_torch` function from tvm.contrib.torch import optimize_torch from tvm.meta_schedule import TuneConfig ###################################################################### # Define a simple module written in PyTorch # ------------------------------------------ class SimpleModel(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 20, 5) self.conv2 = nn.Conv2d(20, 20, 5) def forward(self, x): x = F.relu(self.conv1(x)) return F.relu(self.conv2(x)) ###################################################################### # Optimize SimpleModel with TVM MetaSchedule # ------------------------------------------ # We provide the `optimize_torch` function, which has a similar usage to `torch.jit.trace`. # The PyTorch model to optimize, along with its example input, is provided by the user. # The PyTorch module will be tuned by TVM for the target hardware. # Without providing extra information, the model will be tuned for CPU. simple_model = SimpleModel() example_input = torch.randn(20, 1, 10, 10) model_optimized_by_tvm = optimize_torch(simple_model, example_input) ###################################################################### # Save/Load module # ------------------------------ # We can save and load our tuned module like a standard `nn.Module`. # Let us run our tuned module. ret1 = model_optimized_by_tvm(example_input) torch.save(model_optimized_by_tvm, "model_optimized.pt") model_loaded = torch.load("model_optimized.pt") # We load the module and run it again. ret2 = model_loaded(example_input) # We will verify two things: # (1) the module can be saved and loaded safely, since the result after the save and load # operations is still the same as the original one; # (2) the optimized model returns the same result as the original PyTorch model.
ret3 = simple_model(example_input) testing.assert_allclose(ret1.detach().numpy(), ret2.detach().numpy(), atol=1e-5, rtol=1e-5) testing.assert_allclose(ret1.detach().numpy(), ret3.detach().numpy(), atol=1e-5, rtol=1e-5) ###################################################################### # Optimize resnet18 # ------------------------------ # In the following, we will show that our approach is able to # accelerate common models, such as resnet18. # We will tune our model for the GPU. target_cuda = "nvidia/geforce-rtx-3070" # For PyTorch users, the code can be written as usual, except for # applying the `optimize_torch` function to the resnet18 model. resnet18_tvm = optimize_torch( resnet18().cuda().eval(), [torch.rand(1, 3, 224, 224).cuda()], target=target_cuda ) # TorchScript also provides a built-in `optimize_for_inference` function to accelerate inference. resnet18_torch = torch.jit.optimize_for_inference(torch.jit.script(resnet18().cuda().eval())) ###################################################################### # Compare the performance of the two approaches # ---------------------------------------------- results = [] for i in range(5): test_input = torch.rand(1, 3, 224, 224).cuda() sub_label = f"[test {i}]" results.append( benchmark.Timer( stmt="resnet18_tvm(test_input)", setup="from __main__ import resnet18_tvm", globals={"test_input": test_input}, sub_label=sub_label, description="tuning by meta", ).blocked_autorange() ) results.append( benchmark.Timer( stmt="resnet18_torch(test_input)", setup="from __main__ import resnet18_torch", globals={"test_input": test_input}, sub_label=sub_label, description="tuning by jit", ).blocked_autorange() ) compare = benchmark.Compare(results) compare.print() # In the author's environment, the average inference time of `resnet18_tvm` is 620.0 us, # while the average inference time of `resnet18_torch` is 980.0 us (PyTorch version is 1.11.0), # showing a speedup of around 38%.
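######################################################################
# As a brief, hedged addition (not part of the original tutorial): the GPU-tuned
# module can be saved and reloaded in the same way as the simple model above; the
# file name used here is arbitrary.

torch.save(resnet18_tvm, "resnet18_optimized.pt")
resnet18_tvm_loaded = torch.load("resnet18_optimized.pt")
ret_loaded = resnet18_tvm_loaded(torch.rand(1, 3, 224, 224).cuda())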
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_relay/build_gcn.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Building a Graph Convolutional Network ====================================== **Author**: `Yulun Yao <https://yulunyao.io/>`_, \ `Chien-Yu Lin <https://homes.cs.washington.edu/~cyulin/>`_ This article is an introductory tutorial to build a Graph Convolutional Network (GCN) with Relay. In this tutorial, we will run our GCN on the Cora dataset for demonstration. The Cora dataset is a common benchmark for Graph Neural Networks (GNN) and for frameworks that support GNN training and inference. We load the dataset directly from the DGL library to do an apples-to-apples comparison against DGL. Please refer to the DGL documentation for DGL installation at https://docs.dgl.ai/install/index.html. Please refer to the PyTorch guide for PyTorch installation at https://pytorch.org/get-started/locally/. """ ###################################################################### # Define GCN in DGL with PyTorch backend # -------------------------------------- # # DGL example: https://github.com/dmlc/dgl/tree/master/examples/pytorch/gcn # This part reuses the code from the above example.
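######################################################################
# For reference, here is a brief recap (added for clarity, not part of the original
# DGL example) of the propagation rule that each GCN layer below implements, where
# :math:`\hat{A} = A + I` is the adjacency matrix with self-loops and
# :math:`\hat{D}` is its diagonal degree matrix:
#
# .. math::
#
#     H^{(l+1)} = \sigma\left(\hat{D}^{-1/2} \hat{A} \hat{D}^{-1/2} H^{(l)} W^{(l)}\right)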
import torch import torch.nn as nn import torch.nn.functional as F import dgl import networkx as nx from dgl.nn.pytorch import GraphConv class GCN(nn.Module): def __init__(self, g, n_infeat, n_hidden, n_classes, n_layers, activation): super(GCN, self).__init__() self.g = g self.layers = nn.ModuleList() self.layers.append(GraphConv(n_infeat, n_hidden, activation=activation)) for i in range(n_layers - 1): self.layers.append(GraphConv(n_hidden, n_hidden, activation=activation)) self.layers.append(GraphConv(n_hidden, n_classes)) def forward(self, features): h = features for i, layer in enumerate(self.layers): # handle API changes across different DGL versions if dgl.__version__ > "0.3": h = layer(self.g, h) else: h = layer(h, self.g) return h ###################################################################### # Define the functions to load dataset and evaluate accuracy # ---------------------------------------------------------- # You may substitute this part with your own dataset; here we load data from DGL from dgl.data import load_data from collections import namedtuple def load_dataset(dataset="cora"): args = namedtuple("args", ["dataset"]) data = load_data(args(dataset)) # Remove existing self-loops to avoid passing a node's feature to itself twice, then add a single self-loop per node g = data.graph g.remove_edges_from(nx.selfloop_edges(g)) g.add_edges_from(zip(g.nodes, g.nodes)) return g, data def evaluate(data, logits): test_mask = data.test_mask # the test set, which isn't included in the training phase pred = logits.argmax(axis=1) acc = ((pred == data.labels) * test_mask).sum() / test_mask.sum() return acc ###################################################################### # Load the data and set up model parameters # ----------------------------------------- """ Parameters ---------- dataset: str Name of dataset. You can choose from ['cora', 'citeseer', 'pubmed'].
num_layers: int number of hidden layers num_hidden: int number of hidden units in the hidden layer infeat_dim: int dimension of the input features num_classes: int dimension of model output (number of classes) """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore dataset = "cora" g, data = load_dataset(dataset) num_layers = 1 num_hidden = 16 infeat_dim = data.features.shape[1] num_classes = data.num_labels ###################################################################### # Set up the DGL-PyTorch model and get the golden results # ------------------------------------------------------- # # The weights are trained with https://github.com/dmlc/dgl/blob/master/examples/pytorch/gcn/train.py from tvm.contrib.download import download_testdata from dgl import DGLGraph features = torch.FloatTensor(data.features) dgl_g = DGLGraph(g) torch_model = GCN(dgl_g, infeat_dim, num_hidden, num_classes, num_layers, F.relu) # Download the pretrained weights model_url = "https://homes.cs.washington.edu/~cyulin/media/gnn_model/gcn_%s.torch" % (dataset) model_path = download_testdata(model_url, "gcn_%s.pickle" % (dataset), module="gcn_model") # Load the weights into the model torch_model.load_state_dict(torch.load(model_path)) ###################################################################### # Run the DGL model and test for accuracy # --------------------------------------- torch_model.eval() with torch.no_grad(): logits_torch = torch_model(features) print("Print the first five outputs from DGL-PyTorch execution\n", logits_torch[:5]) acc = evaluate(data, logits_torch.numpy()) print("Test accuracy of DGL results: {:.2%}".format(acc)) ###################################################################### # Define Graph Convolution Layer in Relay # --------------------------------------- # To run GCN on TVM, we first need to implement the Graph Convolution Layer. # You may refer to https://github.com/dmlc/dgl/blob/master/python/dgl/nn/mxnet/conv/graphconv.py for a GraphConv layer implemented in DGL with the MXNet backend. # # The layer is defined with the operations below. Note that we apply two transposes to keep the adjacency matrix on the right-hand side of the sparse_dense operator; # this method is temporary and will be updated once sparse matrix transpose and support for a left sparse operand are available. # # .. math:: # # \mbox{GraphConv}(A, H, W) = A * H * W # = ((H * W)^t * A^t)^t # = ((W^t * H^t) * A^t)^t from tvm import relay from tvm.contrib import graph_executor import tvm from tvm import te def GraphConv(layer_name, input_dim, output_dim, adj, input, norm=None, bias=True, activation=None): """ Parameters ---------- layer_name: str Name of layer input_dim: int Input dimension per node feature output_dim: int, Output dimension per node feature adj: namedtuple, Graph representation (Adjacency Matrix) in Sparse Format (`data`, `indices`, `indptr`), where `data` has shape [num_nonzeros], `indices` has shape [num_nonzeros], `indptr` has shape [num_nodes + 1] input: relay.Expr, Input feature to current layer with shape [num_nodes, input_dim] norm: relay.Expr, Norm passed to this layer to normalize features before and after convolution. bias: bool Set bias to True to add a bias term to the GCN layer activation: <function relay.op.nn>, Activation function applied to the output, e.g.
relay.nn.{relu, sigmoid, log_softmax, softmax, leaky_relu} Returns ---------- output: tvm.relay.Expr The Output Tensor for this layer [num_nodes, output_dim] """ if norm is not None: input = relay.multiply(input, norm) weight = relay.var(layer_name + ".weight", shape=(input_dim, output_dim)) weight_t = relay.transpose(weight) dense = relay.nn.dense(weight_t, input) output = relay.nn.sparse_dense(dense, adj) output_t = relay.transpose(output) if norm is not None: output_t = relay.multiply(output_t, norm) if bias is True: _bias = relay.var(layer_name + ".bias", shape=(output_dim, 1)) output_t = relay.nn.bias_add(output_t, _bias, axis=-1) if activation is not None: output_t = activation(output_t) return output_t ###################################################################### # Prepare the parameters needed in the GraphConv layers # ----------------------------------------------------- # import numpy as np import networkx as nx def prepare_params(g, data): params = {} params["infeats"] = data.features.numpy().astype( "float32" ) # Only support float32 as feature for now # Generate adjacency matrix adjacency = nx.to_scipy_sparse_matrix(g) params["g_data"] = adjacency.data.astype("float32") params["indices"] = adjacency.indices.astype("int32") params["indptr"] = adjacency.indptr.astype("int32") # Normalization w.r.t. node degrees degs = [g.in_degree[i] for i in range(g.number_of_nodes())] params["norm"] = np.power(degs, -0.5).astype("float32") params["norm"] = params["norm"].reshape((params["norm"].shape[0], 1)) return params params = prepare_params(g, data) # Check shape of features and the validity of adjacency matrix assert len(params["infeats"].shape) == 2 assert ( params["g_data"] is not None and params["indices"] is not None and params["indptr"] is not None ) assert params["infeats"].shape[0] == params["indptr"].shape[0] - 1 ###################################################################### # Put layers together # ------------------- # Define input features, norms, adjacency matrix in Relay infeats = relay.var("infeats", shape=data.features.shape) norm = relay.Constant(tvm.nd.array(params["norm"])) g_data = relay.Constant(tvm.nd.array(params["g_data"])) indices = relay.Constant(tvm.nd.array(params["indices"])) indptr = relay.Constant(tvm.nd.array(params["indptr"])) Adjacency = namedtuple("Adjacency", ["data", "indices", "indptr"]) adj = Adjacency(g_data, indices, indptr) # Construct the 2-layer GCN layers = [] layers.append( GraphConv( layer_name="layers.0", input_dim=infeat_dim, output_dim=num_hidden, adj=adj, input=infeats, norm=norm, activation=relay.nn.relu, ) ) layers.append( GraphConv( layer_name="layers.1", input_dim=num_hidden, output_dim=num_classes, adj=adj, input=layers[-1], norm=norm, activation=None, ) ) # Analyze free variables and generate Relay function output = layers[-1] ###################################################################### # Compile and run with TVM # ------------------------ # # Export the weights from PyTorch model to Python Dict model_params = {} for param_tensor in torch_model.state_dict(): model_params[param_tensor] = torch_model.state_dict()[param_tensor].numpy() for i in range(num_layers + 1): params["layers.%d.weight" % (i)] = model_params["layers.%d.weight" % (i)] params["layers.%d.bias" % (i)] = model_params["layers.%d.bias" % (i)] # Set the TVM build target target = "llvm" # Currently only support `llvm` as target func = relay.Function(relay.analysis.free_vars(output), output) func = relay.build_module.bind_params_by_name(func, 
params) mod = tvm.IRModule() mod["main"] = func # Build with Relay with tvm.transform.PassContext(opt_level=0): # Currently only support opt_level=0 lib = relay.build(mod, target, params=params) # Generate graph executor dev = tvm.device(target, 0) m = graph_executor.GraphModule(lib["default"](dev)) ###################################################################### # Run the TVM model, test for accuracy and verify with DGL # -------------------------------------------------------- m.run() logits_tvm = m.get_output(0).numpy() print("Print the first five outputs from TVM execution\n", logits_tvm[:5]) labels = data.labels test_mask = data.test_mask acc = evaluate(data, logits_tvm) print("Test accuracy of TVM results: {:.2%}".format(acc)) import tvm.testing # Verify the results with the DGL model tvm.testing.assert_allclose(logits_torch, logits_tvm, atol=1e-3)
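######################################################################
# As an optional, hedged sanity check (not part of the original tutorial), we can
# also compare the predicted classes node by node between the two backends.

pred_torch = logits_torch.numpy().argmax(axis=1)
pred_tvm = logits_tvm.argmax(axis=1)
print("Fraction of nodes with identical predictions: {:.2%}".format((pred_torch == pred_tvm).mean()))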
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_relay/using_external_lib.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Using External Libraries in Relay ================================= **Author**: `Masahiro Masuda <https://github.com/masahi>`_, `Truman Tian <https://github.com/SiNZeRo>`_ This is a short tutorial on how to use external libraries such as cuDNN or cuBLAS with Relay. Relay uses TVM internally to generate target-specific code. For example, with the CUDA backend TVM generates CUDA kernels for all layers in the user-provided network. But sometimes it is also helpful to incorporate external libraries developed by various vendors into Relay. Luckily, TVM has a mechanism to transparently call into these libraries. For Relay users, all we need to do is set the target string appropriately. Before we can use external libraries from Relay, your TVM needs to be built with the libraries you want to use. For example, to use cuDNN, the USE_CUDNN option in `cmake/config.cmake` needs to be enabled, and cuDNN include and library directories need to be specified if necessary. To begin with, we import Relay and TVM. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import tvm from tvm import te import numpy as np from tvm.contrib import graph_executor as runtime from tvm import relay from tvm.relay import testing import tvm.testing ###################################################################### # Create a simple network # ----------------------- # Let's create a very simple network for demonstration. # It consists of convolution, batch normalization, and ReLU activation. out_channels = 16 batch_size = 1 data = relay.var("data", relay.TensorType((batch_size, 3, 224, 224), "float32")) weight = relay.var("weight") bn_gamma = relay.var("bn_gamma") bn_beta = relay.var("bn_beta") bn_mmean = relay.var("bn_mean") bn_mvar = relay.var("bn_var") simple_net = relay.nn.conv2d( data=data, weight=weight, kernel_size=(3, 3), channels=out_channels, padding=(1, 1) ) simple_net = relay.nn.batch_norm(simple_net, bn_gamma, bn_beta, bn_mmean, bn_mvar)[0] simple_net = relay.nn.relu(simple_net) simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net) data_shape = (batch_size, 3, 224, 224) net, params = testing.create_workload(simple_net) ###################################################################### # Build and run with cuda backend # ------------------------------- # We build and run this network with the cuda backend, as usual. # By setting the logging level to DEBUG, the result of Relay graph compilation will be dumped as pseudo code.
import logging logging.basicConfig(level=logging.DEBUG) # to dump TVM IR after fusion target = "cuda" lib = relay.build_module.build(net, target, params=params) dev = tvm.device(target, 0) data = np.random.uniform(-1, 1, size=data_shape).astype("float32") module = runtime.GraphModule(lib["default"](dev)) module.set_input("data", data) module.run() out_shape = (batch_size, out_channels, 224, 224) out = module.get_output(0, tvm.nd.empty(out_shape)) out_cuda = out.numpy() ###################################################################### # The generated pseudo code should look something like below. # Note how bias add, batch normalization, and ReLU activation are fused into the convolution kernel. # TVM generates a single, fused kernel from this representation. # # .. code-block:: text # # produce tensor { # // attr [iter_var(blockIdx.z, , blockIdx.z)] thread_extent = 1 # // attr [compute] storage_scope = "local" # allocate compute[float32 * 32] # // attr [pad_temp.shared] storage_scope = "shared" # allocate pad_temp.shared[float32 * 180] # // attr [placeholder.shared] storage_scope = "shared" # allocate placeholder.shared[float32 * 144] # // attr [iter_var(blockIdx.y, , blockIdx.y)] thread_extent = 28 # // attr [iter_var(blockIdx.x, , blockIdx.x)] thread_extent = 14 # // attr [iter_var(threadIdx.z, , threadIdx.z)] thread_extent = 4 # // attr [iter_var(threadIdx.y, , threadIdx.y)] thread_extent = 1 # // attr [iter_var(threadIdx.x, , threadIdx.x)] thread_extent = 16 # produce compute { # compute[0] = 0.000000f # compute[1] = 0.000000f # compute[2] = 0.000000f # compute[3] = 0.000000f # compute[4] = 0.000000f # compute[5] = 0.000000f # compute[6] = 0.000000f # compute[7] = 0.000000f # compute[8] = 0.000000f # compute[9] = 0.000000f # compute[10] = 0.000000f # compute[11] = 0.000000f # compute[12] = 0.000000f # compute[13] = 0.000000f # compute[14] = 0.000000f # compute[15] = 0.000000f # compute[16] = 0.000000f # compute[17] = 0.000000f # compute[18] = 0.000000f # compute[19] = 0.000000f # compute[20] = 0.000000f # compute[21] = 0.000000f # compute[22] = 0.000000f # compute[23] = 0.000000f # compute[24] = 0.000000f # compute[25] = 0.000000f # compute[26] = 0.000000f # compute[27] = 0.000000f # compute[28] = 0.000000f # compute[29] = 0.000000f # compute[30] = 0.000000f # compute[31] = 0.000000f # for (rc.outer, 0, 3) { # produce pad_temp.shared { # // attr [iter_var(threadIdx.z, , threadIdx.z)] thread_extent = 4 # // attr [iter_var(threadIdx.y, , threadIdx.y)] thread_extent = 1 # // attr [iter_var(threadIdx.x, , threadIdx.x)] thread_extent = 16 # if (likely(((threadIdx.z*15) < (60 - threadIdx.x)))) { # if (likely((threadIdx.x < 15))) { # pad_temp.shared[(((((threadIdx.z*15) + threadIdx.x)/60)*180) + ((((((threadIdx.z*15) + threadIdx.x)/6) % 10)*18) + ((((threadIdx.z*3) + threadIdx.x)*3) % 18)))] = tvm_if_then_else((((((1 - ((((threadIdx.z*15) + threadIdx.x)/6) % 10)) <= (blockIdx.y*8)) && ((blockIdx.y*8) < (225 - ((((threadIdx.z*15) + threadIdx.x)/6) % 10)))) && ((1 - ((((threadIdx.z*3) + threadIdx.x)*3) % 18)) <= (blockIdx.x*16))) && ((blockIdx.x*16) < (225 - ((((threadIdx.z*3) + threadIdx.x)*3) % 18)))), placeholder[((((((((blockIdx.y*112) + blockIdx.x) + (rc.outer*3136)) + ((((threadIdx.z*15) + threadIdx.x)/60)*9408))*16) + ((((threadIdx.z*3) + threadIdx.x)*3) % 18)) + (((((threadIdx.z*15) + threadIdx.x)/6) % 10)*224)) + -225)], 0.000000f) # pad_temp.shared[(((((((threadIdx.z*15) + threadIdx.x)*3) + 1)/180)*180) + ((((((((threadIdx.z*15) + threadIdx.x)*3) + 1)/18) % 10)*18) + 
(((((threadIdx.z*3) + threadIdx.x)*3) + 1) % 18)))] = tvm_if_then_else((((((1 - ((((((threadIdx.z*15) + threadIdx.x)*3) + 1)/18) % 10)) <= (blockIdx.y*8)) && ((blockIdx.y*8) < (225 - ((((((threadIdx.z*15) + threadIdx.x)*3) + 1)/18) % 10)))) && ((1 - (((((threadIdx.z*3) + threadIdx.x)*3) + 1) % 18)) <= (blockIdx.x*16))) && ((blockIdx.x*16) < (225 - (((((threadIdx.z*3) + threadIdx.x)*3) + 1) % 18)))), placeholder[((((((((blockIdx.y*112) + blockIdx.x) + (rc.outer*3136)) + ((((((threadIdx.z*15) + threadIdx.x)*3) + 1)/180)*9408))*16) + (((((threadIdx.z*3) + threadIdx.x)*3) + 1) % 18)) + (((((((threadIdx.z*15) + threadIdx.x)*3) + 1)/18) % 10)*224)) + -225)], 0.000000f) # pad_temp.shared[(((((((threadIdx.z*15) + threadIdx.x)*3) + 2)/180)*180) + ((((((((threadIdx.z*15) + threadIdx.x)*3) + 2)/18) % 10)*18) + (((((threadIdx.z*3) + threadIdx.x)*3) + 2) % 18)))] = tvm_if_then_else((((((1 - ((((((threadIdx.z*15) + threadIdx.x)*3) + 2)/18) % 10)) <= (blockIdx.y*8)) && ((blockIdx.y*8) < (225 - ((((((threadIdx.z*15) + threadIdx.x)*3) + 2)/18) % 10)))) && ((1 - (((((threadIdx.z*3) + threadIdx.x)*3) + 2) % 18)) <= (blockIdx.x*16))) && ((blockIdx.x*16) < (225 - (((((threadIdx.z*3) + threadIdx.x)*3) + 2) % 18)))), placeholder[((((((((blockIdx.y*112) + blockIdx.x) + (rc.outer*3136)) + ((((((threadIdx.z*15) + threadIdx.x)*3) + 2)/180)*9408))*16) + (((((threadIdx.z*3) + threadIdx.x)*3) + 2) % 18)) + (((((((threadIdx.z*15) + threadIdx.x)*3) + 2)/18) % 10)*224)) + -225)], 0.000000f) # } # } # } # produce placeholder.shared { # // attr [iter_var(threadIdx.z, , threadIdx.z)] thread_extent = 4 # // attr [iter_var(threadIdx.y, , threadIdx.y)] thread_extent = 1 # // attr [iter_var(threadIdx.x, , threadIdx.x)] thread_extent = 16 # if (likely(((threadIdx.z*4) < (16 - (threadIdx.x/3))))) { # if (likely(((threadIdx.z*12) < (48 - threadIdx.x)))) { # if (likely((threadIdx.x < 12))) { # placeholder.shared[(((((threadIdx.z*4) + (threadIdx.x/3))*3) + (threadIdx.x % 3))*3)] = placeholder[(((((rc.outer + (threadIdx.z*12)) + ((threadIdx.x/3)*3))*3) + (threadIdx.x % 3))*3)] # placeholder.shared[((((((threadIdx.z*4) + (threadIdx.x/3))*3) + (threadIdx.x % 3))*3) + 1)] = placeholder[((((((rc.outer + (threadIdx.z*12)) + ((threadIdx.x/3)*3))*3) + (threadIdx.x % 3))*3) + 1)] # placeholder.shared[((((((threadIdx.z*4) + (threadIdx.x/3))*3) + (threadIdx.x % 3))*3) + 2)] = placeholder[((((((rc.outer + (threadIdx.z*12)) + ((threadIdx.x/3)*3))*3) + (threadIdx.x % 3))*3) + 2)] # } # } # } # } # compute[0] = (compute[0] + (pad_temp.shared[threadIdx.x]*placeholder.shared[(threadIdx.z*36)])) # compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 18)]*placeholder.shared[(threadIdx.z*36)])) # compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[(threadIdx.z*36)])) # compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[(threadIdx.z*36)])) # compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[(threadIdx.z*36)])) # compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[(threadIdx.z*36)])) # compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[(threadIdx.z*36)])) # compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[(threadIdx.z*36)])) # compute[8] = (compute[8] + (pad_temp.shared[threadIdx.x]*placeholder.shared[((threadIdx.z*36) + 9)])) # compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 18)]*placeholder.shared[((threadIdx.z*36) + 9)])) # compute[10] = 
(compute[10] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 9)])) # compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 9)])) # compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 9)])) # compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 9)])) # compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 9)])) # compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 9)])) # compute[16] = (compute[16] + (pad_temp.shared[threadIdx.x]*placeholder.shared[((threadIdx.z*36) + 18)])) # compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 18)]*placeholder.shared[((threadIdx.z*36) + 18)])) # compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 18)])) # compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 18)])) # compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 18)])) # compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 18)])) # compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 18)])) # compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 18)])) # compute[24] = (compute[24] + (pad_temp.shared[threadIdx.x]*placeholder.shared[((threadIdx.z*36) + 27)])) # compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 18)]*placeholder.shared[((threadIdx.z*36) + 27)])) # compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 27)])) # compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 27)])) # compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 27)])) # compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 27)])) # compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 27)])) # compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 27)])) # compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 1)]*placeholder.shared[((threadIdx.z*36) + 1)])) # compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 19)]*placeholder.shared[((threadIdx.z*36) + 1)])) # compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 1)])) # compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 1)])) # compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 1)])) # compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 1)])) # compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 1)])) # compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 1)])) # compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 1)]*placeholder.shared[((threadIdx.z*36) + 10)])) # compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 
19)]*placeholder.shared[((threadIdx.z*36) + 10)])) # compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 10)])) # compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 10)])) # compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 10)])) # compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 10)])) # compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 10)])) # compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 10)])) # compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 1)]*placeholder.shared[((threadIdx.z*36) + 19)])) # compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 19)]*placeholder.shared[((threadIdx.z*36) + 19)])) # compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 19)])) # compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 19)])) # compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 19)])) # compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 19)])) # compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 19)])) # compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 19)])) # compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 1)]*placeholder.shared[((threadIdx.z*36) + 28)])) # compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 19)]*placeholder.shared[((threadIdx.z*36) + 28)])) # compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 28)])) # compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 28)])) # compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 28)])) # compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 28)])) # compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 28)])) # compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 28)])) # compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 2)]*placeholder.shared[((threadIdx.z*36) + 2)])) # compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 20)]*placeholder.shared[((threadIdx.z*36) + 2)])) # compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 2)])) # compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 2)])) # compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 2)])) # compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 2)])) # compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 2)])) # compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 2)])) # compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 2)]*placeholder.shared[((threadIdx.z*36) 
+ 11)])) # compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 20)]*placeholder.shared[((threadIdx.z*36) + 11)])) # compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 11)])) # compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 11)])) # compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 11)])) # compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 11)])) # compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 11)])) # compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 11)])) # compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 2)]*placeholder.shared[((threadIdx.z*36) + 20)])) # compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 20)]*placeholder.shared[((threadIdx.z*36) + 20)])) # compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 20)])) # compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 20)])) # compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 20)])) # compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 20)])) # compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 20)])) # compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 20)])) # compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 2)]*placeholder.shared[((threadIdx.z*36) + 29)])) # compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 20)]*placeholder.shared[((threadIdx.z*36) + 29)])) # compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 29)])) # compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 29)])) # compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 29)])) # compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 29)])) # compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 29)])) # compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 29)])) # compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 18)]*placeholder.shared[((threadIdx.z*36) + 3)])) # compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 3)])) # compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 3)])) # compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 3)])) # compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 3)])) # compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 3)])) # compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 3)])) # compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 144)]*placeholder.shared[((threadIdx.z*36) + 3)])) # compute[8] = (compute[8] + 
(pad_temp.shared[(threadIdx.x + 18)]*placeholder.shared[((threadIdx.z*36) + 12)])) # compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 12)])) # compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 12)])) # compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 12)])) # compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 12)])) # compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 12)])) # compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 12)])) # compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 144)]*placeholder.shared[((threadIdx.z*36) + 12)])) # compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 18)]*placeholder.shared[((threadIdx.z*36) + 21)])) # compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 21)])) # compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 21)])) # compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 21)])) # compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 21)])) # compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 21)])) # compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 21)])) # compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 144)]*placeholder.shared[((threadIdx.z*36) + 21)])) # compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 18)]*placeholder.shared[((threadIdx.z*36) + 30)])) # compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 30)])) # compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 30)])) # compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 30)])) # compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 30)])) # compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 30)])) # compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 30)])) # compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 144)]*placeholder.shared[((threadIdx.z*36) + 30)])) # compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 19)]*placeholder.shared[((threadIdx.z*36) + 4)])) # compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 4)])) # compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 4)])) # compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 4)])) # compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 4)])) # compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 4)])) # compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 4)])) # compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 
145)]*placeholder.shared[((threadIdx.z*36) + 4)])) # compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 19)]*placeholder.shared[((threadIdx.z*36) + 13)])) # compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 13)])) # compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 13)])) # compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 13)])) # compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 13)])) # compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 13)])) # compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 13)])) # compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 145)]*placeholder.shared[((threadIdx.z*36) + 13)])) # compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 19)]*placeholder.shared[((threadIdx.z*36) + 22)])) # compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 22)])) # compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 22)])) # compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 22)])) # compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 22)])) # compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 22)])) # compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 22)])) # compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 145)]*placeholder.shared[((threadIdx.z*36) + 22)])) # compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 19)]*placeholder.shared[((threadIdx.z*36) + 31)])) # compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 31)])) # compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 31)])) # compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 31)])) # compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 31)])) # compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 31)])) # compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 31)])) # compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 145)]*placeholder.shared[((threadIdx.z*36) + 31)])) # compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 20)]*placeholder.shared[((threadIdx.z*36) + 5)])) # compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 5)])) # compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 5)])) # compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 5)])) # compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 5)])) # compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 5)])) # compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 
128)]*placeholder.shared[((threadIdx.z*36) + 5)])) # compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 146)]*placeholder.shared[((threadIdx.z*36) + 5)])) # compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 20)]*placeholder.shared[((threadIdx.z*36) + 14)])) # compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 14)])) # compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 14)])) # compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 14)])) # compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 14)])) # compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 14)])) # compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 14)])) # compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 146)]*placeholder.shared[((threadIdx.z*36) + 14)])) # compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 20)]*placeholder.shared[((threadIdx.z*36) + 23)])) # compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 23)])) # compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 23)])) # compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 23)])) # compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 23)])) # compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 23)])) # compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 23)])) # compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 146)]*placeholder.shared[((threadIdx.z*36) + 23)])) # compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 20)]*placeholder.shared[((threadIdx.z*36) + 32)])) # compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 32)])) # compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 32)])) # compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 32)])) # compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 32)])) # compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 32)])) # compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 32)])) # compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 146)]*placeholder.shared[((threadIdx.z*36) + 32)])) # compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 6)])) # compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 6)])) # compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 6)])) # compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 6)])) # compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 6)])) # compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 
126)]*placeholder.shared[((threadIdx.z*36) + 6)])) # compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 144)]*placeholder.shared[((threadIdx.z*36) + 6)])) # compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 162)]*placeholder.shared[((threadIdx.z*36) + 6)])) # compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 15)])) # compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 15)])) # compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 15)])) # compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 15)])) # compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 15)])) # compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 15)])) # compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 144)]*placeholder.shared[((threadIdx.z*36) + 15)])) # compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 162)]*placeholder.shared[((threadIdx.z*36) + 15)])) # compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 24)])) # compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 24)])) # compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 24)])) # compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 24)])) # compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 24)])) # compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 24)])) # compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 144)]*placeholder.shared[((threadIdx.z*36) + 24)])) # compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 162)]*placeholder.shared[((threadIdx.z*36) + 24)])) # compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 36)]*placeholder.shared[((threadIdx.z*36) + 33)])) # compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 54)]*placeholder.shared[((threadIdx.z*36) + 33)])) # compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 72)]*placeholder.shared[((threadIdx.z*36) + 33)])) # compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 90)]*placeholder.shared[((threadIdx.z*36) + 33)])) # compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 108)]*placeholder.shared[((threadIdx.z*36) + 33)])) # compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 126)]*placeholder.shared[((threadIdx.z*36) + 33)])) # compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 144)]*placeholder.shared[((threadIdx.z*36) + 33)])) # compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 162)]*placeholder.shared[((threadIdx.z*36) + 33)])) # compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 7)])) # compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 7)])) # compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 7)])) # compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 7)])) # compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 
109)]*placeholder.shared[((threadIdx.z*36) + 7)])) # compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 7)])) # compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 145)]*placeholder.shared[((threadIdx.z*36) + 7)])) # compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 163)]*placeholder.shared[((threadIdx.z*36) + 7)])) # compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 16)])) # compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 16)])) # compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 16)])) # compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 16)])) # compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 16)])) # compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 16)])) # compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 145)]*placeholder.shared[((threadIdx.z*36) + 16)])) # compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 163)]*placeholder.shared[((threadIdx.z*36) + 16)])) # compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 25)])) # compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 25)])) # compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 25)])) # compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 25)])) # compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 25)])) # compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 25)])) # compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 145)]*placeholder.shared[((threadIdx.z*36) + 25)])) # compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 163)]*placeholder.shared[((threadIdx.z*36) + 25)])) # compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 37)]*placeholder.shared[((threadIdx.z*36) + 34)])) # compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 55)]*placeholder.shared[((threadIdx.z*36) + 34)])) # compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 73)]*placeholder.shared[((threadIdx.z*36) + 34)])) # compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 91)]*placeholder.shared[((threadIdx.z*36) + 34)])) # compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 109)]*placeholder.shared[((threadIdx.z*36) + 34)])) # compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 127)]*placeholder.shared[((threadIdx.z*36) + 34)])) # compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 145)]*placeholder.shared[((threadIdx.z*36) + 34)])) # compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 163)]*placeholder.shared[((threadIdx.z*36) + 34)])) # compute[0] = (compute[0] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 8)])) # compute[1] = (compute[1] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 8)])) # compute[2] = (compute[2] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 8)])) # compute[3] = (compute[3] + (pad_temp.shared[(threadIdx.x + 
92)]*placeholder.shared[((threadIdx.z*36) + 8)])) # compute[4] = (compute[4] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 8)])) # compute[5] = (compute[5] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 8)])) # compute[6] = (compute[6] + (pad_temp.shared[(threadIdx.x + 146)]*placeholder.shared[((threadIdx.z*36) + 8)])) # compute[7] = (compute[7] + (pad_temp.shared[(threadIdx.x + 164)]*placeholder.shared[((threadIdx.z*36) + 8)])) # compute[8] = (compute[8] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 17)])) # compute[9] = (compute[9] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 17)])) # compute[10] = (compute[10] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 17)])) # compute[11] = (compute[11] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 17)])) # compute[12] = (compute[12] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 17)])) # compute[13] = (compute[13] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 17)])) # compute[14] = (compute[14] + (pad_temp.shared[(threadIdx.x + 146)]*placeholder.shared[((threadIdx.z*36) + 17)])) # compute[15] = (compute[15] + (pad_temp.shared[(threadIdx.x + 164)]*placeholder.shared[((threadIdx.z*36) + 17)])) # compute[16] = (compute[16] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 26)])) # compute[17] = (compute[17] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 26)])) # compute[18] = (compute[18] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 26)])) # compute[19] = (compute[19] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 26)])) # compute[20] = (compute[20] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 26)])) # compute[21] = (compute[21] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 26)])) # compute[22] = (compute[22] + (pad_temp.shared[(threadIdx.x + 146)]*placeholder.shared[((threadIdx.z*36) + 26)])) # compute[23] = (compute[23] + (pad_temp.shared[(threadIdx.x + 164)]*placeholder.shared[((threadIdx.z*36) + 26)])) # compute[24] = (compute[24] + (pad_temp.shared[(threadIdx.x + 38)]*placeholder.shared[((threadIdx.z*36) + 35)])) # compute[25] = (compute[25] + (pad_temp.shared[(threadIdx.x + 56)]*placeholder.shared[((threadIdx.z*36) + 35)])) # compute[26] = (compute[26] + (pad_temp.shared[(threadIdx.x + 74)]*placeholder.shared[((threadIdx.z*36) + 35)])) # compute[27] = (compute[27] + (pad_temp.shared[(threadIdx.x + 92)]*placeholder.shared[((threadIdx.z*36) + 35)])) # compute[28] = (compute[28] + (pad_temp.shared[(threadIdx.x + 110)]*placeholder.shared[((threadIdx.z*36) + 35)])) # compute[29] = (compute[29] + (pad_temp.shared[(threadIdx.x + 128)]*placeholder.shared[((threadIdx.z*36) + 35)])) # compute[30] = (compute[30] + (pad_temp.shared[(threadIdx.x + 146)]*placeholder.shared[((threadIdx.z*36) + 35)])) # compute[31] = (compute[31] + (pad_temp.shared[(threadIdx.x + 164)]*placeholder.shared[((threadIdx.z*36) + 35)])) # } # } # tensor[(((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x)] = max(((compute[0]*placeholder[(threadIdx.z*4)]) + placeholder[(threadIdx.z*4)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 224)] = 
max(((compute[1]*placeholder[(threadIdx.z*4)]) + placeholder[(threadIdx.z*4)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 448)] = max(((compute[2]*placeholder[(threadIdx.z*4)]) + placeholder[(threadIdx.z*4)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 672)] = max(((compute[3]*placeholder[(threadIdx.z*4)]) + placeholder[(threadIdx.z*4)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 896)] = max(((compute[4]*placeholder[(threadIdx.z*4)]) + placeholder[(threadIdx.z*4)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 1120)] = max(((compute[5]*placeholder[(threadIdx.z*4)]) + placeholder[(threadIdx.z*4)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 1344)] = max(((compute[6]*placeholder[(threadIdx.z*4)]) + placeholder[(threadIdx.z*4)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 1568)] = max(((compute[7]*placeholder[(threadIdx.z*4)]) + placeholder[(threadIdx.z*4)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 50176)] = max(((compute[8]*placeholder[((threadIdx.z*4) + 1)]) + placeholder[((threadIdx.z*4) + 1)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 50400)] = max(((compute[9]*placeholder[((threadIdx.z*4) + 1)]) + placeholder[((threadIdx.z*4) + 1)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 50624)] = max(((compute[10]*placeholder[((threadIdx.z*4) + 1)]) + placeholder[((threadIdx.z*4) + 1)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 50848)] = max(((compute[11]*placeholder[((threadIdx.z*4) + 1)]) + placeholder[((threadIdx.z*4) + 1)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 51072)] = max(((compute[12]*placeholder[((threadIdx.z*4) + 1)]) + placeholder[((threadIdx.z*4) + 1)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 51296)] = max(((compute[13]*placeholder[((threadIdx.z*4) + 1)]) + placeholder[((threadIdx.z*4) + 1)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 51520)] = max(((compute[14]*placeholder[((threadIdx.z*4) + 1)]) + placeholder[((threadIdx.z*4) + 1)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 51744)] = max(((compute[15]*placeholder[((threadIdx.z*4) + 1)]) + placeholder[((threadIdx.z*4) + 1)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 100352)] = max(((compute[16]*placeholder[((threadIdx.z*4) + 2)]) + placeholder[((threadIdx.z*4) + 2)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 100576)] = max(((compute[17]*placeholder[((threadIdx.z*4) + 2)]) + placeholder[((threadIdx.z*4) + 2)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 100800)] = max(((compute[18]*placeholder[((threadIdx.z*4) + 2)]) + placeholder[((threadIdx.z*4) + 2)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 101024)] 
= max(((compute[19]*placeholder[((threadIdx.z*4) + 2)]) + placeholder[((threadIdx.z*4) + 2)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 101248)] = max(((compute[20]*placeholder[((threadIdx.z*4) + 2)]) + placeholder[((threadIdx.z*4) + 2)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 101472)] = max(((compute[21]*placeholder[((threadIdx.z*4) + 2)]) + placeholder[((threadIdx.z*4) + 2)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 101696)] = max(((compute[22]*placeholder[((threadIdx.z*4) + 2)]) + placeholder[((threadIdx.z*4) + 2)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 101920)] = max(((compute[23]*placeholder[((threadIdx.z*4) + 2)]) + placeholder[((threadIdx.z*4) + 2)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 150528)] = max(((compute[24]*placeholder[((threadIdx.z*4) + 3)]) + placeholder[((threadIdx.z*4) + 3)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 150752)] = max(((compute[25]*placeholder[((threadIdx.z*4) + 3)]) + placeholder[((threadIdx.z*4) + 3)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 150976)] = max(((compute[26]*placeholder[((threadIdx.z*4) + 3)]) + placeholder[((threadIdx.z*4) + 3)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 151200)] = max(((compute[27]*placeholder[((threadIdx.z*4) + 3)]) + placeholder[((threadIdx.z*4) + 3)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 151424)] = max(((compute[28]*placeholder[((threadIdx.z*4) + 3)]) + placeholder[((threadIdx.z*4) + 3)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 151648)] = max(((compute[29]*placeholder[((threadIdx.z*4) + 3)]) + placeholder[((threadIdx.z*4) + 3)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 151872)] = max(((compute[30]*placeholder[((threadIdx.z*4) + 3)]) + placeholder[((threadIdx.z*4) + 3)]), 0.000000f) # tensor[((((((blockIdx.y*112) + blockIdx.x) + (threadIdx.z*12544))*16) + threadIdx.x) + 152096)] = max(((compute[31]*placeholder[((threadIdx.z*4) + 3)]) + placeholder[((threadIdx.z*4) + 3)]), 0.000000f) # } ###################################################################### # Use cuDNN for a convolutional layer # ----------------------------------- # We can use cuDNN to replace convolution kernels with cuDNN ones. # To do that, all we need to do is to append the option " -libs=cudnn" to the target string. net, params = testing.create_workload(simple_net) target = "cuda -libs=cudnn" # use cudnn for convolution lib = relay.build_module.build(net, target, params=params) dev = tvm.device(target, 0) data = np.random.uniform(-1, 1, size=data_shape).astype("float32") module = runtime.GraphModule(lib["default"](dev)) module.set_input("data", data) module.run() out_shape = (batch_size, out_channels, 224, 224) out = module.get_output(0, tvm.nd.empty(out_shape)) out_cudnn = out.numpy() ###################################################################### # Note that if you use cuDNN, Relay cannot fuse convolution with layers following it. 
# This is because layer fusion happens at the level of TVM internal representation(IR). # Relay treats external libraries as black box, so there is no way to fuse them with TVM IR. # # The pseudo code below shows that cuDNN convolution + bias add + batch norm + ReLU turned into two stages of computation, one for cuDNN call and the other for the rest of operations. # # .. code-block:: text # # // attr [y] storage_scope = "global" # allocate y[float32 * 802816] # produce y { # // attr [0] extern_scope = 0 # tvm_call_packed("tvm.contrib.cudnn.conv2d.forward", 1, 0, 1, 1, 1, 1, 1, 1, 1, tvm_stack_make_array(placeholder, tvm_stack_make_shape(1, 3, 224, 224), 0, 4, 0.000000f, 0), tvm_stack_make_array(placeholder, tvm_stack_make_shape(16, 3, 3, 3), 0, 4, 0.000000f, 0), tvm_stack_make_array(y, tvm_stack_make_shape(1, 16, 224, 224), 0, 4, 0.000000f, 0)) # } # produce tensor { # // attr [iter_var(blockIdx.x, , blockIdx.x)] thread_extent = 256 # // attr [iter_var(threadIdx.x, , threadIdx.x)] thread_extent = 512 # for (ax0.ax1.fused.ax2.fused.ax3.fused.outer, 0, 7) { # if (likely(((blockIdx.x*512) < ((802816 - (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072)) - threadIdx.x)))) { # tensor[(((((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/802816)*802816) + (((((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/224) % 224)*224) + ((((blockIdx.x*64) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*32)) % 224))) + ((((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/50176) % 16)*50176))] = max(((y[(((((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/802816)*802816) + (((((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/224) % 224)*224) + ((((blockIdx.x*64) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*32)) % 224))) + ((((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/50176) % 16)*50176))]*placeholder[(((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/50176) % 16)]) + placeholder[(((((blockIdx.x*512) + threadIdx.x) + (ax0.ax1.fused.ax2.fused.ax3.fused.outer*131072))/50176) % 16)]), 0.000000f) # } # } # } ###################################################################### # Verify the result # ----------------- # We can check that the results of two runs match. tvm.testing.assert_allclose(out_cuda, out_cudnn, rtol=1e-5) ##################################################################### # Conclusion # ---------- # This tutorial covered the usage of cuDNN with Relay. # We also have support for cuBLAS. If cuBLAS is enabled, it will be used inside a fully connected layer (relay.dense). # To use cuBLAS, set a target string as "cuda -libs=cublas". # You can use both cuDNN and cuBLAS with "cuda -libs=cudnn,cublas". # # For ROCm backend, we have support for MIOpen and rocBLAS. # They can be enabled with target "rocm -libs=miopen,rocblas". # # Being able to use external libraries is great, but we need to keep in mind some cautions. # # First, the use of external libraries may restrict your usage of TVM and Relay. # For example, MIOpen only supports NCHW layout and fp32 data type at the moment, so you cannot use other layouts or data type in TVM. # # Second, and more importantly, external libraries restrict the possibility of operator fusion during graph compilation, as shown above. 
# TVM and Relay aim to achieve the best performance across a variety of hardware through joint operator-level and graph-level optimization. # To achieve this goal, we should keep developing better optimizations for TVM and Relay, while using external libraries as a convenient way to fall back to existing implementations when necessary.
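######################################################################
# The conclusion above mentions combining several libraries in one target string.
# As a minimal sketch (reusing ``simple_net``, ``data_shape``, ``out_shape`` and the
# imports from earlier in this tutorial), enabling both cuDNN and cuBLAS only changes
# the target string; the build-and-run flow stays the same as the cuDNN example.

net, params = testing.create_workload(simple_net)
target = "cuda -libs=cudnn,cublas"  # cuDNN for convolutions, cuBLAS for dense layers
lib = relay.build_module.build(net, target, params=params)
dev = tvm.device(target, 0)
data = np.random.uniform(-1, 1, size=data_shape).astype("float32")
module = runtime.GraphModule(lib["default"](dev))
module.set_input("data", data)
module.run()
out_libs = module.get_output(0, tvm.nd.empty(out_shape)).numpy()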
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_relay/using_pipeline_executor.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Using Pipeline Executor in Relay ================================= **Author**: `Hua Jiang <https://github.com/huajsj>`_ This is a short tutorial on how to use "Pipeline Executor" with Relay. """ import tvm from tvm import te import numpy as np from tvm.contrib import graph_executor as runtime from tvm.relay.op.contrib.cutlass import partition_for_cutlass from tvm import relay from tvm.relay import testing import tvm.testing from tvm.contrib.cutlass import ( has_cutlass, num_cutlass_partitions, finalize_modules, finalize_modules_vm, ) img_size = 8 ####################################################################### # Create a simple network, this network can be a pre-trained model too. # --------------------------------------------------------------------- # Let's create a very simple network for demonstration. # It consists of convolution, batch normalization, dense, and ReLU activation. def get_network(): out_channels = 16 batch_size = 1 data = relay.var("data", relay.TensorType((batch_size, 3, img_size, img_size), "float16")) dense_weight = relay.var( "dweight", relay.TensorType((batch_size, 16 * img_size * img_size), "float16") ) weight = relay.var("weight") second_weight = relay.var("second_weight") bn_gamma = relay.var("bn_gamma") bn_beta = relay.var("bn_beta") bn_mmean = relay.var("bn_mean") bn_mvar = relay.var("bn_var") simple_net = relay.nn.conv2d( data=data, weight=weight, kernel_size=(3, 3), channels=out_channels, padding=(1, 1) ) simple_net = relay.nn.batch_norm(simple_net, bn_gamma, bn_beta, bn_mmean, bn_mvar)[0] simple_net = relay.nn.relu(simple_net) simple_net = relay.nn.batch_flatten(simple_net) simple_net = relay.nn.dense(simple_net, dense_weight) simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net) data_shape = (batch_size, 3, img_size, img_size) net, params = testing.create_workload(simple_net) return net, params, data_shape net, params, data_shape = get_network() ########################################### # Splitting the network into two subgraphs. # ----------------------------------------- # This function called 'graph_split' from a unit test is just an example. User can create a customized logic # to split the graph. import inspect import os tutorial_dir = os.path.dirname(inspect.getfile(lambda: None)) os.sys.path.append(os.path.join(tutorial_dir, "../../../tests/python/relay")) from test_pipeline_executor import graph_split ########################################### # Splitting the network into two subgraphs. split_config = [{"op_name": "nn.relu", "op_index": 0}] subgraphs = graph_split(net["main"], split_config, params) ########################################################### # The generated subgraphs should look something like below. 
""" #subgraphs[0]) def @main(%data: Tensor[(1, 3, img_size, img_size), float16]) { %0 = nn.conv2d(%data, meta[relay.Constant][0] /* ty=Tensor[(16, 3, 3, 3), float16] */, padding=[1, 1, 1, 1], channels=16, kernel_size=[3, 3]) /* ty=Tensor[(1, 16, img_size, img_size), float16] */; %1 = nn.batch_norm(%0, meta[relay.Constant][1] /* ty=Tensor[(16), float16] */, meta[relay.Constant][2] /* ty=Tensor[(16), float16]*/, meta[relay.Constant][3] /* ty=Tensor[(16), float16] */, meta[relay.Constant][4] /* ty=Tensor[(16), float16] */) /* ty=(Tensor[(1,16, img_size, img_size), float16], Tensor[(16), float16], Tensor[(16), float16]) */; %2 = %1.0; nn.relu(%2) /* ty=Tensor[(1, 16, img_size, img_size), float16] */ } #subgraphs[1] def @main(%data_n_0: Tensor[(1, 16, 8, 8), float16] /* ty=Tensor[(1, 16, 8, 8), float16] */) { %0 = nn.batch_flatten(%data_n_0) /* ty=Tensor[(1, 1024), float16] */; nn.dense(%0, meta[relay.Constant][0] /* ty=Tensor[(1, 1024), float16] */, units=None) /* ty=Tensor[(1, 1), float16] */ } """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore ######################################### # Build the subgraph with cutlass target. # --------------------------------------- cutlass = tvm.target.Target( { "kind": "cutlass", "sm": int(tvm.target.Target("cuda").arch.split("_")[1]), "use_3xtf32": True, "split_k_slices": [1], "profile_all_alignments": False, "find_first_valid": True, "use_multiprocessing": True, "use_fast_math": False, "tmp_dir": "./tmp", }, host=tvm.target.Target("llvm"), ) def cutlass_build(mod, target, params=None, target_host=None, mod_name="default"): target = [target, cutlass] lib = relay.build_module.build( mod, target=target, params=params, target_host=target_host, mod_name=mod_name ) return lib ########################################################### # Run the two subgraphs in pipeline with pipeline executor. # --------------------------------------------------------- # Set 'USE_PIPELINE_EXECUTOR' as ON, and set USE_CUTLASS' as ON in cmake. from tvm.contrib import graph_executor, pipeline_executor, pipeline_executor_build ######################################### # Create subgraph pipeline configuration. # Associate a subgraph module with a target. # Use CUTLASS BYOC to build the second subgraph module. mod0, mod1 = subgraphs[0], subgraphs[1] # Use cutlass as the codegen. mod1 = partition_for_cutlass(mod1) ################################################# # Get the pipeline executor configuration object. pipe_config = pipeline_executor_build.PipelineConfig() ########################################################################### # Set the compile target of the subgraph module. pipe_config[mod0].target = "llvm" pipe_config[mod0].dev = tvm.cpu(0) ############################################################## # Set the compile target of the second subgraph module as cuda. pipe_config[mod1].target = "cuda" pipe_config[mod1].dev = tvm.device("cuda", 0) pipe_config[mod1].build_func = cutlass_build pipe_config[mod1].export_cc = "nvcc" # Create the pipeline by connecting the subgraph modules. # The global input will be forwarded to the input interface of the first module named mod0 pipe_config["input"]["data"].connect(pipe_config[mod0]["input"]["data"]) # The first output of mod0 will be forwarded to the input interface of mod1 pipe_config[mod0]["output"][0].connect(pipe_config[mod1]["input"]["data_n_0"]) # The first output of mod1 will be the first global output. 
pipe_config[mod1]["output"][0].connect(pipe_config["output"][0]) ###################################### # The pipeline configuration as below. """ print(pipe_config) Inputs |data: mod0:data output |output(0) : mod1.output(0) connections |mod0.output(0)-> mod1.data_n_0 """ # sphinx_gallery_start_ignore from tvm import testing # testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore ############################## # Build the pipeline executor. # ---------------------------- with tvm.transform.PassContext(opt_level=3): pipeline_mod_factory = pipeline_executor_build.build(pipe_config) ############################################### # Export the parameter configuration to a file. directory_path = tvm.contrib.utils.tempdir().temp_dir os.makedirs(directory_path, exist_ok=True) config_file_name = pipeline_mod_factory.export_library(directory_path) ################################################################ # Use the load function to create and initialize PipelineModule. # -------------------------------------------------------------- pipeline_module = pipeline_executor.PipelineModule.load_library(config_file_name) ############################ # Run the pipeline executor. # -------------------------- # Allocate input data. data = np.random.uniform(-1, 1, size=data_shape).astype("float16") pipeline_module.set_input("data", tvm.nd.array(data)) ########################################################################## # Run the two subgraph in the pipeline mode to get the output asynchronously # or synchronously. In the following example, it is synchronous. pipeline_module.run() outputs = pipeline_module.get_output() ###################################### # Use graph_executor for verification. # ------------------------------------ # Run these two subgraphs in sequence with graph_executor to get the output. target = "llvm" dev0 = tvm.device(target, 0) lib0 = relay.build_module.build(mod0, target, params=params) module0 = runtime.GraphModule(lib0["default"](dev0)) cuda = tvm.target.Target("cuda", host=tvm.target.Target("llvm")) lib1 = relay.build_module.build(mod1, [cuda, cutlass], params=params) lib1 = finalize_modules(lib1, "compile.so", "./tmp") dev1 = tvm.device("cuda", 0) module1 = runtime.GraphModule(lib1["default"](dev1)) module0.set_input("data", data) module0.run() out_shape = (1, 16, img_size, img_size) out = module0.get_output(0, tvm.nd.empty(out_shape, "float16")) module1.set_input("data_n_0", out) module1.run() out_shape = (1, 1) out = module1.get_output(0, tvm.nd.empty(out_shape, "float16")) #################### # Verify the result. tvm.testing.assert_allclose(outputs[0].numpy(), out.numpy())
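##########################################################################
# As an optional closing sketch (assuming the ``pipeline_module`` and ``data``
# created above), the run plus output fetch can be timed with the standard ``time``
# module. Since this example runs the pipeline synchronously, wrapping both calls
# gives a rough end-to-end latency; nothing beyond the APIs already shown is used.

import time

pipeline_module.set_input("data", tvm.nd.array(data))
start = time.time()
pipeline_module.run()
outputs = pipeline_module.get_output()
print("pipeline run + output fetch took %.3f ms" % ((time.time() - start) * 1e3))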
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_relay/using_relay_viz.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=line-too-long """ Use Relay Visualizer to Visualize Relay ============================================================ **Author**: `Chi-Wei Wang <https://github.com/chiwwang>`_ Relay IR module can contain lots of operations. Although an individual operation is usually easy to understand, putting them together can cause a complicated, hard-to-read graph. Things can get even worse with optimization-passes coming into play. This utility visualizes an IR module as nodes and edges. It defines a set of interfaces including parser, plotter(renderer), graph, node, and edges. A default parser is provided. Users can implement their own renderers to render the graph. Here we use a renderer rendering graph in the text-form. It is a lightweight, AST-like visualizer, inspired by `clang ast-dump <https://clang.llvm.org/docs/IntroductionToTheClangAST.html>`_. We will introduce how to implement customized parsers and renderers through interface classes. For more details, please refer to :py:mod:`tvm.contrib.relay_viz`. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore from typing import ( Dict, Union, Tuple, List, ) import tvm from tvm import relay from tvm.contrib import relay_viz from tvm.contrib.relay_viz.interface import ( VizEdge, VizNode, VizParser, ) from tvm.contrib.relay_viz.terminal import ( TermGraph, TermPlotter, TermVizParser, ) ###################################################################### # Define a Relay IR Module with multiple GlobalVar # ------------------------------------------------ # Let's build an example Relay IR Module containing multiple ``GlobalVar``. # We define an ``add`` function and call it in the main function. data = relay.var("data") bias = relay.var("bias") add_op = relay.add(data, bias) add_func = relay.Function([data, bias], add_op) add_gvar = relay.GlobalVar("AddFunc") input0 = relay.var("input0") input1 = relay.var("input1") input2 = relay.var("input2") add_01 = relay.Call(add_gvar, [input0, input1]) add_012 = relay.Call(add_gvar, [input2, add_01]) main_func = relay.Function([input0, input1, input2], add_012) main_gvar = relay.GlobalVar("main") mod = tvm.IRModule({main_gvar: main_func, add_gvar: add_func}) ###################################################################### # Render the graph with Relay Visualizer on the terminal # ------------------------------------------------------ # The terminal can show a Relay IR module in text similar to clang AST-dump. # We should see ``main`` and ``AddFunc`` function. ``AddFunc`` is called twice in the ``main`` function. 
viz = relay_viz.RelayVisualizer(mod) viz.render() ###################################################################### # Customize Parser for Interested Relay Types # ------------------------------------------- # Sometimes we want to emphasize interested information, or parse things differently for a specific usage. # It is possible to provide customized parsers as long as it obeys the interface. # Here demonstrate how to customize parsers for ``relay.var``. # We need to implement abstract interface :py:class:`tvm.contrib.relay_viz.interface.VizParser`. class YourAwesomeParser(VizParser): def __init__(self): self._delegate = TermVizParser() def get_node_edges( self, node: relay.Expr, relay_param: Dict[str, tvm.runtime.NDArray], node_to_id: Dict[relay.Expr, str], ) -> Tuple[Union[VizNode, None], List[VizEdge]]: if isinstance(node, relay.Var): node = VizNode(node_to_id[node], "AwesomeVar", f"name_hint {node.name_hint}") # no edge is introduced. So return an empty list. return node, [] # delegate other types to the other parser. return self._delegate.get_node_edges(node, relay_param, node_to_id) ###################################################################### # Pass the parser and an interested renderer to visualizer. # Here we just the terminal renderer. viz = relay_viz.RelayVisualizer(mod, {}, TermPlotter(), YourAwesomeParser()) viz.render() ###################################################################### # Customization around Graph and Plotter # ------------------------------------------- # Besides parsers, we can also customize graph and renderers by implementing # abstract class :py:class:`tvm.contrib.relay_viz.interface.VizGraph` and # :py:class:`tvm.contrib.relay_viz.interface.Plotter`. # Here we override the ``TermGraph`` defined in ``terminal.py`` for easier demo. # We add a hook duplicating above ``AwesomeVar``, and make ``TermPlotter`` use the new class. class AwesomeGraph(TermGraph): def node(self, viz_node): # add the node first super().node(viz_node) # if it's AwesomeVar, duplicate it. if viz_node.type_name == "AwesomeVar": duplicated_id = f"duplicated_{viz_node.identity}" duplicated_type = "double AwesomeVar" super().node(VizNode(duplicated_id, duplicated_type, "")) # connect the duplicated var to the original one super().edge(VizEdge(duplicated_id, viz_node.identity)) # override TermPlotter to use `AwesomeGraph` instead class AwesomePlotter(TermPlotter): def create_graph(self, name): self._name_to_graph[name] = AwesomeGraph(name) return self._name_to_graph[name] viz = relay_viz.RelayVisualizer(mod, {}, AwesomePlotter(), YourAwesomeParser()) viz.render() ###################################################################### # Summary # ------- # This tutorial demonstrates the usage of Relay Visualizer and customization. # The class :py:class:`tvm.contrib.relay_viz.RelayVisualizer` is composed of interfaces # defined in ``interface.py``. # # It is aimed for quick look-then-fix iterations. # The constructor arguments are intended to be simple, while the customization is still # possible through a set of interface classes. #
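######################################################################
# As a closing sketch of the parser interface discussed above (not part of the
# visualizer itself), the same pattern extends to other Relay node types. The class
# below follows the ``YourAwesomeParser`` structure but labels ``relay.Constant``
# nodes instead of variables; every other node type is delegated to ``TermVizParser``.

class ConstantHighlightParser(VizParser):
    def __init__(self):
        self._delegate = TermVizParser()

    def get_node_edges(
        self,
        node: relay.Expr,
        relay_param: Dict[str, tvm.runtime.NDArray],
        node_to_id: Dict[relay.Expr, str],
    ) -> Tuple[Union[VizNode, None], List[VizEdge]]:
        if isinstance(node, relay.Constant):
            # constants introduce no extra edges, so the edge list stays empty.
            return VizNode(node_to_id[node], "HighlightedConst", f"dtype {node.data.dtype}"), []
        # delegate all other node types to the default terminal parser.
        return self._delegate.get_node_edges(node, relay_param, node_to_id)

viz = relay_viz.RelayVisualizer(mod, {}, TermPlotter(), ConstantHighlightParser())
viz.render()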
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_schedules/extern_op.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ External Tensor Functions ========================= **Author**: `Tianqi Chen <https://tqchen.github.io>`_ While TVM supports transparent code generation, sometimes it is also helpful to incorporate manual written code into the pipeline. For example, we might want to use cuDNN for some of the convolution kernels and define the rest of the stages. TVM supports these black box function calls natively. Specifically, TVM support all the tensor functions that are DLPack compatible. Which means we can call any function with POD types(pointer, int, float) or pointer to DLTensor as argument. """ from __future__ import absolute_import, print_function # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import tvm from tvm import te import numpy as np from tvm.contrib import cblas import tvm.testing if not tvm.get_global_func("tvm.contrib.cblas.matmul", allow_missing=True): raise Exception("Not compiled with cblas support; can't build this tutorial") ###################################################################### # Use Extern Tensor Function # -------------------------- # In the example below, we use :any:`te.extern` to add an extern # array function call. In the extern call, we declare the shape # of output tensors. In the second argument we provide the list of inputs. # # User will need to provide a function describing how to compute the result. # The compute function takes list of symbolic placeholder for the inputs, # list of symbolic placeholder for the outputs and returns the executing statement. # # In this case we simply call a registered TVM function, which invokes a CBLAS call. # TVM does not control internal of the extern array function and treats it as black-box. # We can further mix schedulable TVM calls that add a bias term to the result. # n = 1024 l = 128 m = 235 bias = te.var("bias", dtype="float32") A = te.placeholder((n, l), name="A") B = te.placeholder((l, m), name="B") C = te.extern( (n, m), [A, B], lambda ins, outs: tvm.tir.call_packed( "tvm.contrib.cblas.matmul", ins[0], ins[1], outs[0], False, False ), name="C", ) D = te.compute(C.shape, lambda i, j: C[i, j] + bias, name="D") s = te.create_schedule(D.op) ###################################################################### # Verify the Result # ----------------- # We can verify that the result matches what we expected. 
# dev = tvm.cpu(0) f = tvm.build(s, [A, B, D, bias], "llvm") a = tvm.nd.array(np.random.uniform(size=(n, l)).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=(l, m)).astype(B.dtype), dev) d = tvm.nd.array(np.zeros((n, m), dtype=D.dtype), dev) bb = 10.0 f(a, b, d, bb) tvm.testing.assert_allclose(d.numpy(), np.dot(a.numpy(), b.numpy()) + 10, rtol=1e-5) ###################################################################### # Extern Contrib Wrappers # ----------------------- # TVM also provide extern contrib wrappers to useful extern calls, # the following line is equivalent to the previous example. # from tvm.contrib import cblas C = cblas.matmul(A, B) D = te.compute(C.shape, lambda i, j: C[i, j] + bias, name="D") s = te.create_schedule(D.op) ###################################################################### # Hook Python Function as Extern # ------------------------------ # Since we can call into any PackedFunc in TVM. We can use the extern # function to callback into python. # # The following example registers a python function into TVM runtime system # and use it to complete one stage of the computation. # This makes TVM much more flexible. For example, we can insert front-end # callbacks to inspect the intermediate results or mix customized code # with TVM. # @tvm.register_func("tvm.contrib.my_tvm_addone") def my_tvm_addone(x, y): print("my_tvm_addone signatures: %s, %s" % (type(x), type(y))) tvm.nd.array(x.numpy() + 1).copyto(y) A = te.placeholder((n,), name="A") B = te.extern( A.shape, [A], lambda ins, outs: tvm.tir.call_packed("tvm.contrib.my_tvm_addone", ins[0], outs[0]), name="C", ) s = te.create_schedule(B.op) f = tvm.build(s, [A, B], "llvm") a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=(n,)).astype(B.dtype), dev) f(a, b) tvm.testing.assert_allclose(b.numpy(), a.numpy() + 1, rtol=1e-5) ###################################################################### # Summary # ------- # - TVM calls extern tensor function via :any:`te.extern` # - Use contrib wrappers for short sugars of extern tensor calls. # - We can hook front-end function as extern tensor callbacks. #
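######################################################################
# As a closing sketch, the ``cblas.matmul`` wrapper shown earlier can be verified
# the same way as the explicit ``te.extern`` call. The placeholders are re-declared
# here (``A2``, ``B2`` and so on are illustrative names) only because the
# Python-callback example above rebound ``A``, ``B`` and ``s``.

A2 = te.placeholder((n, l), name="A2")
B2 = te.placeholder((l, m), name="B2")
C2 = cblas.matmul(A2, B2)
D2 = te.compute(C2.shape, lambda i, j: C2[i, j] + bias, name="D2")
s2 = te.create_schedule(D2.op)
f2 = tvm.build(s2, [A2, B2, D2, bias], "llvm")
a2 = tvm.nd.array(np.random.uniform(size=(n, l)).astype(A2.dtype), dev)
b2 = tvm.nd.array(np.random.uniform(size=(l, m)).astype(B2.dtype), dev)
d2 = tvm.nd.array(np.zeros((n, m), dtype=D2.dtype), dev)
f2(a2, b2, d2, 10.0)
tvm.testing.assert_allclose(d2.numpy(), np.dot(a2.numpy(), b2.numpy()) + 10, rtol=1e-5)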
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_schedules/intrin_math.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Intrinsics and Math Functions ============================= **Author**: `Tianqi Chen <https://tqchen.github.io>`_ While TVM supports basic arithmetic operations. In many cases usually we will need more complicated builtin functions. For example :code:`exp` to take the exponential of the function. These functions are target system dependent and may have different names of different target platforms. In this tutorial, we will learn how we can invoke these target specific functions, and how we can unify the interface via TVM's intrinsic API. """ from __future__ import absolute_import, print_function # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignoreimport numpy as np import tvm from tvm import te from tvm.ir import register_op_attr, register_intrin_lowering ###################################################################### # Direct Declare Extern Math Call # ------------------------------- # The most straight-forward way to call target specific function is via # extern function call construct in tvm. # In the following example, we use :any:`tvm.tir.call_pure_extern` to call # :code:`__expf` function, which is only available under CUDA. # n = te.var("n") A = te.placeholder((n,), name="A") B = te.compute(A.shape, lambda i: tvm.tir.call_pure_extern("float32", "__expf", A[i]), name="B") s = te.create_schedule(B.op) num_thread = 64 bx, tx = s[B].split(B.op.axis[0], factor=num_thread) s[B].bind(bx, te.thread_axis("blockIdx.x")) s[B].bind(tx, te.thread_axis("threadIdx.x")) f = tvm.build(s, [A, B], "cuda", name="myexp") print(f.imported_modules[0].get_source()) ###################################################################### # Unified Intrinsic Call # ---------------------- # The above code verifies that direct external call can be used to # call into device specific functions. # However, the above way only works for CUDA target with float type. # Ideally, we want to write same code for any device and any data type. # # TVM intrinsic provides the user a mechanism to achieve this, and this # is the recommended way to solve the problem. # The following code use te.exp instead, which create an intrinsic call # :py::func:`tvm.te.exp` to do the exponential. 
# n = te.var("n") A = te.placeholder((n,), name="A") B = te.compute(A.shape, lambda i: te.exp(A[i]), name="B") s = te.create_schedule(B.op) num_thread = 64 bx, tx = s[B].split(B.op.axis[0], factor=num_thread) s[B].bind(bx, te.thread_axis("blockIdx.x")) s[B].bind(tx, te.thread_axis("threadIdx.x")) fcuda = tvm.build(s, [A, B], "cuda", name="myexp") print(fcuda.imported_modules[0].get_source()) ###################################################################### # We can find that the code works for both CUDA and opencl. # The same te.exp can also be used for float64 data types. # fopencl = tvm.build(s, [A, B], "opencl", name="myexp") print(fopencl.imported_modules[0].get_source()) ###################################################################### # Intrinsic Lowering Rule # ----------------------- # When :py:func:`tvm.te.exp` is called, TVM creates an intrinsic Call Expr. # TVM uses transformation rules to transform the intrinsic # call to device specific extern calls. # # TVM also allows user to customize the rules during runtime. # The following example customizes CUDA lowering rule for :code:`exp`. # def my_cuda_math_rule(op): """Customized CUDA intrinsic lowering rule""" assert isinstance(op, tvm.tir.Call) name = op.op.name assert name.startswith("tir.") dispatch_name = name[4:] if op.dtype == "float32": # call float function return tvm.tir.call_pure_extern("float32", "%sf" % dispatch_name, op.args[0]) elif op.dtype == "float64": # call double function return tvm.tir.call_pure_extern("float32", dispatch_name, op.args[0]) else: # cannot do translation, return self. return op register_intrin_lowering("tir.exp", target="cuda", f=my_cuda_math_rule, level=99) ###################################################################### # Register the rule to TVM with override option to override existing rule. # Notice the difference between the printed code from previous one: # our new rule uses math function :code:`expf` instead of # fast math version :code:`__expf`. # fcuda = tvm.build(s, [A, B], "cuda", name="myexp") print(fcuda.imported_modules[0].get_source()) ###################################################################### # Add Your Own Intrinsic # ---------------------- # If there is an intrinsic that is not provided by TVM. # User can easily add new intrinsic by using the intrinsic rule system. # The following example add an intrinsic :code:`mylog` to the system. 
# def mylog(x): """customized log intrinsic function""" return tvm.tir.call_intrin(x.dtype, "tir.mylog", x) def my_cuda_mylog_rule(op): """CUDA lowering rule for log""" if op.dtype == "float32": return tvm.tir.call_pure_extern("float32", "logf", op.args[0]) elif op.dtype == "float64": return tvm.tir.call_pure_extern("float64", "log", op.args[0]) else: return op # new op registration is triggered by registering an attribute of the op register_op_attr("tir.mylog", "TCallEffectKind", tvm.tir.CallEffectKind.Pure) register_intrin_lowering("tir.mylog", target="cuda", f=my_cuda_mylog_rule, level=99) n = te.var("n") A = te.placeholder((n,), name="A") B = te.compute(A.shape, lambda i: mylog(A[i]), name="B") s = te.create_schedule(B.op) num_thread = 64 bx, tx = s[B].split(B.op.axis[0], factor=num_thread) s[B].bind(bx, te.thread_axis("blockIdx.x")) s[B].bind(tx, te.thread_axis("threadIdx.x")) fcuda = tvm.build(s, [A, B], "cuda", name="mylog") print(fcuda.imported_modules[0].get_source()) ###################################################################### # Summary # ------- # - TVM can call extern target dependent math function. # - Use intrinsic to defined a unified interface for the functions. # - For more intrinsics available in tvm, take a look at :any:`tvm.tir` # - You can customize the intrinsic behavior by defining your own rules. #
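######################################################################
# A minimal sketch showing that the unified intrinsic also works on CPU: the same
# ``te.exp`` computation builds for the ``llvm`` target with no device-specific
# extern call and no thread binding, and the result can be checked with numpy.

import numpy as np

n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda i: te.exp(A[i]), name="B")
s = te.create_schedule(B.op)
fcpu = tvm.build(s, [A, B], "llvm", name="myexp_cpu")

a = tvm.nd.array(np.random.uniform(size=64).astype(A.dtype), tvm.cpu(0))
b = tvm.nd.array(np.zeros(64, dtype=B.dtype), tvm.cpu(0))
fcpu(a, b)
np.testing.assert_allclose(b.numpy(), np.exp(a.numpy()), rtol=1e-5)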
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_schedules/reduction.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Reduction ========= **Author**: `Tianqi Chen <https://tqchen.github.io>`_ This is an introduction material on how to do reduction in TVM. Associative reduction operators like sum/max/min are typical construction blocks of linear algebra operations. In this tutorial, we will demonstrate how to do reduction in TVM. """ from __future__ import absolute_import, print_function # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import tvm import tvm.testing from tvm import te import numpy as np ###################################################################### # Describe Sum of Rows # -------------------- # Assume we want to compute sum of rows as our example. # In numpy semantics this can be written as :code:`B = numpy.sum(A, axis=1)` # # The following lines describe the row sum operation. # To create a reduction formula, we declare a reduction axis using # :any:`te.reduce_axis`. :any:`te.reduce_axis` takes in the range of reductions. # :any:`te.sum` takes in the expression to be reduced as well as the reduction # axis and compute the sum of value over all k in the declared range. # # The equivalent C code is as follows: # # .. code-block:: c # # for (int i = 0; i < n; ++i) { # B[i] = 0; # for (int k = 0; k < m; ++k) { # B[i] = B[i] + A[i][k]; # } # } # n = te.var("n") m = te.var("m") A = te.placeholder((n, m), name="A") k = te.reduce_axis((0, m), "k") B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B") ###################################################################### # Schedule the Reduction # ---------------------- # There are several ways to schedule a reduction. # Before doing anything, let us print out the IR code of default schedule. # s = te.create_schedule(B.op) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # You can find that the IR code is quite like the C code. # The reduction axis is similar to a normal axis, it can be splitted. # # In the following code we split both the row axis of B as well # axis by different factors. The result is a nested reduction. # ko, ki = s[B].split(B.op.reduce_axis[0], factor=16) xo, xi = s[B].split(B.op.axis[0], factor=32) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # If we are building a GPU kernel, we can bind the rows of B to GPU threads. 
s[B].bind(xo, te.thread_axis("blockIdx.x")) s[B].bind(xi, te.thread_axis("threadIdx.x")) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # Reduction Factoring and Parallelization # --------------------------------------- # One problem of building a reduction is that we cannot simply # parallelize over the reduction axis. We need to divide the computation # of the reduction, store the local reduction result in a temporal array # before doing a reduction over the temp array. # # The rfactor primitive does such rewrite of the computation. # In the following schedule, the result of B is written to a temporary # result B.rf. The factored dimension becomes the first dimension of B.rf. # s = te.create_schedule(B.op) ko, ki = s[B].split(B.op.reduce_axis[0], factor=16) BF = s.rfactor(B, ki) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # The scheduled operator of B also get rewritten to be sum over # the first axis of reduced result of B.f # print(s[B].op.body) ###################################################################### # Cross Thread Reduction # ---------------------- # We can now parallelize over the factored axis. # Here the reduction axis of B is marked to be a thread. # TVM allows reduction axis to be marked as thread if it is the only # axis in reduction and cross thread reduction is possible in the device. # # This is indeed the case after the factoring. # We can directly compute BF at the reduction axis as well. # The final generated kernel will divide the rows by blockIdx.x and threadIdx.y # columns by threadIdx.x and finally do a cross thread reduction over threadIdx.x # xo, xi = s[B].split(s[B].op.axis[0], factor=32) s[B].bind(xo, te.thread_axis("blockIdx.x")) s[B].bind(xi, te.thread_axis("threadIdx.y")) tx = te.thread_axis("threadIdx.x") s[B].bind(s[B].op.reduce_axis[0], tx) s[BF].compute_at(s[B], s[B].op.reduce_axis[0]) s[B].set_store_predicate(tx.var.equal(0)) fcuda = tvm.build(s, [A, B], "cuda") print(fcuda.imported_modules[0].get_source()) ###################################################################### # Verify the correctness of result kernel by comparing it to numpy. # nn = 128 dev = tvm.cuda(0) a = tvm.nd.array(np.random.uniform(size=(nn, nn)).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), dev) fcuda(a, b) tvm.testing.assert_allclose(b.numpy(), np.sum(a.numpy(), axis=1), rtol=1e-4) ###################################################################### # Describe Convolution via 2D Reduction # ------------------------------------- # In TVM, we can describe convolution via 2D reduction in a simple way. # Here is an example for 2D convolution with filter size = [3, 3] and strides = [1, 1]. # n = te.var("n") Input = te.placeholder((n, n), name="Input") Filter = te.placeholder((3, 3), name="Filter") di = te.reduce_axis((0, 3), name="di") dj = te.reduce_axis((0, 3), name="dj") Output = te.compute( (n - 2, n - 2), lambda i, j: te.sum(Input[i + di, j + dj] * Filter[di, dj], axis=[di, dj]), name="Output", ) s = te.create_schedule(Output.op) print(tvm.lower(s, [Input, Filter, Output], simple_mode=True)) ###################################################################### # .. 
_general-reduction: # # Define General Commutative Reduction Operation # ---------------------------------------------- # Besides the built-in reduction operations like :any:`te.sum`, # :any:`tvm.te.min` and :any:`tvm.te.max`, you can also define your # commutative reduction operation by :any:`te.comm_reducer`. # n = te.var("n") m = te.var("m") product = te.comm_reducer(lambda x, y: x * y, lambda t: tvm.tir.const(1, dtype=t), name="product") A = te.placeholder((n, m), name="A") k = te.reduce_axis((0, m), name="k") B = te.compute((n,), lambda i: product(A[i, k], axis=k), name="B") ###################################################################### # .. note:: # # Sometimes we would like to perform reduction that involves multiple # values like :code:`argmax`, which can be done by tuple inputs. # See :ref:`reduction-with-tuple-inputs` for more detail. ###################################################################### # Summary # ------- # This tutorial provides a walk through of reduction schedule. # # - Describe reduction with reduce_axis. # - Use rfactor to factor out axis if we need parallelism. # - Define new reduction operation by :any:`te.comm_reducer`
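######################################################################
# As a minimal sketch, the ``product`` reducer defined above can be scheduled and
# built for the CPU like any other reduction, and checked against ``numpy.prod``
# (this reuses ``A``, ``B`` and ``k`` from the previous block).

s = te.create_schedule(B.op)
fprod = tvm.build(s, [A, B], "llvm", name="row_product")
dev_cpu = tvm.cpu(0)
a = tvm.nd.array(np.random.uniform(1, 2, size=(16, 8)).astype(A.dtype), dev_cpu)
b = tvm.nd.array(np.zeros(16, dtype=B.dtype), dev_cpu)
fprod(a, b)
tvm.testing.assert_allclose(b.numpy(), np.prod(a.numpy(), axis=1), rtol=1e-5)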
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_schedules/scan.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Scan and Recurrent Kernel ========================= **Author**: `Tianqi Chen <https://tqchen.github.io>`_ This is an introduction material on how to do recurrent computing in TVM. Recurrent computing is a typical pattern in neural networks. """ from __future__ import absolute_import, print_function # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import tvm import tvm.testing from tvm import te import numpy as np ###################################################################### # TVM supports a scan operator to describe symbolic loop. # The following scan op computes cumsum over columns of X. # # The scan is carried over the highest dimension of the tensor. # :code:`s_state` is a placeholder that describes the transition state of the scan. # :code:`s_init` describes how we can initialize the first k timesteps. # Here since s_init's first dimension is 1, it describes how we initialize # The state at first timestep. # # :code:`s_update` describes how to update the value at timestep t. The update # value can refer back to the values of previous timestep via state placeholder. # Note that while it is invalid to refer to :code:`s_state` at current or later timestep. # # The scan takes in state placeholder, initial value and update description. # It is also recommended(although not necessary) to list the inputs to the scan cell. # The result of the scan is a tensor, giving the result of :code:`s_state` after the # update over the time domain. # m = te.var("m") n = te.var("n") X = te.placeholder((m, n), name="X") s_state = te.placeholder((m, n)) s_init = te.compute((1, n), lambda _, i: X[0, i]) s_update = te.compute((m, n), lambda t, i: s_state[t - 1, i] + X[t, i]) s_scan = tvm.te.scan(s_init, s_update, s_state, inputs=[X]) ###################################################################### # Schedule the Scan Cell # ---------------------- # We can schedule the body of the scan by scheduling the update and # init part separately. Note that it is invalid to schedule the # first iteration dimension of the update part. # To split on the time iteration, user can schedule on scan_op.scan_axis instead. 
# s = te.create_schedule(s_scan.op) num_thread = 256 block_x = te.thread_axis("blockIdx.x") thread_x = te.thread_axis("threadIdx.x") xo, xi = s[s_init].split(s_init.op.axis[1], factor=num_thread) s[s_init].bind(xo, block_x) s[s_init].bind(xi, thread_x) xo, xi = s[s_update].split(s_update.op.axis[1], factor=num_thread) s[s_update].bind(xo, block_x) s[s_update].bind(xi, thread_x) print(tvm.lower(s, [X, s_scan], simple_mode=True)) ###################################################################### # Build and Verify # ---------------- # We can build the scan kernel like other TVM kernels, here we use # numpy to verify the correctness of the result. # fscan = tvm.build(s, [X, s_scan], "cuda", name="myscan") dev = tvm.cuda(0) n = 1024 m = 10 a_np = np.random.uniform(size=(m, n)).astype(s_scan.dtype) a = tvm.nd.array(a_np, dev) b = tvm.nd.array(np.zeros((m, n), dtype=s_scan.dtype), dev) fscan(a, b) tvm.testing.assert_allclose(b.numpy(), np.cumsum(a_np, axis=0)) ###################################################################### # Multi-Stage Scan Cell # --------------------- # In the above example we described the scan cell using one Tensor # computation stage in s_update. It is possible to use multiple # Tensor stages in the scan cell. # # The following lines demonstrate a scan with two stage operations # in the scan cell. # m = te.var("m") n = te.var("n") X = te.placeholder((m, n), name="X") s_state = te.placeholder((m, n)) s_init = te.compute((1, n), lambda _, i: X[0, i]) s_update_s1 = te.compute((m, n), lambda t, i: s_state[t - 1, i] * 2, name="s1") s_update_s2 = te.compute((m, n), lambda t, i: s_update_s1[t, i] + X[t, i], name="s2") s_scan = tvm.te.scan(s_init, s_update_s2, s_state, inputs=[X]) ###################################################################### # These intermediate tensors can also be scheduled normally. # To ensure correctness, TVM creates a group constraint to forbid # the body of scan to be compute_at locations outside the scan loop. # s = te.create_schedule(s_scan.op) xo, xi = s[s_update_s2].split(s_update_s2.op.axis[1], factor=32) s[s_update_s1].compute_at(s[s_update_s2], xo) print(tvm.lower(s, [X, s_scan], simple_mode=True)) ###################################################################### # Multiple States # --------------- # For complicated applications like RNN, we might need more than one # recurrent state. Scan support multiple recurrent states. # The following example demonstrates how we can build recurrence with two states. # m = te.var("m") n = te.var("n") l = te.var("l") X = te.placeholder((m, n), name="X") s_state1 = te.placeholder((m, n)) s_state2 = te.placeholder((m, l)) s_init1 = te.compute((1, n), lambda _, i: X[0, i]) s_init2 = te.compute((1, l), lambda _, i: 0.0) s_update1 = te.compute((m, n), lambda t, i: s_state1[t - 1, i] + X[t, i]) s_update2 = te.compute((m, l), lambda t, i: s_state2[t - 1, i] + s_state1[t - 1, 0]) s_scan1, s_scan2 = tvm.te.scan( [s_init1, s_init2], [s_update1, s_update2], [s_state1, s_state2], inputs=[X] ) s = te.create_schedule(s_scan1.op) print(tvm.lower(s, [X, s_scan1, s_scan2], simple_mode=True)) ###################################################################### # Summary # ------- # This tutorial provides a walk through of scan primitive. # # - Describe scan with init and update. # - Schedule the scan cells as normal schedule. # - For complicated workload, use multiple states and steps in scan cell.
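######################################################################
# To make the two-state recurrence above concrete, here is a small numpy reference
# (a sketch only, independent of the scan op itself). ``state1`` accumulates the
# rows of X over time, while ``state2`` accumulates the first column of ``state1``
# from the previous timestep, mirroring ``s_update1`` and ``s_update2``.

def two_state_scan_ref(x, l_dim):
    m_dim, n_dim = x.shape
    state1 = np.zeros((m_dim, n_dim), dtype=x.dtype)
    state2 = np.zeros((m_dim, l_dim), dtype=x.dtype)
    state1[0] = x[0]  # s_init1
    state2[0] = 0.0   # s_init2
    for t in range(1, m_dim):
        state1[t] = state1[t - 1] + x[t]                # s_update1
        state2[t] = state2[t - 1] + state1[t - 1, 0]    # s_update2
    return state1, state2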
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_schedules/schedule_primitives.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _schedule_primitives: Schedule Primitives in TVM ========================== **Author**: `Ziheng Jiang <https://github.com/ZihengJiang>`_ TVM is a domain specific language for efficient kernel construction. In this tutorial, we will show you how to schedule the computation by various primitives provided by TVM. """ from __future__ import absolute_import, print_function # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import tvm from tvm import te import numpy as np ###################################################################### # # There often exist several methods to compute the same result, # however, different methods will result in different locality and # performance. So TVM asks user to provide how to execute the # computation called **Schedule**. # # A **Schedule** is a set of transformation of computation that # transforms the loop of computations in the program. # # declare some variables for use later n = te.var("n") m = te.var("m") ###################################################################### # A schedule can be created from a list of ops, by default the # schedule computes tensor in a serial manner in a row-major order. # declare a matrix element-wise multiply A = te.placeholder((m, n), name="A") B = te.placeholder((m, n), name="B") C = te.compute((m, n), lambda i, j: A[i, j] * B[i, j], name="C") s = te.create_schedule([C.op]) # lower will transform the computation from definition to the real # callable function. With argument `simple_mode=True`, it will # return you a readable C like statement, we use it here to print the # schedule result. print(tvm.lower(s, [A, B, C], simple_mode=True)) ###################################################################### # One schedule is composed by multiple stages, and one # **Stage** represents schedule for one operation. We provide various # methods to schedule every stage. ###################################################################### # split # ----- # :code:`split` can split a specified axis into two axes by # :code:`factor`. A = te.placeholder((m,), name="A") B = te.compute((m,), lambda i: A[i] * 2, name="B") s = te.create_schedule(B.op) xo, xi = s[B].split(B.op.axis[0], factor=32) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # You can also split a axis by :code:`nparts`, which splits the axis # contrary with :code:`factor`. 
A = te.placeholder((m,), name="A") B = te.compute((m,), lambda i: A[i], name="B") s = te.create_schedule(B.op) bx, tx = s[B].split(B.op.axis[0], nparts=32) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # tile # ---- # :code:`tile` help you execute the computation tile by tile over two # axes. A = te.placeholder((m, n), name="A") B = te.compute((m, n), lambda i, j: A[i, j], name="B") s = te.create_schedule(B.op) xo, yo, xi, yi = s[B].tile(B.op.axis[0], B.op.axis[1], x_factor=10, y_factor=5) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # fuse # ---- # :code:`fuse` can fuse two consecutive axes of one computation. A = te.placeholder((m, n), name="A") B = te.compute((m, n), lambda i, j: A[i, j], name="B") s = te.create_schedule(B.op) # tile to four axes first: (i.outer, j.outer, i.inner, j.inner) xo, yo, xi, yi = s[B].tile(B.op.axis[0], B.op.axis[1], x_factor=10, y_factor=5) # then fuse (i.inner, j.inner) into one axis: (i.inner.j.inner.fused) fused = s[B].fuse(xi, yi) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # reorder # ------- # :code:`reorder` can reorder the axes in the specified order. A = te.placeholder((m, n), name="A") B = te.compute((m, n), lambda i, j: A[i, j], name="B") s = te.create_schedule(B.op) # tile to four axes first: (i.outer, j.outer, i.inner, j.inner) xo, yo, xi, yi = s[B].tile(B.op.axis[0], B.op.axis[1], x_factor=10, y_factor=5) # then reorder the axes: (i.inner, j.outer, i.outer, j.inner) s[B].reorder(xi, yo, xo, yi) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # bind # ---- # :code:`bind` can bind a specified axis with a thread axis, often used # in gpu programming. A = te.placeholder((n,), name="A") B = te.compute(A.shape, lambda i: A[i] * 2, name="B") s = te.create_schedule(B.op) bx, tx = s[B].split(B.op.axis[0], factor=64) s[B].bind(bx, te.thread_axis("blockIdx.x")) s[B].bind(tx, te.thread_axis("threadIdx.x")) print(tvm.lower(s, [A, B], simple_mode=True)) ###################################################################### # compute_at # ---------- # For a schedule that consists of multiple operators, TVM will compute # tensors at the root separately by default. A = te.placeholder((m,), name="A") B = te.compute((m,), lambda i: A[i] + 1, name="B") C = te.compute((m,), lambda i: B[i] * 2, name="C") s = te.create_schedule(C.op) print(tvm.lower(s, [A, B, C], simple_mode=True)) ###################################################################### # :code:`compute_at` can move computation of `B` into the first axis # of computation of `C`. A = te.placeholder((m,), name="A") B = te.compute((m,), lambda i: A[i] + 1, name="B") C = te.compute((m,), lambda i: B[i] * 2, name="C") s = te.create_schedule(C.op) s[B].compute_at(s[C], C.op.axis[0]) print(tvm.lower(s, [A, B, C], simple_mode=True)) ###################################################################### # compute_inline # -------------- # :code:`compute_inline` can mark one stage as inline, then the body of # computation will be expanded and inserted at the address where the # tensor is required. 
A = te.placeholder((m,), name="A")
B = te.compute((m,), lambda i: A[i] + 1, name="B")
C = te.compute((m,), lambda i: B[i] * 2, name="C")

s = te.create_schedule(C.op)
s[B].compute_inline()
print(tvm.lower(s, [A, B, C], simple_mode=True))

######################################################################
# compute_root
# ------------
# :code:`compute_root` can move the computation of one stage back to the root.

A = te.placeholder((m,), name="A")
B = te.compute((m,), lambda i: A[i] + 1, name="B")
C = te.compute((m,), lambda i: B[i] * 2, name="C")

s = te.create_schedule(C.op)
s[B].compute_at(s[C], C.op.axis[0])
s[B].compute_root()
print(tvm.lower(s, [A, B, C], simple_mode=True))

######################################################################
# Summary
# -------
# This tutorial provides an introduction to schedule primitives in
# TVM, which permits users to schedule the computation easily and
# flexibly.
#
# In order to get a well-performing kernel implementation, the
# general workflow is often:
#
# - Describe your computation via a series of operations.
# - Try to schedule the computation with primitives.
# - Compile and run to see the performance difference.
# - Adjust your schedule according to the running results.
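######################################################################
# The short sketch below walks that workflow once, end to end (assuming an
# LLVM-enabled build of TVM): it combines :code:`split` and :code:`compute_at`
# on the element-wise example, then builds and runs the kernel to confirm the
# schedule does not change the computed result.

A = te.placeholder((1024,), name="A")
B = te.compute((1024,), lambda i: A[i] + 1, name="B")
C = te.compute((1024,), lambda i: B[i] * 2, name="C")

s = te.create_schedule(C.op)
xo, xi = s[C].split(C.op.axis[0], factor=64)
s[B].compute_at(s[C], xo)  # produce a 64-element chunk of B per outer iteration of C
print(tvm.lower(s, [A, C], simple_mode=True))

func = tvm.build(s, [A, C], target="llvm")
a_np = np.random.uniform(size=1024).astype("float32")
c_nd = tvm.nd.array(np.zeros(1024, dtype="float32"))
func(tvm.nd.array(a_np), c_nd)
# The schedule only changes loop structure, so the result must still be (A + 1) * 2.
np.testing.assert_allclose(c_nd.numpy(), (a_np + 1) * 2, rtol=1e-5)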
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_schedules/tedd.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Use Tensor Expression Debug Display (TEDD) for Visualization ============================================================ **Author**: `Yongfeng Gu <https://github.com/yongfeng-nv>`_ This is an introduction about using TEDD to visualize tensor expressions. Tensor Expressions are scheduled with primitives. Although individual primitives are usually easy to understand, they become complicated quickly when you put them together. We have introduced an operational model of schedule primitives in Tensor Expression. * the interactions between different schedule primitives, * the impact of the schedule primitives on the final code generation. The operational model is based on a Dataflow Graph, a Schedule Tree and an IterVar Relationship Graph. Schedule primitives perform operations on these graphs. TEDD renders these three graphs from a given schedule. This tutorial demonstrates how to use TEDD and how to interpret the rendered graphs. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import tvm from tvm import te from tvm import topi from tvm.contrib import tedd ###################################################################### # Define and Schedule Convolution with Bias and ReLU # -------------------------------------------------- # Let's build an example Tensor Expression for a convolution followed by Bias and ReLU. # We first connect conv2d, add, and relu TOPIs. Then, we create a TOPI generic schedule. # batch = 1 in_channel = 256 in_size = 32 num_filter = 256 kernel = 3 stride = 1 padding = "SAME" dilation = 1 A = te.placeholder((in_size, in_size, in_channel, batch), name="A") W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W") B = te.placeholder((1, num_filter, 1), name="bias") with tvm.target.Target("llvm"): t_conv = topi.nn.conv2d_hwcn(A, W, stride, padding, dilation) t_bias = topi.add(t_conv, B) t_relu = topi.nn.relu(t_bias) s = topi.generic.schedule_conv2d_hwcn([t_relu]) ###################################################################### # Render Graphs with TEDD # ----------------------- # We render graphs to see the computation # and how it is scheduled. # If you run the tutorial in a Jupyter notebook, you can use the following commented lines # to render SVG figures showing in notebook directly. # tedd.viz_dataflow_graph(s, dot_file_path="/tmp/dfg.dot") # tedd.viz_dataflow_graph(s, show_svg = True) ###################################################################### # .. image:: https://github.com/dmlc/web-data/raw/main/tvm/tutorial/tedd_dfg.png # :align: center # # The first one is a dataflow graph. 
Every node represents a stage with name and memory # scope shown in the middle and inputs/outputs information on the sides. # Edges show nodes' dependency. # tedd.viz_schedule_tree(s, dot_file_path="/tmp/scheduletree.dot") # tedd.viz_schedule_tree(s, show_svg = True) ###################################################################### # We just rendered the schedule tree graph. You may notice an warning about ranges not # available. # The message also suggests to call normalize() to infer range information. We will # skip inspecting the first schedule tree and encourage you to compare the graphs before # and after normalize() for its impact. # s = s.normalize() tedd.viz_schedule_tree(s, dot_file_path="/tmp/scheduletree2.dot") # tedd.viz_schedule_tree(s, show_svg = True) ###################################################################### # .. image:: https://github.com/dmlc/web-data/raw/main/tvm/tutorial/tedd_st.png # :align: center # # Now, let us take a close look at the second schedule tree. Every block under ROOT # represents a # stage. Stage name shows in the top row and compute shows in the bottom row. # The middle rows are for IterVars, the higher the outer, the lower the inner. # An IterVar row contains its index, name, type, and other optional information. # Let's use the W.shared stage as an example. The top row tells # its name, "W.shared", and memory scope, "Shared". Its compute is # :code:`W(ax0, ax1, ax2, ax3)`. # Its outer most loop IterVar is ax0.ax1.fused.ax2.fused.ax3.fused.outer, # indexed with 0, of kDataPar, bound to threadIdx.y, and with range(min=0, ext=8). # You can also tell # IterVar type with the index box color, shown in the legend. # # If a stage doesn't compute_at any other stage, it has an edge directly to the # ROOT node. Otherwise, it has an edge pointing to the IterVar it attaches to, # such as W.shared attaches to rx.outer in the middle compute stage. # ###################################################################### # .. note:: # # By definition, IterVars are internal nodes and computes are leaf nodes in # a schedule tree. The edges among IterVars and compute within one stage are # omitted, making every stage a block, for better readability. # tedd.viz_itervar_relationship_graph(s, dot_file_path="/tmp/itervar.dot") # tedd.viz_itervar_relationship_graph(s, show_svg = True) ###################################################################### # .. image:: https://github.com/dmlc/web-data/raw/main/tvm/tutorial/tedd_itervar_rel.png # :align: center # # The last one is an IterVar Relationship Graph. Every subgraph represents a # stage and contains IterVar nodes and transformation nodes. For example, # W.shared has three split nodes and three fuse nodes. The rest are IterVar # nodes of the same format as the IterVar rows in Schedule Trees. Root # IterVars are those not driven by any transformation node, such as ax0; leaf # IterVars don't drive any transformation node and have non-negative indices, # such as ax0.ax1.fused.ax2.fused.ax3.fused.outer with index of 0. # ###################################################################### # Summary # ------- # This tutorial demonstrates the usage of TEDD. We use an example built # with TOPI to show the schedules under the hood. You can also use # it before and after any schedule primitive to inspect its effect. #
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_schedules/tensorize.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorials-tensorize: Use Tensorize to Leverage Hardware Intrinsics ============================================= **Author**: `Yizhi Liu <https://github.com/yzhliu>`_ This is an introduction material on how to perform tensorization in TVM. By using schedule primitive :code:`tensorize`, people can replace a unit of computation with the corresponding intrinsics, making it easy to leverage handcrafted micro-kernels, as well as extend TVM to support new hardware architectures. The purpose of this tutorial is to show the functionality and usage of tensorize instead of providing an efficient solution. """ from __future__ import absolute_import, print_function # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import tvm from tvm import te import tvm.testing import numpy as np ###################################################################### # Define Matrix Multiplication # ---------------------------- # Take matrix multiplication as our example. # Matmul first multiply the corresponding elements between two matrix, # then accumulate across a certain axis. # The following lines describe the computation :code:`A * B^T` in TVM. # N, M, L = 1024, 512, 64 A = te.placeholder((N, L), name="A") B = te.placeholder((M, L), name="B") k = te.reduce_axis((0, L), name="k") C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[j, k], axis=k), name="C") s = te.create_schedule(C.op) print(tvm.lower(s, [A, B, C], simple_mode=True)) ###################################################################### # Schedule the Matmul # ------------------- # Now, suppose we have an accelerator that supports # matrix-vector multiplication (GEMV) as a hardware primitive, # which can take arbitrary size of reduce axis, # but another axis needs to be no larger than 16. # Thus we break down the matmul loops to make the innermost loops a (16x64) GEMV. # factor = 16 x, y = C.op.axis (z,) = C.op.reduce_axis yo, yi = s[C].split(y, factor=factor) s[C].reorder(x, yo, yi, z) print(tvm.lower(s, [A, B, C], simple_mode=True)) ###################################################################### # As showed in the IR printed above, # the inner loops :code:`j.inner` along with :code:`k` together form a computation of GEMV # - within the inner most two loops, the index :code:`i` is fixed, # the access to the matrix :code:`A` only varies by :code:`k`, # which makes the access pattern of :code:`A` a "vector". # In order to leverage our hypothetical hardware's GEMV instruction, # we can tensorize over :code:`j.inner`. # # Define GEMV Tensorization Intrinsic # ----------------------------------- # Before scheduling the tensorization, we need to first define the intrinsic function for GEMV. 
# It includes two parts, the first is a compute definition of GEMV. # TVM uses it to match the computing pattern in the original Matmul schedule. # The second is to specify how to execute GEMV on the device, # which is done in :code:`intrin_func` below. # def intrin_gemv(m, l): a = te.placeholder((l,), name="a") b = te.placeholder((m, l), name="b") k = te.reduce_axis((0, l), name="k") c = te.compute((m,), lambda i: te.sum(a[k] * b[i, k], axis=k), name="c") Ab = tvm.tir.decl_buffer(a.shape, a.dtype, name="A", offset_factor=1, strides=[1]) Bb = tvm.tir.decl_buffer(b.shape, b.dtype, name="B", offset_factor=1, strides=[te.var("s1"), 1]) Cb = tvm.tir.decl_buffer(c.shape, c.dtype, name="C", offset_factor=1, strides=[1]) def intrin_func(ins, outs): ib = tvm.tir.ir_builder.create() aa, bb = ins cc = outs[0] ib.emit( tvm.tir.call_extern( "int32", "gemv_update", cc.access_ptr("w"), aa.access_ptr("r"), bb.access_ptr("r"), m, l, bb.strides[0], ) ) return ib.get() return te.decl_tensor_intrin(c.op, intrin_func, binds={a: Ab, b: Bb, c: Cb}) ###################################################################### # Here :code:`te.decl_tensor_intrin` declares how to execute the computation :code:`c.op`. # Our implementation simply takes the inputs and outputs, # converts them to pointers and emit an external function call. # Note that tensorization requires user to specify :code:`offset_factor`, # with this information, TVM has knowledge of whether the data is aligned # between the start address of the original data structure # and the offset being passed to tensorize, # so that it has chance to optimize with vectorized loading. # We set the factor to 1 for simplification. # # Buffers are also declared for inputs and outputs, though this is not required, # we benefit from the extra information provided by buffers. For example, we pass # :code:`bb.strides[0]` as an argument to the external function :code:`gemv_update`. # For now :code:`bb.strides[0] == l`, # but later we will see how they can differ with more complicated schedules. # # Note that we use :code:`te.var("s1")` as the first stride dimension for :code:`B`. # If the strides can be inferred # - in this case, TVM knows tensor B is compact thus the strides are :code:`[L, 1]` - # such placeholder can be put to let TVM automatically bind the inferred value for us. # gemv = intrin_gemv(factor, L) s[C].tensorize(yi, gemv) print(tvm.lower(s, [A, B, C], simple_mode=True)) ###################################################################### # By tensorizing over :code:`yi`, the inner most two loops are # now replaced by the intrinsic function we defined before. # In order to build and run the module, let's define the external function :code:`gemv_update`, # it is a naive implementation of GEMV, just for demonstration. # def gemv_impl(): cc_code = """ extern "C" int gemv_update(float *cc, float *aa, float *bb, int m, int l, int stride) { for (int i = 0; i < m; ++i) { for (int j = 0; j < l; ++j) { cc[i] += aa[j] * bb[i * stride + j]; } } return 0; } """ from tvm.contrib import utils, clang temp = utils.tempdir() ll_path = temp.relpath("temp.ll") # Create LLVM ir from c source code ll_code = clang.create_llvm(cc_code, output=ll_path) return ll_code ###################################################################### # Now we leverage the pragma attribute :code:`import_llvm` to import llvm asm inline. # The importing needs to happen before the tensorized GEMV being executed. 
# s[C].pragma(x, "import_llvm", gemv_impl()) print(tvm.lower(s, [A, B, C], simple_mode=True)) ###################################################################### # Finally we compare the tensorize version with that :code:`numpy.dot` produces, # ensure our implementation is correct. # func = tvm.build(s, [A, B, C], target="llvm", name="gemv") from tvm.topi.utils import get_const_tuple dtype = A.dtype dev = tvm.device("cpu", 0) a = np.random.uniform(size=get_const_tuple(A.shape)).astype(dtype) b = np.random.uniform(size=get_const_tuple(B.shape)).astype(dtype) c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=dtype), dev) func(tvm.nd.array(a, dev), tvm.nd.array(b, dev), c) tvm.testing.assert_allclose(c.numpy(), np.dot(a, b.T), rtol=1e-3) ###################################################################### # Reduce-update for Tensorize # --------------------------- # So far you have learned the basic idea of tensorize, # now let's move one step forward to a more complicated case. # # Assume our accelerator could only multiply a vector by a square matrix, # in which the vector size needs to be no larger than 16. # Given such hardware constrain, now we need to split the reduce axis as following, # zo, zi = s[C].split(z, factor=factor) s[C].reorder(x, yo, zo, yi, zi) ###################################################################### # However, since the tensorize intrinsic now only covers a part of the reduce axis, # instead of using one "body" function, TVM requires a :code:`reduce_reset` function, # which will be invoked before the reduce for-loop, and a :code:`reduce_update` function, # which defines the "update" computing strategy. # def gemv_impl(): cc_code = """ extern "C" int gemv_update(float *cc, float *aa, float *bb, int m, int l, int stride) { for (int i = 0; i < m; ++i) { for (int j = 0; j < l; ++j) { cc[i] += aa[j] * bb[i * stride + j]; } } return 0; } extern "C" int gemv_reset(float *cc, int m) { for (int i = 0; i < m; ++i) { cc[i] = 0.0; } return 0; } """ from tvm.contrib import utils, clang temp = utils.tempdir() ll_path = temp.relpath("temp.ll") # Create LLVM ir from c source code ll_code = clang.create_llvm(cc_code, output=ll_path) return ll_code def intrin_gemv(m, l): a = te.placeholder((l,), name="a") b = te.placeholder((m, l), name="b") k = te.reduce_axis((0, l), name="k") c = te.compute((m,), lambda i: te.sum(a[k] * b[i, k], axis=k), name="c") Ab = tvm.tir.decl_buffer(a.shape, a.dtype, name="A", offset_factor=1, strides=[1]) Bb = tvm.tir.decl_buffer(b.shape, b.dtype, name="B", offset_factor=1, strides=[te.var("s1"), 1]) Cb = tvm.tir.decl_buffer(c.shape, c.dtype, name="C", offset_factor=1, strides=[1]) def intrin_func(ins, outs): aa, bb = ins cc = outs[0] def _body(): ib = tvm.tir.ir_builder.create() ib.emit( tvm.tir.call_extern( "int32", "gemv_update", cc.access_ptr("w"), aa.access_ptr("r"), bb.access_ptr("r"), m, l, bb.strides[0], ) ) return ib.get() def _reduce_reset(): ib = tvm.tir.ir_builder.create() ib.emit(tvm.tir.call_extern("int32", "gemv_reset", cc.access_ptr("w"), m)) return ib.get() def _reduce_update(): return _body() return _body(), _reduce_reset(), _reduce_update() return te.decl_tensor_intrin(c.op, intrin_func, binds={a: Ab, b: Bb, c: Cb}) ###################################################################### # Note that :code:`intrin_func` now returns a triplet: # :code:`(body, reduce_reset, reduce_update)`. 
# If tensorization includes all the reduce axes, function :code:`body()` will be invoked;
# otherwise :code:`reduce_reset()` and :code:`reduce_update()` together will be used.
# In our example :code:`body()` and :code:`reduce_update()`
# share the same implementation,
# while in other cases, hardware may have different instructions for these two functions.
# Moreover, we can see that :code:`bb.strides[0]` now differs from :code:`l`
# due to the tiling.
#
# Tensorize for the squared GEMV, then build and check the results.
#

gemv = intrin_gemv(factor, factor)
s[C].tensorize(yi, gemv)
s[C].pragma(yo, "import_llvm", gemv_impl())

func = tvm.build(s, [A, B, C], target="llvm", name="gemv")
a = np.random.uniform(size=get_const_tuple(A.shape)).astype(dtype)
b = np.random.uniform(size=get_const_tuple(B.shape)).astype(dtype)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=dtype), dev)
func(tvm.nd.array(a, dev), tvm.nd.array(b, dev), c)
tvm.testing.assert_allclose(c.numpy(), np.dot(a, b.T), rtol=1e-3)

######################################################################
# Summary
# -------
# This tutorial demonstrates the usage of the tensorize intrinsic in TVM.
# Tensorize provides a way for users to get a fully optimized schedule via micro-kernels.
# For example, INT8 quantization on Intel CPUs uses tensorization
# to invoke AVX instructions directly.
# It also enables TVM to compile to ASICs -
# check out :ref:`vta-index` for details.
# We also demonstrate how to use inline assembly importing,
# which helps users inject assembly easily into the schedule.
#
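######################################################################
# As a final sanity check (a small illustrative sketch, not strictly required
# by the flow above), we can confirm that the reduce-update schedule really
# lowers to calls into our external kernels, rather than to explicit
# multiply-accumulate loops, by searching the printed IR for the extern
# function names.

lowered = str(tvm.lower(s, [A, B, C], simple_mode=True))
# Both the reset and the update kernels should appear as extern calls.
assert "gemv_reset" in lowered
assert "gemv_update" in lowered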
https://github.com/zk-ml/tachikoma
gallery/how_to/work_with_schedules/tuple_inputs.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Compute and Reduce with Tuple Inputs ======================================= **Author**: `Ziheng Jiang <https://github.com/ZihengJiang>`_ Often we want to compute multiple outputs with the same shape within a single loop or perform reduction that involves multiple values like :code:`argmax`. These problems can be addressed by tuple inputs. In this tutorial, we will introduce the usage of tuple inputs in TVM. """ from __future__ import absolute_import, print_function # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import tvm from tvm import te import numpy as np ###################################################################### # Describe Batchwise Computation # ------------------------------ # For operators which have the same shape, we can put them together as # the inputs of :any:`te.compute`, if we want them to be scheduled # together in the next schedule procedure. # n = te.var("n") m = te.var("m") A0 = te.placeholder((m, n), name="A0") A1 = te.placeholder((m, n), name="A1") B0, B1 = te.compute((m, n), lambda i, j: (A0[i, j] + 2, A1[i, j] * 3), name="B") # The generated IR code would be: s = te.create_schedule(B0.op) print(tvm.lower(s, [A0, A1, B0, B1], simple_mode=True)) ###################################################################### # .. _reduction-with-tuple-inputs: # # Describe Reduction with Collaborative Inputs # -------------------------------------------- # Sometimes, we require multiple inputs to express some reduction # operators, and the inputs will collaborate together, e.g. :code:`argmax`. # In the reduction procedure, :code:`argmax` need to compare the value of # operands, also need to keep the index of operand. It can be expressed # with :py:func:`te.comm_reducer` as below: # x and y are the operands of reduction, both of them is a tuple of index # and value. def fcombine(x, y): lhs = tvm.tir.Select((x[1] >= y[1]), x[0], y[0]) rhs = tvm.tir.Select((x[1] >= y[1]), x[1], y[1]) return lhs, rhs # our identity element also need to be a tuple, so `fidentity` accepts # two types as inputs. def fidentity(t0, t1): return tvm.tir.const(-1, t0), tvm.te.min_value(t1) argmax = te.comm_reducer(fcombine, fidentity, name="argmax") # describe the reduction computation m = te.var("m") n = te.var("n") idx = te.placeholder((m, n), name="idx", dtype="int32") val = te.placeholder((m, n), name="val", dtype="int32") k = te.reduce_axis((0, n), "k") T0, T1 = te.compute((m,), lambda i: argmax((idx[i, k], val[i, k]), axis=k), name="T") # the generated IR code would be: s = te.create_schedule(T0.op) print(tvm.lower(s, [idx, val, T0, T1], simple_mode=True)) ###################################################################### # .. 
note::
#
#   For readers who are not familiar with reduction, please refer to
#   :ref:`general-reduction`.

######################################################################
# Schedule Operation with Tuple Inputs
# ------------------------------------
# It is worth mentioning that although you get multiple outputs from one
# batch operation, they can only be scheduled together as a single operation.

n = te.var("n")
m = te.var("m")
A0 = te.placeholder((m, n), name="A0")
B0, B1 = te.compute((m, n), lambda i, j: (A0[i, j] + 2, A0[i, j] * 3), name="B")
A1 = te.placeholder((m, n), name="A1")
C = te.compute((m, n), lambda i, j: A1[i, j] + B0[i, j], name="C")

s = te.create_schedule(C.op)
s[B0].compute_at(s[C], C.op.axis[0])
# as you can see in the generated IR code below:
print(tvm.lower(s, [A0, A1, C], simple_mode=True))

######################################################################
# Summary
# -------
# This tutorial introduces the usage of tuple-input operations.
#
# - Describe normal batchwise computation.
# - Describe reduction operations with tuple inputs.
# - Note that you can only schedule computation at the level of operations, not individual tensors.
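######################################################################
# To make the :code:`argmax` reducer defined earlier concrete, here is a
# minimal numerical check (assuming an LLVM-enabled build of TVM): we build
# the reduction for CPU and compare its outputs against ``numpy.argmax`` and
# ``numpy.max``. The shapes and data used here are arbitrary.

s = te.create_schedule(T0.op)
argmax_func = tvm.build(s, [idx, val, T0, T1], target="llvm")

rows, cols = 8, 16
idx_np = np.tile(np.arange(cols, dtype="int32"), (rows, 1))
# Draw distinct values so the result does not depend on tie-breaking order.
val_np = np.random.choice(10000, size=(rows, cols), replace=False).astype("int32")

out_idx = tvm.nd.array(np.zeros(rows, dtype="int32"))
out_val = tvm.nd.array(np.zeros(rows, dtype="int32"))
argmax_func(tvm.nd.array(idx_np), tvm.nd.array(val_np), out_idx, out_val)

np.testing.assert_equal(out_idx.numpy(), np.argmax(val_np, axis=1).astype("int32"))
np.testing.assert_equal(out_val.numpy(), np.max(val_np, axis=1))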
https://github.com/zk-ml/tachikoma
gallery/tutorial/auto_scheduler_matmul_x86.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Optimizing Operators with Auto-scheduling ========================================= **Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, \ `Chengfan Jia <https://github.com/jcf94/>`_ In this tutorial, we will show how TVM's Auto Scheduling feature can find optimal schedules without the need for writing a custom template. Different from the template-based :doc:`AutoTVM <autotvm_matmul_x86>` which relies on manual templates to define the search space, the auto-scheduler does not require any templates. Users only need to write the computation declaration without any schedule commands or templates. The auto-scheduler can automatically generate a large search space and find a good schedule in the space. We use matrix multiplication as an example in this tutorial. .. note:: Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import numpy as np import tvm from tvm import te, auto_scheduler ################################################################################ # Defining the Matrix Multiplication # ---------------------------------- # To start, we define a matrix multiplication with a bias addition. Note that # this uses standard operations available in TVMs Tensor Expression language. # The major difference is the use of the :any:`register_workload` decorator at the top # of the function definition. The function should return a list of # input/output tensors. From these tensors, the auto-scheduler can get the # whole computational graph. @auto_scheduler.register_workload # Note the auto_scheduler decorator def matmul_add(N, L, M, dtype): A = te.placeholder((N, L), name="A", dtype=dtype) B = te.placeholder((L, M), name="B", dtype=dtype) C = te.placeholder((N, M), name="C", dtype=dtype) k = te.reduce_axis((0, L), name="k") matmul = te.compute( (N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="matmul", attrs={"layout_free_placeholders": [B]}, # enable automatic layout transform for tensor B ) out = te.compute((N, M), lambda i, j: matmul[i, j] + C[i, j], name="out") return [A, B, C, out] ################################################################################ # Create the search task # ---------------------- # With the function defined, we can now create the task for the auto_scheduler # to search against. We specify the particular parameters for this matrix # multiplication, in this case a multiplication of two square matrices of size # 1024x1024. We then create a search task with N=L=M=1024 and dtype="float32" # # .. 
admonition:: Improve performance with custom targets # # In order for TVM to take full advantage of specific hardware platforms, # you will want to manually specify your CPU capabilities. For example: # # - replace ``llvm`` below with ``llvm -mcpu=core-avx2`` to enable AVX2 # - replace ``llvm`` below with ``llvm -mcpu=skylake-avx512`` to enable AVX-512 target = tvm.target.Target("llvm") N = L = M = 1024 task = tvm.auto_scheduler.SearchTask(func=matmul_add, args=(N, L, M, "float32"), target=target) # Inspect the computational graph print("Computational DAG:") print(task.compute_dag) ################################################################################ # Set Parameters for Auto-Scheduler # --------------------------------- # Next, we set parameters for the auto-scheduler. # # * :code:`num_measure_trials` is the number of measurement trials we can use # during the search. We only make 10 trials in this tutorial for a fast # demonstration. In practice, 1000 is a good value for the search to converge. # You can do more trials according to your time budget. # * In addition, we use :any:`RecordToFile <auto_scheduler.RecordToFile>` to log measurement records into a # file ``matmul.json``. The measurement records can be used to query the history # best, resume the search, and do more analyses later. # * see :any:`TuningOptions <auto_scheduler.TuningOptions>` for more parameters log_file = "matmul.json" tune_option = auto_scheduler.TuningOptions( num_measure_trials=10, measure_callbacks=[auto_scheduler.RecordToFile(log_file)], verbose=2, ) ################################################################################ # Run the search # -------------- # Now we get all inputs ready. Pretty simple, isn't it? We can kick off the # search and let the auto-scheduler do its magic. After some measurement # trials, we can load the best schedule from the log file and apply it. # Run auto-tuning (search) task.tune(tune_option) # Apply the best schedule sch, args = task.apply_best(log_file) ################################################################################ # Inspecting the Optimized Schedule # --------------------------------- # We can lower the schedule to see the IR after auto-scheduling. The # auto-scheduler correctly performs optimizations including multi-level tiling, # layout transformation, parallelization, vectorization, unrolling, and # operator fusion. print("Lowered TIR:") print(tvm.lower(sch, args, simple_mode=True)) ################################################################################ # Check correctness and evaluate performance # ------------------------------------------ # We build the binary and check its correctness and performance. func = tvm.build(sch, args, target) a_np = np.random.uniform(size=(N, L)).astype(np.float32) b_np = np.random.uniform(size=(L, M)).astype(np.float32) c_np = np.random.uniform(size=(N, M)).astype(np.float32) out_np = a_np.dot(b_np) + c_np dev = tvm.cpu() a_tvm = tvm.nd.array(a_np, device=dev) b_tvm = tvm.nd.array(b_np, device=dev) c_tvm = tvm.nd.array(c_np, device=dev) out_tvm = tvm.nd.empty(out_np.shape, device=dev) func(a_tvm, b_tvm, c_tvm, out_tvm) # Check results np.testing.assert_allclose(out_np, out_tvm.numpy(), rtol=1e-3) # Evaluate execution time. 
evaluator = func.time_evaluator(func.entry_name, dev, min_repeat_ms=500)
print(
    "Execution time of this operator: %.3f ms"
    % (np.median(evaluator(a_tvm, b_tvm, c_tvm, out_tvm).results) * 1000)
)

################################################################################
# Using the record file
# ---------------------
# During the search, all measurement records are logged into the record file
# ``matmul.json``. The measurement records can be used to re-apply search
# results, resume the search, and perform other analyses.
#
# Here is an example where we load the best schedule from a file, and print the
# equivalent python schedule API. This can be used for debugging and learning
# the behavior of the auto-scheduler.

print("Equivalent python schedule:")
print(task.print_best(log_file))

################################################################################
# A more complicated example is to resume the search. In this case, we need to
# create the search policy and cost model by ourselves and resume the status of
# the search policy and cost model with the log file. In the example below we
# resume the status and do 5 more trials.


def resume_search(task, log_file):
    print("Resume search:")
    cost_model = auto_scheduler.XGBModel()
    cost_model.update_from_file(log_file)
    search_policy = auto_scheduler.SketchPolicy(
        task, cost_model, init_search_callbacks=[auto_scheduler.PreloadMeasuredStates(log_file)]
    )
    tune_option = auto_scheduler.TuningOptions(
        num_measure_trials=5, measure_callbacks=[auto_scheduler.RecordToFile(log_file)]
    )
    task.tune(tune_option, search_policy=search_policy)


resume_search(task, log_file)

################################################################################
# Final Notes and Summary
# -----------------------
# In this tutorial, we have shown how to use the TVM Auto-Scheduler to
# automatically optimize a matrix multiplication, without the need to specify a
# search template. This concludes a series of examples that starts from the
# Tensor Expression (TE) language and demonstrates how TVM can optimize
# computational operations.
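################################################################################
# As a rough point of reference (an illustrative sketch only; absolute numbers
# depend heavily on your CPU and on the BLAS library NumPy is linked against),
# we can time the same matmul-plus-bias computation in NumPy and compare it to
# the tuned kernel measured above.

import timeit

numpy_runs = 10
numpy_ms = (
    min(timeit.repeat(lambda: a_np.dot(b_np) + c_np, number=numpy_runs, repeat=3))
    / numpy_runs
    * 1000
)
print("NumPy reference time: %.3f ms" % numpy_ms)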
https://github.com/zk-ml/tachikoma
gallery/tutorial/autotvm_matmul_x86.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-autotvm-matmul-x86: Optimizing Operators with Schedule Templates and AutoTVM ======================================================== **Authors**: `Lianmin Zheng <https://github.com/merrymercy>`_, `Chris Hoge <https://github.com/hogepodge>`_ In this tutorial, we show how the TVM Tensor Expression (TE) language can be used to write schedule templates that can be searched by AutoTVM to find the optimal schedule. This process is called Auto-Tuning, which helps automate the process of optimizing tensor computation. This tutorial builds on the previous :doc:`tutorial on how to write a matrix multiplication using TE <tensor_expr_get_started>`. There are two steps in auto-tuning. - The first step is defining a search space. - The second step is running a search algorithm to explore through this space. In this tutorial, you can learn how to perform these two steps in TVM. The whole workflow is illustrated by a matrix multiplication example. .. note:: Note that this tutorial will not run on Windows or recent versions of macOS. To get it to run, you will need to wrap the body of this tutorial in a :code:`if __name__ == "__main__":` block. """ ################################################################################ # Install dependencies # -------------------- # To use autotvm package in TVM, we need to install some extra dependencies. # # .. code-block:: bash # # pip3 install --user psutil xgboost cloudpickle # # To make TVM run faster in tuning, it is recommended to use cython as FFI of # TVM. In the root directory of TVM, execute: # # .. code-block:: bash # # pip3 install --user cython # sudo make cython3 # # Now return to python code. Begin by importing the required packages. # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import logging import sys import numpy as np import tvm from tvm import te import tvm.testing # the module is called `autotvm` from tvm import autotvm ################################################################################ # Basic Matrix Multiplication with TE # ----------------------------------- # Recall the basic implementation of matrix multiplication using TE. We write # it down here with a few changes. We will wrap the multiplication in a python # function definition. For simplicity, we will focus our attention on a split # optimization, using a fixed value that defines the block size of the # reordering. 
def matmul_basic(N, L, M, dtype): A = te.placeholder((N, L), name="A", dtype=dtype) B = te.placeholder((L, M), name="B", dtype=dtype) k = te.reduce_axis((0, L), name="k") C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="C") s = te.create_schedule(C.op) # schedule y, x = s[C].op.axis k = s[C].op.reduce_axis[0] yo, yi = s[C].split(y, 8) xo, xi = s[C].split(x, 8) s[C].reorder(yo, xo, k, yi, xi) return s, [A, B, C] ################################################################################ # Matrix Multiplication with AutoTVM # ---------------------------------- # In the previous schedule code, we use a constant "8" as the tiling factor. # However, it might not be the best one because the best tiling factor depends # on real hardware environment and input shape. # # If you want the schedule code to be portable across a wider range of input # shapes and target hardware, it is better to define a set of candidate values # and pick the best one according to the measurement results on target # hardware. # # In autotvm, we can define a tunable parameter, or a "knob" for such kind of # value. ################################################################################ # A Basic Matrix Multiplication Template # -------------------------------------- # We begin with an example of how to create a tunable parameter set for the # block size of the `split` scheduling operation. # Matmul V1: List candidate values @autotvm.template("tutorial/matmul_v1") # 1. use a decorator def matmul_v1(N, L, M, dtype): A = te.placeholder((N, L), name="A", dtype=dtype) B = te.placeholder((L, M), name="B", dtype=dtype) k = te.reduce_axis((0, L), name="k") C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="C") s = te.create_schedule(C.op) # schedule y, x = s[C].op.axis k = s[C].op.reduce_axis[0] # 2. get the config object cfg = autotvm.get_config() # 3. define search space cfg.define_knob("tile_y", [1, 2, 4, 8, 16]) cfg.define_knob("tile_x", [1, 2, 4, 8, 16]) # 4. schedule according to config yo, yi = s[C].split(y, cfg["tile_y"].val) xo, xi = s[C].split(x, cfg["tile_x"].val) s[C].reorder(yo, xo, k, yi, xi) return s, [A, B, C] ################################################################################ # Here we make four modifications to the previous schedule code and get a # tunable "template". We can explain the modifications one by one. # # 1. Use a decorator to mark this function as a simple template. # 2. Get a config object: You can regard this :code:`cfg` as an argument of # this function but we obtain it in a different way. With this argument, this # function is no longer a deterministic schedule. Instead, we can pass # different configurations to this function and get different schedules. A # function that uses a configuration object like this is called a "template". # # To make the template function more compact, we can do two things to define # the parameter search space within a single function. # # 1. Define a search space across a set values. This is done by making # :code:`cfg` a :any:`ConfigSpace` object. It will collect all of the # tunable knobs in this function and build a search space from it. # 2. Schedule according to an entity in this space. This is done by making # :code:`cfg` a :any:`ConfigEntity` object. When it is a # :any:`ConfigEntity`, it will ignore all space definition API (namely, # :code:`cfg.define_XXXXX(...)`). Instead, it will store deterministic # values for all tunable knobs, and we schedule according to these values. 
# # During auto-tuning, we will first call this template with a # :any:`ConfigSpace` object to build the search space. Then we call this # template with different :any:`ConfigEntity` in the built space to get # different schedules. Finally we will measure the code generated by # different schedules and pick the best one. # # 3. Define two tunable knobs. The first one is :code:`tile_y` with 5 possible # values. The second one is :code:`tile_x` with a same list of possible values. # These two knobs are independent, so they span a search space with size 25 = # 5x5. # 4. The configuration knobs are passed to the :code:`split` schedule # operation, allowing us to schedule according to the 5x5 deterministic values # we previously defined in :code:`cfg`. ################################################################################ # A Matrix Multiplication Template with the Advanced Parameter API # ---------------------------------------------------------------- # In the previous template, we manually listed all of the possible values for a # knob. This is the lowest level API to define the space, and gives an explicit # enumeration of the parameter space to search. However, we also provide # another set of APIs that can make the definition of the search space easier # and smarter. Where possible, we recommend you use this higher-level API # # In the following example, we use :any:`ConfigSpace.define_split` to define a # split knob. It will enumerate all the possible ways to split an axis and # construct the space. # # We also have :any:`ConfigSpace.define_reorder` for reorder knob and # :any:`ConfigSpace.define_annotate` for annotation like unroll, vectorization, # thread binding. When the high level API cannot meet your requirements, you # can always fall back to using the low level API. @autotvm.template("tutorial/matmul") def matmul(N, L, M, dtype): A = te.placeholder((N, L), name="A", dtype=dtype) B = te.placeholder((L, M), name="B", dtype=dtype) k = te.reduce_axis((0, L), name="k") C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="C") s = te.create_schedule(C.op) # schedule y, x = s[C].op.axis k = s[C].op.reduce_axis[0] ##### define space begin ##### cfg = autotvm.get_config() cfg.define_split("tile_y", y, num_outputs=2) cfg.define_split("tile_x", x, num_outputs=2) ##### define space end ##### # schedule according to config yo, yi = cfg["tile_y"].apply(s, C, y) xo, xi = cfg["tile_x"].apply(s, C, x) s[C].reorder(yo, xo, k, yi, xi) return s, [A, B, C] ################################################################################ # .. admonition:: More Explanation on :code:`cfg.define_split` # # In this template, :code:`cfg.define_split("tile_y", y, num_outputs=2)` will # enumerate all possible combinations that can split axis y into two axes with # factors of the length of y. For example, if the length of y is 32 and we # want to split it into two axes using factors of 32, then there are 6 # possible values for (length of outer axis, length of inner axis) pair, # namely (32, 1), (16, 2), (8, 4), (4, 8), (2, 16) or (1, 32). These are all 6 # possible values of `tile_y`. # # During scheduling, :code:`cfg["tile_y"]` is a :code:`SplitEntity` object. # We stores the lengths of outer axes and inner axes in # :code:`cfg['tile_y'].size` (a tuple with two elements). In this template, # we apply it by using :code:`yo, yi = cfg['tile_y'].apply(s, C, y)`. 
# Actually, this is equivalent to :code:`yo, yi = s[C].split(y, # cfg["tile_y"].size[1])` or :code:`yo, yi = s[C].split(y, # nparts=cfg['tile_y"].size[0])` # # The advantage of using cfg.apply API is that it makes multi-level splits # (that is, when num_outputs >= 3) easier. ################################################################################ # Step 2: Use AutoTVM to Optimize the Matrix Multiplication # --------------------------------------------------------- # In Step 1, we wrote a matrix multiplication template that allowed us to # parameterize the block size used in the `split` schedule. We can now conduct # a search over this parameter space. The next step is to pick a tuner to guide # the exploration of this space. # # Auto-tuners in TVM # ~~~~~~~~~~~~~~~~~~ # The job for a tuner can be described by following pseudo code # # .. code-block:: c # # ct = 0 # while ct < max_number_of_trials: # propose a batch of configs # measure this batch of configs on real hardware and get results # ct += batch_size # # When proposing the next batch of configs, the tuner can take different # strategies. Some of the tuner strategies provided by TVM include: # # * :any:`tvm.autotvm.tuner.RandomTuner`: Enumerate the space in a random order # * :any:`tvm.autotvm.tuner.GridSearchTuner`: Enumerate the space in a grid search order # * :any:`tvm.autotvm.tuner.GATuner`: Using genetic algorithm to search through the space # * :any:`tvm.autotvm.tuner.XGBTuner`: Uses a model based method. Train a XGBoost model to # predict the speed of lowered IR and pick the next batch according to the # prediction. # # You can choose the tuner according to the size of your space, your time # budget and other factors. For example, if your space is very small (less # than 1000), a grid-search tuner or a random tuner is good enough. If your # space is at the level of 10^9 (this is the space size of a conv2d operator on # CUDA GPU), XGBoostTuner can explore more efficiently and find better configs. ################################################################################ # Begin tuning # ~~~~~~~~~~~~ # Here we continue our matrix multiplication example. First we create a tuning # task. We can also inspect the initialized search space. In this case, for a # 512x512 square matrix multiplication, the space size is 10x10=100 Note that # the task and search space are independent of the tuner picked. N, L, M = 512, 512, 512 task = autotvm.task.create("tutorial/matmul", args=(N, L, M, "float32"), target="llvm") print(task.config_space) ################################################################################ # Then we need to define how to measure the generated code and pick a tuner. # Since our space is small, a random tuner is just okay. # # We only make 10 trials in this tutorial for demonstration. In practice, you # can do more trials according to your time budget. We will log the tuning # results into a log file. This file can be used to choose the best # configuration discovered by the tuner later. # logging config (for printing tuning log to the screen) logging.getLogger("autotvm").setLevel(logging.DEBUG) logging.getLogger("autotvm").addHandler(logging.StreamHandler(sys.stdout)) ################################################################################ # There are two steps for measuring a config: build and run. By default, we use # all CPU cores to compile program. We then measure them sequentially. To help # reduce variance, we take 5 measurements and average them. 
measure_option = autotvm.measure_option(builder="local", runner=autotvm.LocalRunner(number=5))

# Begin tuning with RandomTuner, log records to file ``matmul.log``
# You can use alternatives like XGBTuner.
tuner = autotvm.tuner.RandomTuner(task)
tuner.tune(
    n_trial=10,
    measure_option=measure_option,
    callbacks=[autotvm.callback.log_to_file("matmul.log")],
)

################################################################################
# With tuning completed, we can choose the configuration from the log file that
# has the best measured performance and compile the schedule with the
# corresponding parameters. We also do a quick verification that the schedule is
# producing correct answers. We can call the function :code:`matmul` directly
# under the :any:`autotvm.apply_history_best` context. When we call this
# function, it will query the dispatch context with its argument and get the
# best config with the same argument.

# apply history best from log file
with autotvm.apply_history_best("matmul.log"):
    with tvm.target.Target("llvm"):
        s, arg_bufs = matmul(N, L, M, "float32")
        func = tvm.build(s, arg_bufs)

# check correctness
a_np = np.random.uniform(size=(N, L)).astype(np.float32)
b_np = np.random.uniform(size=(L, M)).astype(np.float32)
c_np = a_np.dot(b_np)

c_tvm = tvm.nd.empty(c_np.shape)
func(tvm.nd.array(a_np), tvm.nd.array(b_np), c_tvm)

tvm.testing.assert_allclose(c_np, c_tvm.numpy(), rtol=1e-4)

################################################################################
# Final Notes and Summary
# -----------------------
# In this tutorial, we have shown how to build operator templates that allow
# TVM to search a parameter space and choose optimized schedule configurations.
# To gain a deeper understanding of how this works, we recommend expanding on
# this example by adding new search parameters to the schedule based on the
# schedule operations demonstrated in the
# :ref:`Getting Started With Tensor Expressions <tensor_expr_get_started>` tutorial.
# In the upcoming sections, we will demonstrate the AutoScheduler, a method for
# TVM to optimize common operators without the need for a user-defined template.
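################################################################################
# As an optional, illustrative extra step (reusing the ``func``, ``a_np``,
# ``b_np`` and ``c_tvm`` objects created above), we can also measure the
# runtime of the tuned kernel with TVM's ``time_evaluator`` utility, the same
# mechanism used in the auto-scheduling tutorial.

dev = tvm.cpu()
evaluator = func.time_evaluator(func.entry_name, dev, number=10)
mean_ms = np.mean(evaluator(tvm.nd.array(a_np), tvm.nd.array(b_np), c_tvm).results) * 1000
print("Tuned matmul execution time: %.3f ms" % mean_ms)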
https://github.com/zk-ml/tachikoma
gallery/tutorial/autotvm_relay_x86.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Compiling and Optimizing a Model with the Python Interface (AutoTVM) ==================================================================== **Author**: `Chris Hoge <https://github.com/hogepodge>`_ In the `TVMC Tutorial <tvmc_command_line_driver>`_, we covered how to compile, run, and tune a pre-trained vision model, ResNet-50 v2 using the command line interface for TVM, TVMC. TVM is more that just a command-line tool though, it is an optimizing framework with APIs available for a number of different languages that gives you tremendous flexibility in working with machine learning models. In this tutorial we will cover the same ground we did with TVMC, but show how it is done with the Python API. Upon completion of this section, we will have used the Python API for TVM to accomplish the following tasks: * Compile a pre-trained ResNet-50 v2 model for the TVM runtime. * Run a real image through the compiled model, and interpret the output and model performance. * Tune the model that model on a CPU using TVM. * Re-compile an optimized model using the tuning data collected by TVM. * Run the image through the optimized model, and compare the output and model performance. The goal of this section is to give you an overview of TVM's capabilites and how to use them through the Python API. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore ################################################################################ # TVM is a deep learning compiler framework, with a number of different modules # available for working with deep learning models and operators. In this # tutorial we will work through how to load, compile, and optimize a model # using the Python API. # # We begin by importing a number of dependencies, including ``onnx`` for # loading and converting the model, helper utilities for downloading test data, # the Python Image Library for working with the image data, ``numpy`` for pre # and post-processing of the image data, the TVM Relay framework, and the TVM # Graph Executor. import onnx from tvm.contrib.download import download_testdata from PIL import Image import numpy as np import tvm.relay as relay import tvm from tvm.contrib import graph_executor ################################################################################ # Downloading and Loading the ONNX Model # -------------------------------------- # # For this tutorial, we will be working with ResNet-50 v2. ResNet-50 is a # convolutional neural network that is 50 layers deep and designed to classify # images. The model we will be using has been pre-trained on more than a # million images with 1000 different classifications. The network has an input # image size of 224x224. 
If you are interested exploring more of how the # ResNet-50 model is structured, we recommend downloading # `Netron <https://netron.app>`_, a freely available ML model viewer. # # TVM provides a helper library to download pre-trained models. By providing a # model URL, file name, and model type through the module, TVM will download # the model and save it to disk. For the instance of an ONNX model, you can # then load it into memory using the ONNX runtime. # # .. admonition:: Working with Other Model Formats # # TVM supports many popular model formats. A list can be found in the # :ref:`Compile Deep Learning Models <tutorial-frontend>` section of the TVM # Documentation. model_url = ( "https://github.com/onnx/models/raw/main/" "vision/classification/resnet/model/" "resnet50-v2-7.onnx" ) model_path = download_testdata(model_url, "resnet50-v2-7.onnx", module="onnx") onnx_model = onnx.load(model_path) # Seed numpy's RNG to get consistent results np.random.seed(0) ################################################################################ # Downloading, Preprocessing, and Loading the Test Image # ------------------------------------------------------ # # Each model is particular when it comes to expected tensor shapes, formats and # data types. For this reason, most models require some pre and # post-processing, to ensure the input is valid and to interpret the output. # TVMC has adopted NumPy's ``.npz`` format for both input and output data. # # As input for this tutorial, we will use the image of a cat, but you can feel # free to substitute this image for any of your choosing. # # .. image:: https://s3.amazonaws.com/model-server/inputs/kitten.jpg # :height: 224px # :width: 224px # :align: center # # Download the image data, then convert it to a numpy array to use as an input to the model. img_url = "https://s3.amazonaws.com/model-server/inputs/kitten.jpg" img_path = download_testdata(img_url, "imagenet_cat.png", module="data") # Resize it to 224x224 resized_image = Image.open(img_path).resize((224, 224)) img_data = np.asarray(resized_image).astype("float32") # Our input image is in HWC layout while ONNX expects CHW input, so convert the array img_data = np.transpose(img_data, (2, 0, 1)) # Normalize according to the ImageNet input specification imagenet_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1)) imagenet_stddev = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1)) norm_img_data = (img_data / 255 - imagenet_mean) / imagenet_stddev # Add the batch dimension, as we are expecting 4-dimensional input: NCHW. img_data = np.expand_dims(norm_img_data, axis=0) ############################################################################### # Compile the Model With Relay # ---------------------------- # # The next step is to compile the ResNet model. We begin by importing the model # to relay using the `from_onnx` importer. We then build the model, with # standard optimizations, into a TVM library. Finally, we create a TVM graph # runtime module from the library. target = "llvm" ###################################################################### # .. admonition:: Defining the Correct Target # # Specifying the correct target can have a huge impact on the performance of # the compiled module, as it can take advantage of hardware features # available on the target. For more information, please refer to # :ref:`Auto-tuning a convolutional network for x86 CPU <tune_relay_x86>`. 
# We recommend identifying which CPU you are running, along with optional # features, and set the target appropriately. For example, for some # processors ``target = "llvm -mcpu=skylake"``, or ``target = "llvm # -mcpu=skylake-avx512"`` for processors with the AVX-512 vector instruction # set. # # The input name may vary across model types. You can use a tool # like Netron to check input names input_name = "data" shape_dict = {input_name: img_data.shape} mod, params = relay.frontend.from_onnx(onnx_model, shape_dict) with tvm.transform.PassContext(opt_level=3): lib = relay.build(mod, target=target, params=params) dev = tvm.device(str(target), 0) module = graph_executor.GraphModule(lib["default"](dev)) ###################################################################### # Execute on the TVM Runtime # -------------------------- # Now that we've compiled the model, we can use the TVM runtime to make # predictions with it. To use TVM to run the model and make predictions, we # need two things: # # - The compiled model, which we just produced. # - Valid input to the model to make predictions on. dtype = "float32" module.set_input(input_name, img_data) module.run() output_shape = (1, 1000) tvm_output = module.get_output(0, tvm.nd.empty(output_shape)).numpy() ################################################################################ # Collect Basic Performance Data # ------------------------------ # We want to collect some basic performance data associated with this # unoptimized model and compare it to a tuned model later. To help account for # CPU noise, we run the computation in multiple batches in multiple # repetitions, then gather some basis statistics on the mean, median, and # standard deviation. import timeit timing_number = 10 timing_repeat = 10 unoptimized = ( np.array(timeit.Timer(lambda: module.run()).repeat(repeat=timing_repeat, number=timing_number)) * 1000 / timing_number ) unoptimized = { "mean": np.mean(unoptimized), "median": np.median(unoptimized), "std": np.std(unoptimized), } print(unoptimized) ################################################################################ # Postprocess the output # ---------------------- # # As previously mentioned, each model will have its own particular way of # providing output tensors. # # In our case, we need to run some post-processing to render the outputs from # ResNet-50 v2 into a more human-readable form, using the lookup-table provided # for the model. from scipy.special import softmax # Download a list of labels labels_url = "https://s3.amazonaws.com/onnx-model-zoo/synset.txt" labels_path = download_testdata(labels_url, "synset.txt", module="data") with open(labels_path, "r") as f: labels = [l.rstrip() for l in f] # Open the output and read the output tensor scores = softmax(tvm_output) scores = np.squeeze(scores) ranks = np.argsort(scores)[::-1] for rank in ranks[0:5]: print("class='%s' with probability=%f" % (labels[rank], scores[rank])) ################################################################################ # This should produce the following output: # # .. 
code-block:: bash # # # class='n02123045 tabby, tabby cat' with probability=0.610553 # # class='n02123159 tiger cat' with probability=0.367179 # # class='n02124075 Egyptian cat' with probability=0.019365 # # class='n02129604 tiger, Panthera tigris' with probability=0.001273 # # class='n04040759 radiator' with probability=0.000261 ################################################################################ # Tune the model # -------------- # The previous model was compiled to work on the TVM runtime, but did not # include any platform-specific optimizations. In this section, we will show you # how to build an optimized model using TVM to target your working platform. # # In some cases, we might not get the expected performance when running # inferences using our compiled module. In cases like this, we can make use of # the auto-tuner to find a better configuration for our model and get a boost # in performance. Tuning in TVM refers to the process by which a model is # optimized to run faster on a given target. This differs from training or # fine-tuning in that it does not affect the accuracy of the model, but only # the runtime performance. As part of the tuning process, TVM will try running # many different operator implementation variants to see which perform best. # The results of these runs are stored in a tuning records file. # # In the simplest form, tuning requires you to provide three things: # # - the target specification of the device you intend to run this model on # - the path to an output file in which the tuning records will be stored # - a path to the model to be tuned. # import tvm.auto_scheduler as auto_scheduler from tvm.autotvm.tuner import XGBTuner from tvm import autotvm ################################################################################ # Set up some basic parameters for the runner. The runner takes compiled code # that is generated with a specific set of parameters and measures the # performance of it. ``number`` specifies the number of times each tested # configuration will be run to average over, while ``repeat`` specifies how many # measurements we will take of each configuration. ``min_repeat_ms`` specifies # the minimum duration of a configuration test in milliseconds; if a run # completes in less than this time, the number of runs will be increased. This option is necessary # for accurate tuning on GPUs, and is not required for CPU tuning. Setting this # value to 0 disables it. The ``timeout`` places an upper limit on how long to # run training code for each tested configuration. number = 10 repeat = 1 min_repeat_ms = 0 # since we're tuning on a CPU, can be set to 0 timeout = 10 # in seconds # create a TVM runner runner = autotvm.LocalRunner( number=number, repeat=repeat, timeout=timeout, min_repeat_ms=min_repeat_ms, enable_cpu_cache_flush=True, ) ################################################################################ # Create a simple structure for holding tuning options. We use an XGBoost # algorithm for guiding the search. For a production job, you will want to set # the number of trials to be larger than the value of 20 used here. For CPU we # recommend 1500, for GPU 3000-4000. The number of trials required can depend # on the particular model and processor, so it's worth spending some time # evaluating performance across a range of values to find the best balance # between tuning time and model optimization. Because running tuning is time # intensive, we set the number of trials to 20, but do not recommend a value this # small.
The ``early_stopping`` parameter is the minimum number of trails to # run before a condition that stops the search early can be applied. The # measure option indicates where trial code will be built, and where it will be # run. In this case, we're using the ``LocalRunner`` we just created and a # ``LocalBuilder``. The ``tuning_records`` option specifies a file to write # the tuning data to. tuning_option = { "tuner": "xgb", "trials": 20, "early_stopping": 100, "measure_option": autotvm.measure_option( builder=autotvm.LocalBuilder(build_func="default"), runner=runner ), "tuning_records": "resnet-50-v2-autotuning.json", } ################################################################################ # .. admonition:: Defining the Tuning Search Algorithm # # By default this search is guided using an `XGBoost Grid` algorithm. # Depending on your model complexity and amount of time available, you might # want to choose a different algorithm. ################################################################################ # .. admonition:: Setting Tuning Parameters # # In this example, in the interest of time, we set the number of trials and # early stopping to 10. You will likely see more performance improvements if # you set these values to be higher but this comes at the expense of time # spent tuning. The number of trials required for convergence will vary # depending on the specifics of the model and the target platform. # begin by extracting the tasks from the onnx model tasks = autotvm.task.extract_from_program(mod["main"], target=target, params=params) # Tune the extracted tasks sequentially. for i, task in enumerate(tasks): prefix = "[Task %2d/%2d] " % (i + 1, len(tasks)) tuner_obj = XGBTuner(task, loss_type="rank") tuner_obj.tune( n_trial=min(tuning_option["trials"], len(task.config_space)), early_stopping=tuning_option["early_stopping"], measure_option=tuning_option["measure_option"], callbacks=[ autotvm.callback.progress_bar(tuning_option["trials"], prefix=prefix), autotvm.callback.log_to_file(tuning_option["tuning_records"]), ], ) ################################################################################ # The output from this tuning process will look something like this: # # .. code-block:: bash # # # [Task 1/24] Current/Best: 10.71/ 21.08 GFLOPS | Progress: (60/1000) | 111.77 s Done. # # [Task 1/24] Current/Best: 9.32/ 24.18 GFLOPS | Progress: (192/1000) | 365.02 s Done. # # [Task 2/24] Current/Best: 22.39/ 177.59 GFLOPS | Progress: (960/1000) | 976.17 s Done. # # [Task 3/24] Current/Best: 32.03/ 153.34 GFLOPS | Progress: (800/1000) | 776.84 s Done. # # [Task 4/24] Current/Best: 11.96/ 156.49 GFLOPS | Progress: (960/1000) | 632.26 s Done. # # [Task 5/24] Current/Best: 23.75/ 130.78 GFLOPS | Progress: (800/1000) | 739.29 s Done. # # [Task 6/24] Current/Best: 38.29/ 198.31 GFLOPS | Progress: (1000/1000) | 624.51 s Done. # # [Task 7/24] Current/Best: 4.31/ 210.78 GFLOPS | Progress: (1000/1000) | 701.03 s Done. # # [Task 8/24] Current/Best: 50.25/ 185.35 GFLOPS | Progress: (972/1000) | 538.55 s Done. # # [Task 9/24] Current/Best: 50.19/ 194.42 GFLOPS | Progress: (1000/1000) | 487.30 s Done. # # [Task 10/24] Current/Best: 12.90/ 172.60 GFLOPS | Progress: (972/1000) | 607.32 s Done. # # [Task 11/24] Current/Best: 62.71/ 203.46 GFLOPS | Progress: (1000/1000) | 581.92 s Done. # # [Task 12/24] Current/Best: 36.79/ 224.71 GFLOPS | Progress: (1000/1000) | 675.13 s Done. # # [Task 13/24] Current/Best: 7.76/ 219.72 GFLOPS | Progress: (1000/1000) | 519.06 s Done. 
# # [Task 14/24] Current/Best: 12.26/ 202.42 GFLOPS | Progress: (1000/1000) | 514.30 s Done. # # [Task 15/24] Current/Best: 31.59/ 197.61 GFLOPS | Progress: (1000/1000) | 558.54 s Done. # # [Task 16/24] Current/Best: 31.63/ 206.08 GFLOPS | Progress: (1000/1000) | 708.36 s Done. # # [Task 17/24] Current/Best: 41.18/ 204.45 GFLOPS | Progress: (1000/1000) | 736.08 s Done. # # [Task 18/24] Current/Best: 15.85/ 222.38 GFLOPS | Progress: (980/1000) | 516.73 s Done. # # [Task 19/24] Current/Best: 15.78/ 203.41 GFLOPS | Progress: (1000/1000) | 587.13 s Done. # # [Task 20/24] Current/Best: 30.47/ 205.92 GFLOPS | Progress: (980/1000) | 471.00 s Done. # # [Task 21/24] Current/Best: 46.91/ 227.99 GFLOPS | Progress: (308/1000) | 219.18 s Done. # # [Task 22/24] Current/Best: 13.33/ 207.66 GFLOPS | Progress: (1000/1000) | 761.74 s Done. # # [Task 23/24] Current/Best: 53.29/ 192.98 GFLOPS | Progress: (1000/1000) | 799.90 s Done. # # [Task 24/24] Current/Best: 25.03/ 146.14 GFLOPS | Progress: (1000/1000) | 1112.55 s Done. ################################################################################ # Compiling an Optimized Model with Tuning Data # ---------------------------------------------- # # As an output of the tuning process above, we obtained the tuning records # stored in ``resnet-50-v2-autotuning.json``. The compiler will use the results to # generate high performance code for the model on your specified target. # # Now that tuning data for the model has been collected, we can re-compile the # model using optimized operators to speed up our computations. with autotvm.apply_history_best(tuning_option["tuning_records"]): with tvm.transform.PassContext(opt_level=3, config={}): lib = relay.build(mod, target=target, params=params) dev = tvm.device(str(target), 0) module = graph_executor.GraphModule(lib["default"](dev)) ################################################################################ # Verify that the optimized model runs and produces the same results: dtype = "float32" module.set_input(input_name, img_data) module.run() output_shape = (1, 1000) tvm_output = module.get_output(0, tvm.nd.empty(output_shape)).numpy() scores = softmax(tvm_output) scores = np.squeeze(scores) ranks = np.argsort(scores)[::-1] for rank in ranks[0:5]: print("class='%s' with probability=%f" % (labels[rank], scores[rank])) ################################################################################ # Verifying that the predictions are the same: # # .. code-block:: bash # # # class='n02123045 tabby, tabby cat' with probability=0.610550 # # class='n02123159 tiger cat' with probability=0.367181 # # class='n02124075 Egyptian cat' with probability=0.019365 # # class='n02129604 tiger, Panthera tigris' with probability=0.001273 # # class='n04040759 radiator' with probability=0.000261 ################################################################################ # Comparing the Tuned and Untuned Models # -------------------------------------- # We want to collect some basic performance data associated with this optimized # model to compare it to the unoptimized model. Depending on your underlying # hardware, number of iterations, and other factors, you should see a performance # improvement in comparing the optimized model to the unoptimized model. 
import timeit timing_number = 10 timing_repeat = 10 optimized = ( np.array(timeit.Timer(lambda: module.run()).repeat(repeat=timing_repeat, number=timing_number)) * 1000 / timing_number ) optimized = {"mean": np.mean(optimized), "median": np.median(optimized), "std": np.std(optimized)} print("optimized: %s" % (optimized)) print("unoptimized: %s" % (unoptimized)) ################################################################################ # Final Remarks # ------------- # # In this tutorial, we gave a short example of how to use the TVM Python API # to compile, run, and tune a model. We also discussed the need for pre and # post-processing of inputs and outputs. After the tuning process, we # demonstrated how to compare the performance of the unoptimized and optimized # models. # # Here we presented a simple example using ResNet-50 v2 locally. However, TVM # supports many more features including cross-compilation, remote execution and # profiling/benchmarking.
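################################################################################
# As an optional appendix (a minimal sketch, not part of the original tutorial
# flow): the tuned, compiled module held in ``lib`` can be exported to disk and
# reloaded later, so the tuning and compilation work does not need to be
# repeated. The file name used here is an arbitrary choice for this sketch.

# Export the compiled library produced by relay.build to a shared object file.
lib.export_library("resnet50-v2-7-tvm-tuned.so")

# In a later session, load the library back and create a graph executor from it.
loaded_lib = tvm.runtime.load_module("resnet50-v2-7-tvm-tuned.so")
loaded_module = graph_executor.GraphModule(loaded_lib["default"](dev))
loaded_module.set_input(input_name, img_data)
loaded_module.run()
reloaded_output = loaded_module.get_output(0, tvm.nd.empty(output_shape)).numpy()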
https://github.com/zk-ml/tachikoma
gallery/tutorial/cross_compilation_and_rpc.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-cross-compilation-and-rpc: Cross Compilation and RPC ========================= **Author**: `Ziheng Jiang <https://github.com/ZihengJiang/>`_, `Lianmin Zheng <https://github.com/merrymercy/>`_ This tutorial introduces cross compilation and remote device execution with RPC in TVM. With cross compilation and RPC, you can **compile a program on your local machine then run it on the remote device**. It is useful when the remote device resource are limited, like Raspberry Pi and mobile platforms. In this tutorial, we will use the Raspberry Pi for a CPU example and the Firefly-RK3399 for an OpenCL example. """ ###################################################################### # Build TVM Runtime on Device # --------------------------- # # The first step is to build the TVM runtime on the remote device. # # .. note:: # # All instructions in both this section and the next section should be # executed on the target device, e.g. Raspberry Pi. We assume the target # is running Linux. # # Since we do compilation on the local machine, the remote device is only used # for running the generated code. We only need to build the TVM runtime on # the remote device. # # .. code-block:: bash # # git clone --recursive https://github.com/apache/tvm tvm # cd tvm # make runtime -j2 # # After building the runtime successfully, we need to set environment variables # in :code:`~/.bashrc` file. We can edit :code:`~/.bashrc` # using :code:`vi ~/.bashrc` and add the line below (Assuming your TVM # directory is in :code:`~/tvm`): # # .. code-block:: bash # # export PYTHONPATH=$PYTHONPATH:~/tvm/python # # To update the environment variables, execute :code:`source ~/.bashrc`. ###################################################################### # Set Up RPC Server on Device # --------------------------- # To start an RPC server, run the following command on your remote device # (Which is Raspberry Pi in this example). # # .. code-block:: bash # # python -m tvm.exec.rpc_server --host 0.0.0.0 --port=9090 # # If you see the line below, it means the RPC server started # successfully on your device. # # .. code-block:: bash # # INFO:root:RPCServer: bind to 0.0.0.0:9090 # ###################################################################### # Declare and Cross Compile Kernel on Local Machine # ------------------------------------------------- # # .. note:: # # Now we go back to the local machine, which has a full TVM installed # (with LLVM). 
# # Here we will declare a simple kernel on the local machine: # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import numpy as np import tvm from tvm import te from tvm import rpc from tvm.contrib import utils n = tvm.runtime.convert(1024) A = te.placeholder((n,), name="A") B = te.compute((n,), lambda i: A[i] + 1.0, name="B") s = te.create_schedule(B.op) ###################################################################### # Then we cross compile the kernel. # The target should be 'llvm -mtriple=armv7l-linux-gnueabihf' for # Raspberry Pi 3B, but we use 'llvm' here to make this tutorial runnable # on our webpage building server. See the detailed note in the following block. local_demo = True if local_demo: target = "llvm" else: target = "llvm -mtriple=armv7l-linux-gnueabihf" func = tvm.build(s, [A, B], target=target, name="add_one") # save the lib at a local temp folder temp = utils.tempdir() path = temp.relpath("lib.tar") func.export_library(path) ###################################################################### # .. note:: # # To run this tutorial with a real remote device, change :code:`local_demo` # to False and replace :code:`target` in :code:`build` with the appropriate # target triple for your device. The target triple which might be # different for different devices. For example, it is # :code:`'llvm -mtriple=armv7l-linux-gnueabihf'` for Raspberry Pi 3B and # :code:`'llvm -mtriple=aarch64-linux-gnu'` for RK3399. # # Usually, you can query the target by running :code:`gcc -v` on your # device, and looking for the line starting with :code:`Target:` # (Though it may still be a loose configuration.) # # Besides :code:`-mtriple`, you can also set other compilation options # like: # # * -mcpu=<cpuname> # Specify a specific chip in the current architecture to generate code for. By default this is inferred from the target triple and autodetected to the current architecture. # * -mattr=a1,+a2,-a3,... # Override or control specific attributes of the target, such as whether SIMD operations are enabled or not. The default set of attributes is set by the current CPU. # To get the list of available attributes, you can do: # # .. code-block:: bash # # llc -mtriple=<your device target triple> -mattr=help # # These options are consistent with `llc <http://llvm.org/docs/CommandGuide/llc.html>`_. # It is recommended to set target triple and feature set to contain specific # feature available, so we can take full advantage of the features of the # board. # You can find more details about cross compilation attributes from # `LLVM guide of cross compilation <https://clang.llvm.org/docs/CrossCompilation.html>`_. ###################################################################### # Run CPU Kernel Remotely by RPC # ------------------------------ # We show how to run the generated CPU kernel on the remote device. # First we obtain an RPC session from remote device. if local_demo: remote = rpc.LocalSession() else: # The following is my environment, change this to the IP address of your target device host = "10.77.1.162" port = 9090 remote = rpc.connect(host, port) ###################################################################### # Upload the lib to the remote device, then invoke a device local # compiler to relink them. Now `func` is a remote module object. 
remote.upload(path) func = remote.load_module("lib.tar") # create arrays on the remote device dev = remote.cpu() a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev) # the function will run on the remote device func(a, b) np.testing.assert_equal(b.numpy(), a.numpy() + 1) ###################################################################### # When you want to evaluate the performance of the kernel on the remote # device, it is important to avoid the overhead of network. # :code:`time_evaluator` will returns a remote function that runs the # function over number times, measures the cost per run on the remote # device and returns the measured cost. Network overhead is excluded. time_f = func.time_evaluator(func.entry_name, dev, number=10) cost = time_f(a, b).mean print("%g secs/op" % cost) ######################################################################### # Run OpenCL Kernel Remotely by RPC # --------------------------------- # For remote OpenCL devices, the workflow is almost the same as above. # You can define the kernel, upload files, and run via RPC. # # .. note:: # # Raspberry Pi does not support OpenCL, the following code is tested on # Firefly-RK3399. You may follow this `tutorial <https://gist.github.com/mli/585aed2cec0b5178b1a510f9f236afa2>`_ # to setup the OS and OpenCL driver for RK3399. # # Also we need to build the runtime with OpenCL enabled on rk3399 board. In the TVM # root directory, execute # # .. code-block:: bash # # cp cmake/config.cmake . # sed -i "s/USE_OPENCL OFF/USE_OPENCL ON/" config.cmake # make runtime -j4 # # The following function shows how we run an OpenCL kernel remotely def run_opencl(): # NOTE: This is the setting for my rk3399 board. You need to modify # them according to your environment. opencl_device_host = "10.77.1.145" opencl_device_port = 9090 target = tvm.target.Target("opencl", host="llvm -mtriple=aarch64-linux-gnu") # create schedule for the above "add one" compute declaration s = te.create_schedule(B.op) xo, xi = s[B].split(B.op.axis[0], factor=32) s[B].bind(xo, te.thread_axis("blockIdx.x")) s[B].bind(xi, te.thread_axis("threadIdx.x")) func = tvm.build(s, [A, B], target=target) remote = rpc.connect(opencl_device_host, opencl_device_port) # export and upload path = temp.relpath("lib_cl.tar") func.export_library(path) remote.upload(path) func = remote.load_module("lib_cl.tar") # run dev = remote.cl() a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev) func(a, b) np.testing.assert_equal(b.numpy(), a.numpy() + 1) print("OpenCL test passed!") ###################################################################### # Summary # ------- # This tutorial provides a walk through of cross compilation and RPC # features in TVM. # # - Set up an RPC server on the remote device. # - Set up the target device configuration to cross compile the kernels on the # local machine. # - Upload and run the kernels remotely via the RPC API.
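######################################################################
# Note that ``run_opencl`` above is only defined, never called, because it
# requires a real RK3399 board. As a minimal sketch of how it could be wired
# in, guard the call with the same ``local_demo`` flag used for the CPU
# example (after editing the host and port inside the function to match your
# own device):

if not local_demo:
    # Only attempt the remote OpenCL run when a real device is configured.
    run_opencl()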
https://github.com/zk-ml/tachikoma
gallery/tutorial/install.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Installing TVM ============== **Authors**: `Jocelyn Shiue <https://github.com/>`_, `Chris Hoge <https://github.com/hogepodge>`_ Depending on your needs and your working environment, there are a few different methods for installing TVM. These include: * Installing from source * Installing from third-party binary package. """ ################################################################################ # Installing From Source # ---------------------- # Installing from source is the recommended method for installing TVM. It will # allow you to enable specific features such as GPU support, microcontroller # support (microTVM), and a debugging runtime, and other features. You will also # want to install from source if you want to actively contribute to the TVM # project. The full instructions are on the :ref:`Install TVM From Source # <install-from-source>` page. ################################################################################ # Installing From Binary Packages # -------------------------------- # You may install convenient third party binary package distributions to # quickly try things out. TLCPack is a third party volunteer community that # builds binary packages from TVM source. It offers a support matrix with # instructions to install on different platforms, with different features. # Check out `TLCPack <https://tlcpack.ai>`_ to learn more. Note that the # third party binary packages could contain additional licensing terms for # the hardware drivers that are bundled with it. # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore
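################################################################################
# Whichever method you choose, a quick sanity check is to import TVM from a
# Python interpreter and print its version; if the import succeeds, the runtime
# and Python bindings are visible to your environment. (This is a generic
# check, not specific to either installation method.)

import tvm

print(tvm.__version__)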
https://github.com/zk-ml/tachikoma
gallery/tutorial/intro_topi.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-topi: Introduction to TOPI ==================== **Author**: `Ehsan M. Kermani <https://github.com/ehsanmok>`_ This is an introductory tutorial to TVM Operator Inventory (TOPI). TOPI provides numpy-style generic operations and schedules with higher abstractions than TVM. In this tutorial, we will see how TOPI can save us from writing boilerplate code in TVM. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import tvm import tvm.testing from tvm import te from tvm import topi import numpy as np ###################################################################### # Basic example # ------------- # Let's revisit the sum of rows operation (equivalent to :code:`B = numpy.sum(A, axis=1)`') \ # To compute the sum of rows of a two dimensional TVM tensor A, we should # specify the symbolic operation as well as schedule as follows # n = te.var("n") m = te.var("m") A = te.placeholder((n, m), name="A") k = te.reduce_axis((0, m), "k") B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B") s = te.create_schedule(B.op) ###################################################################### # and to examine the IR code in human readable format, we can do # print(tvm.lower(s, [A], simple_mode=True)) ###################################################################### # However, for such a common operation we had to define the reduce axis ourselves as well as explicit computation with # :code:`te.compute`. Imagine for more complicated operations how much details we need to provide. # Fortunately, we can replace those two lines with simple :code:`topi.sum` much like :code:`numpy.sum` # C = topi.sum(A, axis=1) ts = te.create_schedule(C.op) print(tvm.lower(ts, [A], simple_mode=True)) ###################################################################### # Numpy-style operator overloading # -------------------------------- # We can add two tensors using :code:`topi.broadcast_add` that have correct (broadcastable with specific) shapes. # Even shorter, TOPI provides operator overloading for such common operations. For example, # x, y = 100, 10 a = te.placeholder((x, y, y), name="a") b = te.placeholder((y, y), name="b") c = a + b # same as topi.broadcast_add d = a * b # same as topi.broadcast_mul ###################################################################### # Overloaded with the same syntax, TOPI handles broadcasting a primitive (`int`, `float`) to a tensor :code:`d - 3.14`. 
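######################################################################
# To make the scalar broadcasting claim above concrete, here is a small
# sketch (the names ``scalar_shift`` and ``ss`` are ours, not part of the
# original example). Lowering a schedule for the result shows the float
# constant being broadcast across every element of ``d``:

scalar_shift = d - 3.14  # same overloaded syntax, scalar broadcast over d
ss = te.create_schedule(scalar_shift.op)
print(tvm.lower(ss, [a, b, scalar_shift], simple_mode=True))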
###################################################################### # Generic schedules and fusing operations # --------------------------------------- # Up to now, we have seen an example of how TOPI can save us from writing explicit computations in lower level API. # But it doesn't stop here. Still we did the scheduling as before. TOPI also provides higher level # scheduling recipes depending on a given context. For example, for CUDA, # we can schedule the following series of operations ending with :code:`topi.sum` using only # :code:`topi.generic.schedule_reduce` # e = topi.elemwise_sum([c, d]) f = e / 2.0 g = topi.sum(f) with tvm.target.cuda(): sg = topi.cuda.schedule_reduce(g) print(tvm.lower(sg, [a, b], simple_mode=True)) ###################################################################### # As you can see, scheduled stages of computation have been accumulated and we can examine them by # print(sg.stages) ###################################################################### # We can test the correctness by comparing with :code:`numpy` result as follows # func = tvm.build(sg, [a, b, g], "cuda") dev = tvm.cuda(0) a_np = np.random.uniform(size=(x, y, y)).astype(a.dtype) b_np = np.random.uniform(size=(y, y)).astype(b.dtype) g_np = np.sum(np.add(a_np + b_np, a_np * b_np) / 2.0) a_nd = tvm.nd.array(a_np, dev) b_nd = tvm.nd.array(b_np, dev) g_nd = tvm.nd.array(np.zeros(g_np.shape, dtype=g_np.dtype), dev) func(a_nd, b_nd, g_nd) tvm.testing.assert_allclose(g_nd.numpy(), g_np, rtol=1e-5) ###################################################################### # TOPI also provides common neural nets operations such as _softmax_ with optimized schedule # tarray = te.placeholder((512, 512), name="tarray") softmax_topi = topi.nn.softmax(tarray) with tvm.target.Target("cuda"): sst = topi.cuda.schedule_softmax(softmax_topi) print(tvm.lower(sst, [tarray], simple_mode=True)) ###################################################################### # Fusing convolutions # ------------------- # We can fuse :code:`topi.nn.conv2d` and :code:`topi.nn.relu` together. # # .. note:: # # TOPI functions are all generic functions. They have different implementations # for different backends to optimize for performance. # For each backend, it is necessary to call them under a target scope for both # compute declaration and schedule. TVM will choose the right function to call with # the target information. data = te.placeholder((1, 3, 224, 224)) kernel = te.placeholder((10, 3, 5, 5)) with tvm.target.Target("cuda"): conv = topi.cuda.conv2d_nchw(data, kernel, 1, 2, 1) out = topi.nn.relu(conv) sconv = topi.cuda.schedule_conv2d_nchw([out]) print(tvm.lower(sconv, [data, kernel], simple_mode=True)) ###################################################################### # Summary # ------- # In this tutorial, we have seen # # - How to use TOPI API for common operations with numpy-style operators. # - How TOPI facilitates generic schedules and operator fusion for a context, to generate optimized kernel codes.
https://github.com/zk-ml/tachikoma
gallery/tutorial/introduction.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Introduction ============ **Authors**: `Jocelyn Shiue <https://github.com/>`_, `Chris Hoge <https://github.com/hogepodge>`_, `Lianmin Zheng <https://github.com/merrymercy>`_ Apache TVM is an open source machine learning compiler framework for CPUs, GPUs, and machine learning accelerators. It aims to enable machine learning engineers to optimize and run computations efficiently on any hardware backend. The purpose of this tutorial is to take a guided tour through all of the major features of TVM by defining and demonstrating key concepts. A new user should be able to work through the tutorial from start to finish and be able to operate TVM for automatic model optimization, while having a basic understanding of the TVM architecture and how it works. Contents -------- #. :doc:`Introduction <introduction>` #. :doc:`Installing TVM <install>` #. :doc:`Compiling and Optimizing a Model with the Command Line Interface <tvmc_command_line_driver>` #. :doc:`Compiling and Optimizing a Model with the Python Interface <autotvm_relay_x86>` #. :doc:`Working with Operators Using Tensor Expression <tensor_expr_get_started>` #. :doc:`Optimizing Operators with Templates and AutoTVM <autotvm_matmul_x86>` #. :doc:`Optimizing Operators with Template-free AutoScheduler <auto_scheduler_matmul_x86>` #. :doc:`Cross Compilation and Remote Procedure Calls (RPC) <cross_compilation_and_rpc>` #. :doc:`Compiling Deep Learning Models for GPUs <relay_quick_start>` """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore ################################################################################ # An Overview of TVM and Model Optimization # ========================================= # # The diagram below illustrates the steps a machine model takes as it is # transformed with the TVM optimizing compiler framework. # # .. image:: https://raw.githubusercontent.com/apache/tvm-site/main/images/tutorial/overview.png # :width: 100% # :alt: A High Level View of TVM # # 1. Import the model from a framework like *Tensorflow*, *PyTorch*, or *Onnx*. # The importer layer is where TVM can ingest models from other frameworks, like # Tensorflow, PyTorch, or ONNX. The level of support that TVM offers for each # frontend varies as we are constantly improving the open source project. If # you're having issues importing your model into TVM, you may want to try # converting it to ONNX. # # 2. Translate to *Relay*, TVM's high-level model language. # A model that has been imported into TVM is represented in Relay. Relay is a # functional language and intermediate representation (IR) for neural networks. 
# It has support for: # # - Traditional data flow-style representations # - Functional-style scoping, let-binding which makes it a fully featured # differentiable language # - Ability to allow the user to mix the two programming styles # # Relay applies graph-level optimization passes to optimize the model. # # 3. Lower to *Tensor Expression* (TE) representation. Lowering is when a # higher-level representation is transformed into a lower-level # representation. After applying the high-level optimizations, Relay # runs FuseOps pass to partition the model into many small subgraphs and lowers # the subgraphs to TE representation. Tensor Expression (TE) is a # domain-specific language for describing tensor computations. # TE also provides several *schedule* primitives to specify low-level loop # optimizations, such as tiling, vectorization, parallelization, # unrolling, and fusion. # To aid in the process of converting Relay representation into TE representation, # TVM includes a Tensor Operator Inventory (TOPI) that has pre-defined # templates of common tensor operators (e.g., conv2d, transpose). # # 4. Search for the best schedule using the auto-tuning module *AutoTVM* or *AutoScheduler*. # A schedule specifies the low-level loop optimizations for an operator or # subgraph defined in TE. Auto-tuning modules search for the best schedule # and compare them with cost models and on-device measurements. # There are two auto-tuning modules in TVM. # # - **AutoTVM**: A template-based auto-tuning module. It runs search algorithms # to find the best values for the tunable knobs in a user-defined template. # For common operators, their templates are already provided in TOPI. # - **AutoScheduler (a.k.a. Ansor)**: A template-free auto-tuning module. # It does not require pre-defined schedule templates. Instead, it generates # the search space automatically by analyzing the computation definition. # It then searches for the best schedule in the generated search space. # # 5. Choose the optimal configurations for model compilation. After tuning, the # auto-tuning module generates tuning records in JSON format. This step # picks the best schedule for each subgraph. # # 6. Lower to Tensor Intermediate Representation (TIR), TVM's low-level # intermediate representation. After selecting the optimal configurations # based on the tuning step, each TE subgraph is lowered to TIR and be # optimized by low-level optimization passes. Next, the optimized TIR is # lowered to the target compiler of the hardware platform. # This is the final code generation phase to produce an optimized model # that can be deployed into production. TVM supports several different # compiler backends including: # # - LLVM, which can target arbitrary microprocessor architecture including # standard x86 and ARM processors, AMDGPU and NVPTX code generation, and any # other platform supported by LLVM. # - Specialized compilers, such as NVCC, NVIDIA's compiler. # - Embedded and specialized targets, which are implemented through TVM's # Bring Your Own Codegen (BYOC) framework. # # 7. Compile down to machine code. At the end of this process, the # compiler-specific generated code can be lowered to machine code. # # TVM can compile models down to a linkable object module, which can then be # run with a lightweight TVM runtime that provides C APIs to dynamically # load the model, and entry points for other languages such as Python and # Rust. 
TVM can also build a bundled deployment in which the runtime is # combined with the model in a single package. # # The remainder of the tutorial will cover these aspects of TVM in more detail.
https://github.com/zk-ml/tachikoma
gallery/tutorial/relay_quick_start.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-relay-quick-start: Quick Start Tutorial for Compiling Deep Learning Models ======================================================= **Author**: `Yao Wang <https://github.com/kevinthesun>`_, `Truman Tian <https://github.com/SiNZeRo>`_ This example shows how to build a neural network with Relay python frontend and generates a runtime library for Nvidia GPU with TVM. Notice that you need to build TVM with cuda and llvm enabled. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore ###################################################################### # Overview for Supported Hardware Backend of TVM # ---------------------------------------------- # The image below shows hardware backend currently supported by TVM: # # .. image:: https://github.com/dmlc/web-data/raw/main/tvm/tutorial/tvm_support_list.png # :align: center # # In this tutorial, we'll choose cuda and llvm as target backends. # To begin with, let's import Relay and TVM. import numpy as np from tvm import relay from tvm.relay import testing import tvm from tvm import te from tvm.contrib import graph_executor import tvm.testing ###################################################################### # Define Neural Network in Relay # ------------------------------ # First, let's define a neural network with relay python frontend. # For simplicity, we'll use pre-defined resnet-18 network in Relay. # Parameters are initialized with Xavier initializer. # Relay also supports other model formats such as MXNet, CoreML, ONNX and # Tensorflow. # # In this tutorial, we assume we will do inference on our device and # the batch size is set to be 1. Input images are RGB color images of # size 224 * 224. We can call the # :py:meth:`tvm.relay.expr.TupleWrapper.astext()` to show the network # structure. batch_size = 1 num_class = 1000 image_shape = (3, 224, 224) data_shape = (batch_size,) + image_shape out_shape = (batch_size, num_class) mod, params = relay.testing.resnet.get_workload( num_layers=18, batch_size=batch_size, image_shape=image_shape ) # set show_meta_data=True if you want to show meta data print(mod.astext(show_meta_data=False)) ###################################################################### # Compilation # ----------- # Next step is to compile the model using the Relay/TVM pipeline. # Users can specify the optimization level of the compilation. # Currently this value can be 0 to 3. The optimization passes include # operator fusion, pre-computation, layout transformation and so on. 
# # :py:func:`relay.build` returns three components: the execution graph in # json format, the TVM module library of compiled functions specifically # for this graph on the target hardware, and the parameter blobs of # the model. During the compilation, Relay does the graph-level # optimization while TVM does the tensor-level optimization, resulting # in an optimized runtime module for model serving. # # We'll first compile for Nvidia GPU. Behind the scene, :py:func:`relay.build` # first does a number of graph-level optimizations, e.g. pruning, fusing, etc., # then registers the operators (i.e. the nodes of the optimized graphs) to # TVM implementations to generate a `tvm.module`. # To generate the module library, TVM will first transfer the high level IR # into the lower intrinsic IR of the specified target backend, which is CUDA # in this example. Then the machine code will be generated as the module library. opt_level = 3 target = tvm.target.cuda() with tvm.transform.PassContext(opt_level=opt_level): lib = relay.build(mod, target, params=params) ##################################################################### # Run the generate library # ------------------------ # Now we can create graph executor and run the module on Nvidia GPU. # create random input dev = tvm.cuda() data = np.random.uniform(-1, 1, size=data_shape).astype("float32") # create module module = graph_executor.GraphModule(lib["default"](dev)) # set input and parameters module.set_input("data", data) # run module.run() # get output out = module.get_output(0, tvm.nd.empty(out_shape)).numpy() # Print first 10 elements of output print(out.flatten()[0:10]) ###################################################################### # Save and Load Compiled Module # ----------------------------- # We can also save the graph, lib and parameters into files and load them # back in deploy environment. #################################################### # save the graph, lib and params into separate files from tvm.contrib import utils temp = utils.tempdir() path_lib = temp.relpath("deploy_lib.tar") lib.export_library(path_lib) print(temp.listdir()) #################################################### # load the module back. loaded_lib = tvm.runtime.load_module(path_lib) input_data = tvm.nd.array(data) module = graph_executor.GraphModule(loaded_lib["default"](dev)) module.run(data=input_data) out_deploy = module.get_output(0).numpy() # Print first 10 elements of output print(out_deploy.flatten()[0:10]) # check whether the output from deployed module is consistent with original one tvm.testing.assert_allclose(out_deploy, out, atol=1e-5)
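######################################################################
# As a brief sketch beyond the original tutorial: the same Relay module can be
# retargeted just by changing the target. For example, compiling the same
# network for a CPU with the ``llvm`` target only requires swapping the target
# and device; everything else stays the same.

cpu_target = "llvm"
with tvm.transform.PassContext(opt_level=opt_level):
    cpu_lib = relay.build(mod, cpu_target, params=params)

cpu_dev = tvm.cpu()
cpu_module = graph_executor.GraphModule(cpu_lib["default"](cpu_dev))
cpu_module.set_input("data", data)
cpu_module.run()
out_cpu = cpu_module.get_output(0, tvm.nd.empty(out_shape)).numpy()

# The first 10 elements should closely match the CUDA output printed earlier.
print(out_cpu.flatten()[0:10])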
https://github.com/zk-ml/tachikoma
gallery/tutorial/tensor_expr_get_started.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-tensor-expr-get-started: Working with Operators Using Tensor Expression ============================================== **Author**: `Tianqi Chen <https://tqchen.github.io>`_ In this tutorial we will turn our attention to how TVM works with Tensor Expression (TE) to define tensor computations and apply loop optimizations. TE describes tensor computations in a pure functional language (that is each expression has no side effects). When viewed in context of the TVM as a whole, Relay describes a computation as a set of operators, and each of these operators can be represented as a TE expression where each TE expression takes input tensors and produces an output tensor. This is an introductory tutorial to the Tensor Expression language in TVM. TVM uses a domain specific tensor expression for efficient kernel construction. We will demonstrate the basic workflow with two examples of using the tensor expression language. The first example introduces TE and scheduling with vector addition. The second expands on these concepts with a step-by-step optimization of a matrix multiplication with TE. This matrix multiplication example will serve as the comparative basis for future tutorials covering more advanced features of TVM. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore ################################################################################ # Example 1: Writing and Scheduling Vector Addition in TE for CPU # --------------------------------------------------------------- # # Let's look at an example in Python in which we will implement a TE for # vector addition, followed by a schedule targeted towards a CPU. # We begin by initializing a TVM environment. import tvm import tvm.testing from tvm import te import numpy as np ################################################################################ # You will get better performance if you can identify the CPU you are targeting # and specify it. If you're using LLVM, you can get this information from the # command ``llc --version`` to get the CPU type, and you can check # ``/proc/cpuinfo`` for additional extensions that your processor might # support. For example, you can use ``llvm -mcpu=skylake-avx512`` for CPUs with # AVX-512 instructions. tgt = tvm.target.Target(target="llvm", host="llvm") ################################################################################ # Describing the Vector Computation # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # We describe a vector addition computation. TVM adopts tensor semantics, with # each intermediate result represented as a multi-dimensional array. The user # needs to describe the computation rule that generates the tensors. 
We first # define a symbolic variable ``n`` to represent the shape. We then define two # placeholder Tensors, ``A`` and ``B``, with given shape ``(n,)``. We then # describe the result tensor ``C``, with a ``compute`` operation. The # ``compute`` defines a computation, with the output conforming to the # specified tensor shape and the computation to be performed at each position # in the tensor defined by the lambda function. Note that while ``n`` is a # variable, it defines a consistent shape between the ``A``, ``B`` and ``C`` # tensors. Remember, no actual computation happens during this phase, as we # are only declaring how the computation should be done. n = te.var("n") A = te.placeholder((n,), name="A") B = te.placeholder((n,), name="B") C = te.compute(A.shape, lambda i: A[i] + B[i], name="C") ################################################################################ # .. admonition:: Lambda Functions # # The second argument to the ``te.compute`` method is the function that # performs the computation. In this example, we're using an anonymous function, # also known as a ``lambda`` function, to define the computation, in this case # addition on the ``i``\th element of ``A`` and ``B``. ################################################################################ # Create a Default Schedule for the Computation # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # While the above lines describe the computation rule, we can compute ``C`` in # many different ways to fit different devices. For a tensor with multiple # axes, you can choose which axis to iterate over first, or computations can be # split across different threads. TVM requires that the user to provide a # schedule, which is a description of how the computation should be performed. # Scheduling operations within TE can change loop orders, split computations # across different threads, and group blocks of data together, amongst other # operations. An important concept behind schedules is that they only describe # how the computation is performed, so different schedules for the same TE will # produce the same result. # # TVM allows you to create a naive schedule that will compute ``C`` in by # iterating in row major order. # # .. code-block:: c # # for (int i = 0; i < n; ++i) { # C[i] = A[i] + B[i]; # } s = te.create_schedule(C.op) ###################################################################### # Compile and Evaluate the Default Schedule # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # With the TE expression and a schedule, we can produce runnable code for our # target language and architecture, in this case LLVM and a CPU. We provide # TVM with the schedule, a list of the TE expressions that are in the schedule, # the target and host, and the name of the function we are producing. The result # of the output is a type-erased function that can be called directly from Python. # # In the following line, we use ``tvm.build`` to create a function. The build # function takes the schedule, the desired signature of the function (including # the inputs and outputs) as well as target language we want to compile to. fadd = tvm.build(s, [A, B, C], tgt, name="myadd") ################################################################################ # Let's run the function, and compare the output to the same computation in # numpy. The compiled TVM function exposes a concise C API that can be invoked # from any language. We begin by creating a device, which is a device (CPU in this # example) that TVM can compile the schedule to. 
In this case the device is an # LLVM CPU target. We can then initialize the tensors in our device and # perform the custom addition operation. To verify that the computation is # correct, we can compare the result of the output of the c tensor to the same # computation performed by numpy. dev = tvm.device(tgt.kind.name, 0) n = 1024 a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) fadd(a, b, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) ################################################################################ # To get a comparison of how fast this version is compared to numpy, create a # helper function to run a profile of the TVM generated code. import timeit np_repeat = 100 np_running_time = timeit.timeit( setup="import numpy\n" "n = 32768\n" 'dtype = "float32"\n' "a = numpy.random.rand(n, 1).astype(dtype)\n" "b = numpy.random.rand(n, 1).astype(dtype)\n", stmt="answer = a + b", number=np_repeat, ) print("Numpy running time: %f" % (np_running_time / np_repeat)) def evaluate_addition(func, target, optimization, log): dev = tvm.device(target.kind.name, 0) n = 32768 a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) evaluator = func.time_evaluator(func.entry_name, dev, number=10) mean_time = evaluator(a, b, c).mean print("%s: %f" % (optimization, mean_time)) log.append((optimization, mean_time)) log = [("numpy", np_running_time / np_repeat)] evaluate_addition(fadd, tgt, "naive", log=log) ################################################################################ # Updating the Schedule to Use Parallelism # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Now that we've illustrated the fundamentals of TE, let's go deeper into what # schedules do, and how they can be used to optimize tensor expressions for # different architectures. A schedule is a series of steps that are applied to # an expression to transform it in a number of different ways. When a schedule # is applied to an expression in TE, the inputs and outputs remain the same, # but when compiled the implementation of the expression can change. This # tensor addition, in the default schedule, is run serially but is easy to # parallelize across all of the processor threads. We can apply the parallel # schedule operation to our computation. s[C].parallel(C.op.axis[0]) ################################################################################ # The ``tvm.lower`` command will generate the Intermediate Representation (IR) # of the TE, with the corresponding schedule. By lowering the expression as we # apply different schedule operations, we can see the effect of scheduling on # the ordering of the computation. We use the flag ``simple_mode=True`` to # return a readable C-style statement. print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################ # It's now possible for TVM to run these blocks on independent threads. 
Let's # compile and run this new schedule with the parallel operation applied: fadd_parallel = tvm.build(s, [A, B, C], tgt, name="myadd_parallel") fadd_parallel(a, b, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) evaluate_addition(fadd_parallel, tgt, "parallel", log=log) ################################################################################ # Updating the Schedule to Use Vectorization # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Modern CPUs also have the ability to perform SIMD operations on floating # point values, and we can apply another schedule to our computation expression # to take advantage of this. Accomplishing this requires multiple steps: first # we have to split the schedule into inner and outer loops using the split # scheduling primitive. The inner loops can use vectorization to use SIMD # instructions using the vectorize scheduling primitive, then the outer loops # can be parallelized using the parallel scheduling primitive. Choose the split # factor to be the number of threads on your CPU. # Recreate the schedule, since we modified it with the parallel operation in # the previous example n = te.var("n") A = te.placeholder((n,), name="A") B = te.placeholder((n,), name="B") C = te.compute(A.shape, lambda i: A[i] + B[i], name="C") s = te.create_schedule(C.op) # This factor should be chosen to match the number of threads appropriate for # your CPU. This will vary depending on architecture, but a good rule is # setting this factor to equal the number of available CPU cores. factor = 4 outer, inner = s[C].split(C.op.axis[0], factor=factor) s[C].parallel(outer) s[C].vectorize(inner) fadd_vector = tvm.build(s, [A, B, C], tgt, name="myadd_parallel") evaluate_addition(fadd_vector, tgt, "vector", log=log) print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################ # Comparing the Different Schedules # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # We can now compare the different schedules baseline = log[0][1] print("%s\t%s\t%s" % ("Operator".rjust(20), "Timing".rjust(20), "Performance".rjust(20))) for result in log: print( "%s\t%s\t%s" % (result[0].rjust(20), str(result[1]).rjust(20), str(result[1] / baseline).rjust(20)) ) ################################################################################ # .. admonition:: Code Specialization # # As you may have noticed, the declarations of ``A``, ``B`` and ``C`` all # take the same shape argument, ``n``. TVM will take advantage of this to # pass only a single shape argument to the kernel, as you will find in the # printed device code. This is one form of specialization. # # On the host side, TVM will automatically generate check code that checks # the constraints in the parameters. So if you pass arrays with different # shapes into fadd, an error will be raised. # # We can do more specializations. For example, we can write :code:`n = # tvm.runtime.convert(1024)` instead of :code:`n = te.var("n")`, in the # computation declaration. The generated function will only take vectors with # length 1024. ################################################################################ # We've defined, scheduled, and compiled a vector addition operator, which we # were then able to execute on the TVM runtime. We can save the operator as a # library, which we can then load later using the TVM runtime. 
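################################################################################
# As a brief sketch of that last point (the complete save/load workflow,
# including device modules, is shown in the optional GPU section below), we
# could export the compiled operator to a shared library and load it back
# through the runtime. The file name ``myadd_cpu.so`` and the use of a
# temporary directory here are illustrative choices, not requirements.

from tvm.contrib import utils

tmp_dir = utils.tempdir()
lib_path = tmp_dir.relpath("myadd_cpu.so")

# Export the host-only (CPU) module as a shared library.
fadd.export_library(lib_path)

# Load it back and confirm the reloaded operator still computes a + b.
fadd_loaded = tvm.runtime.load_module(lib_path)
fadd_loaded(a, b, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())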
################################################################################ # Targeting Vector Addition for GPUs (Optional) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # TVM is capable of targeting multiple architectures. In the next example, we # will target compilation of the vector addition to GPUs. # If you want to run this code, change ``run_cuda = True`` # Note that by default this example is not run in the docs CI. run_cuda = False if run_cuda: # Change this target to the correct backend for you gpu. For example: cuda (NVIDIA GPUs), # rocm (Radeon GPUS), OpenCL (opencl). tgt_gpu = tvm.target.Target(target="cuda", host="llvm") # Recreate the schedule n = te.var("n") A = te.placeholder((n,), name="A") B = te.placeholder((n,), name="B") C = te.compute(A.shape, lambda i: A[i] + B[i], name="C") print(type(C)) s = te.create_schedule(C.op) bx, tx = s[C].split(C.op.axis[0], factor=64) ################################################################################ # Finally we must bind the iteration axis bx and tx to threads in the GPU # compute grid. The naive schedule is not valid for GPUs, and these are # specific constructs that allow us to generate code that runs on a GPU. s[C].bind(bx, te.thread_axis("blockIdx.x")) s[C].bind(tx, te.thread_axis("threadIdx.x")) ###################################################################### # Compilation # ----------- # After we have finished specifying the schedule, we can compile it # into a TVM function. By default TVM compiles into a type-erased # function that can be directly called from the python side. # # In the following line, we use tvm.build to create a function. # The build function takes the schedule, the desired signature of the # function (including the inputs and outputs) as well as target language # we want to compile to. # # The result of compilation fadd is a GPU device function (if GPU is # involved) as well as a host wrapper that calls into the GPU # function. fadd is the generated host wrapper function, it contains # a reference to the generated device function internally. fadd = tvm.build(s, [A, B, C], target=tgt_gpu, name="myadd") ################################################################################ # The compiled TVM function exposes a concise C API that can be invoked from # any language. # # We provide a minimal array API in python to aid quick testing and prototyping. # The array API is based on the `DLPack <https://github.com/dmlc/dlpack>`_ standard. # # - We first create a GPU device. # - Then tvm.nd.array copies the data to the GPU. # - ``fadd`` runs the actual computation # - ``numpy()`` copies the GPU array back to the CPU (so we can verify correctness). # # Note that copying the data to and from the memory on the GPU is a required step. dev = tvm.device(tgt_gpu.kind.name, 0) n = 1024 a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) fadd(a, b, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) ################################################################################ # Inspect the Generated GPU Code # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # You can inspect the generated code in TVM. The result of tvm.build is a TVM # Module. fadd is the host module that contains the host wrapper, it also # contains a device module for the CUDA (GPU) function. # # The following code fetches the device module and prints the content code. 
if ( tgt_gpu.kind.name == "cuda" or tgt_gpu.kind.name == "rocm" or tgt_gpu.kind.name.startswith("opencl") ): dev_module = fadd.imported_modules[0] print("-----GPU code-----") print(dev_module.get_source()) else: print(fadd.get_source()) ################################################################################ # Saving and Loading Compiled Modules # ----------------------------------- # Besides runtime compilation, we can save the compiled modules into a file and # load them back later. # # The following code first performs the following steps: # # - It saves the compiled host module into an object file. # - Then it saves the device module into a ptx file. # - cc.create_shared calls a compiler (gcc) to create a shared library from tvm.contrib import cc from tvm.contrib import utils temp = utils.tempdir() fadd.save(temp.relpath("myadd.o")) if tgt.kind.name == "cuda": fadd.imported_modules[0].save(temp.relpath("myadd.ptx")) if tgt.kind.name == "rocm": fadd.imported_modules[0].save(temp.relpath("myadd.hsaco")) if tgt.kind.name.startswith("opencl"): fadd.imported_modules[0].save(temp.relpath("myadd.cl")) cc.create_shared(temp.relpath("myadd.so"), [temp.relpath("myadd.o")]) print(temp.listdir()) ################################################################################ # .. admonition:: Module Storage Format # # The CPU (host) module is directly saved as a shared library (.so). There # can be multiple customized formats of the device code. In our example, the # device code is stored in ptx, as well as a meta data json file. They can be # loaded and linked separately via import. ################################################################################ # Load Compiled Module # ~~~~~~~~~~~~~~~~~~~~ # We can load the compiled module from the file system and run the code. The # following code loads the host and device module separately and links them # together. We can verify that the newly loaded function works. fadd1 = tvm.runtime.load_module(temp.relpath("myadd.so")) if tgt.kind.name == "cuda": fadd1_dev = tvm.runtime.load_module(temp.relpath("myadd.ptx")) fadd1.import_module(fadd1_dev) if tgt.kind.name == "rocm": fadd1_dev = tvm.runtime.load_module(temp.relpath("myadd.hsaco")) fadd1.import_module(fadd1_dev) if tgt.kind.name.startswith("opencl"): fadd1_dev = tvm.runtime.load_module(temp.relpath("myadd.cl")) fadd1.import_module(fadd1_dev) fadd1(a, b, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) ################################################################################ # Pack Everything into One Library # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # In the above example, we store the device and host code separately. TVM also # supports export everything as one shared library. Under the hood, we pack # the device modules into binary blobs and link them together with the host # code. Currently we support packing of Metal, OpenCL and CUDA modules. fadd.export_library(temp.relpath("myadd_pack.so")) fadd2 = tvm.runtime.load_module(temp.relpath("myadd_pack.so")) fadd2(a, b, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) ################################################################################ # .. admonition:: Runtime API and Thread-Safety # # The compiled modules of TVM do not depend on the TVM compiler. Instead, # they only depend on a minimum runtime library. The TVM runtime library # wraps the device drivers and provides thread-safe and device agnostic calls # into the compiled functions. 
# # This means that you can call the compiled TVM functions from any thread, on # any GPUs, provided that you have compiled the code for that GPU. ################################################################################ # Generate OpenCL Code # -------------------- # TVM provides code generation features into multiple backends. We can also # generate OpenCL code or LLVM code that runs on CPU backends. # # The following code blocks generate OpenCL code, creates array on an OpenCL # device, and verifies the correctness of the code. if tgt.kind.name.startswith("opencl"): fadd_cl = tvm.build(s, [A, B, C], tgt, name="myadd") print("------opencl code------") print(fadd_cl.imported_modules[0].get_source()) dev = tvm.cl(0) n = 1024 a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) fadd_cl(a, b, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) ################################################################################ # .. admonition:: TE Scheduling Primitives # # TVM includes a number of different scheduling primitives: # # - split: splits a specified axis into two axises by the defined factor. # - tile: tiles will split a computation across two axes by the defined factors. # - fuse: fuses two consecutive axises of one computation. # - reorder: can reorder the axises of a computation into a defined order. # - bind: can bind a computation to a specific thread, useful in GPU programming. # - compute_at: by default, TVM will compute tensors at the outermost level # of the function, or the root, by default. compute_at specifies that one # tensor should be computed at the first axis of computation for another # operator. # - compute_inline: when marked inline, a computation will be expanded then # inserted into the address where the tensor is required. # - compute_root: moves a computation to the outermost layer, or root, of the # function. This means that stage of the computation will be fully computed # before it moves on to the next stage. # # A complete description of these primitives can be found in the # :ref:`Schedule Primitives <schedule_primitives>` docs page. ################################################################################ # Example 2: Manually Optimizing Matrix Multiplication with TE # ------------------------------------------------------------ # # Now we will consider a second, more advanced example, demonstrating how with # just 18 lines of python code TVM speeds up a common matrix multiplication operation by 18x. # # **Matrix multiplication is a compute intensive operation. There are # two important optimizations for good CPU performance:** # # 1. Increase the cache hit rate of memory access. Both complex # numerical computation and hot-spot memory access can be # accelerated by a high cache hit rate. This requires us to # transform the origin memory access pattern to a pattern that fits # the cache policy. # # 2. SIMD (Single instruction multi-data), also known as the vector # processing unit. On each cycle instead of processing a single # value, SIMD can process a small batch of data. This requires us # to transform the data access pattern in the loop body in uniform # pattern so that the LLVM backend can lower it to SIMD. # # The techniques used in this tutorial are a subset of tricks mentioned in this # `repository <https://github.com/flame/how-to-optimize-gemm>`_. 
Some of them # have been applied by TVM abstraction automatically, but some of them cannot # be automatically applied due to TVM constraints. ################################################################################ # Preparation and Performance Baseline # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # We begin by collecting performance data on the `numpy` implementation of # matrix multiplication. import tvm import tvm.testing from tvm import te import numpy # The size of the matrix # (M, K) x (K, N) # You are free to try out different shapes, sometimes TVM optimization outperforms numpy with MKL. M = 1024 K = 1024 N = 1024 # The default tensor data type in tvm dtype = "float32" # You will want to adjust the target to match any CPU vector extensions you # might have. For example, if you're using using Intel AVX2 (Advanced Vector # Extensions) ISA for SIMD, you can get the best performance by changing the # following line to ``llvm -mcpu=core-avx2``, or specific type of CPU you use. # Recall that you're using llvm, you can get this information from the command # ``llc --version`` to get the CPU type, and you can check ``/proc/cpuinfo`` # for additional extensions that your processor might support. target = tvm.target.Target(target="llvm", host="llvm") dev = tvm.device(target.kind.name, 0) # Random generated tensor for testing a = tvm.nd.array(numpy.random.rand(M, K).astype(dtype), dev) b = tvm.nd.array(numpy.random.rand(K, N).astype(dtype), dev) # Repeatedly perform a matrix multiplication to get a performance baseline # for the default numpy implementation np_repeat = 100 np_running_time = timeit.timeit( setup="import numpy\n" "M = " + str(M) + "\n" "K = " + str(K) + "\n" "N = " + str(N) + "\n" 'dtype = "float32"\n' "a = numpy.random.rand(M, K).astype(dtype)\n" "b = numpy.random.rand(K, N).astype(dtype)\n", stmt="answer = numpy.dot(a, b)", number=np_repeat, ) print("Numpy running time: %f" % (np_running_time / np_repeat)) answer = numpy.dot(a.numpy(), b.numpy()) ################################################################################ # Now we write a basic matrix multiplication using TVM TE and verify that it # produces the same results as the numpy implementation. We also write a # function that will help us measure the performance of the schedule # optimizations. # TVM Matrix Multiplication using TE k = te.reduce_axis((0, K), "k") A = te.placeholder((M, K), name="A") B = te.placeholder((K, N), name="B") C = te.compute((M, N), lambda x, y: te.sum(A[x, k] * B[k, y], axis=k), name="C") # Default schedule s = te.create_schedule(C.op) func = tvm.build(s, [A, B, C], target=target, name="mmult") c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) def evaluate_operation(s, vars, target, name, optimization, log): func = tvm.build(s, [A, B, C], target=target, name="mmult") assert func c = tvm.nd.array(numpy.zeros((M, N), dtype=dtype), dev) func(a, b, c) tvm.testing.assert_allclose(c.numpy(), answer, rtol=1e-5) evaluator = func.time_evaluator(func.entry_name, dev, number=10) mean_time = evaluator(a, b, c).mean print("%s: %f" % (optimization, mean_time)) log.append((optimization, mean_time)) log = [] evaluate_operation(s, [A, B, C], target=target, name="mmult", optimization="none", log=log) ################################################################################ # Let's take a look at the intermediate representation of the operator and # default schedule using the TVM lower function. 
Note how the implementation is # essentially a naive implementation of a matrix multiplication, using three # nested loops over the indices of the A and B matrices. print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################ # Optimization 1: Blocking # ~~~~~~~~~~~~~~~~~~~~~~~~ # # A important trick to enhance the cache hit rate is blocking, where you # structure memory access such that the inside a block is a small neighborhood # that has high memory locality. In this tutorial, we pick a block factor of # 32. This will result in a block that will fill a 32 * 32 * sizeof(float) area # of memory. This corresponds to a cache size of 4KB, in relation to a # reference cache size of 32 KB for L1 cache. # # We begin by creating a default schedule for the ``C`` operation, then apply a # ``tile`` scheduling primitive to it with the specified block factor, with the # scheduling primitive returning the resulting loop order from outermost to # innermost, as a vector ``[x_outer, y_outer, x_inner, y_inner]``. We then get # the reduction axis for output of the operation, and perform a split operation # on it using a factor of 4. This factor doesn't directly impact the blocking # optimization we're working on right now, but will be useful later when we # apply vectorization. # # Now that the operation has been blocked, we can reorder the computation to # put the reduction operation into the outermost loop of the computation, # helping to guarantee that the blocked data remains in cache. This completes # the schedule, and we can build and test the performance compared to the naive # schedule. bn = 32 # Blocking by loop tiling xo, yo, xi, yi = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn) (k,) = s[C].op.reduce_axis ko, ki = s[C].split(k, factor=4) # Hoist reduction domain outside the blocking loop s[C].reorder(xo, yo, ko, ki, xi, yi) evaluate_operation(s, [A, B, C], target=target, name="mmult", optimization="blocking", log=log) ################################################################################ # By reordering the computation to take advantage of caching, you should see a # significant improvement in the performance of the computation. Now, print the # internal representation and compare it to the original: print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################ # Optimization 2: Vectorization # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Another important optimization trick is vectorization. When the memory access # pattern is uniform, the compiler can detect this pattern and pass the # continuous memory to the SIMD vector processor. In TVM, we can use the # ``vectorize`` interface to hint the compiler this pattern, taking advantage # of this hardware feature. # # In this tutorial, we chose to vectorize the inner loop row data since it is # already cache friendly from our previous optimizations. 
# Apply the vectorization optimization s[C].vectorize(yi) evaluate_operation(s, [A, B, C], target=target, name="mmult", optimization="vectorization", log=log) # The generalized IR after vectorization print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################ # Optimization 3: Loop Permutation # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # If we look at the above IR, we can see the inner loop row data is vectorized # and B is transformed into PackedB (this is evident by the `(float32x32*)B2` # portion of the inner loop). The traversal of PackedB is sequential now. So we # will look at the access pattern of A. In current schedule, A is accessed # column by column which is not cache friendly. If we change the nested loop # order of `ki` and inner axes `xi`, the access pattern for A matrix will be # more cache friendly. s = te.create_schedule(C.op) xo, yo, xi, yi = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn) (k,) = s[C].op.reduce_axis ko, ki = s[C].split(k, factor=4) # re-ordering s[C].reorder(xo, yo, ko, xi, ki, yi) s[C].vectorize(yi) evaluate_operation( s, [A, B, C], target=target, name="mmult", optimization="loop permutation", log=log ) # Again, print the new generalized IR print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################ # Optimization 4: Array Packing # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Another important trick is array packing. This trick is to reorder the # storage dimension of the array to convert the continuous access pattern on # certain dimension to a sequential pattern after flattening. # # .. image:: https://github.com/dmlc/web-data/raw/main/tvm/tutorial/array-packing.png # :align: center # # Just as it is shown in the figure above, after blocking the computations, we # can observe the array access pattern of B (after flattening), which is # regular but discontinuous. We expect that after some transformation we can # get a continuous access pattern. By reordering a ``[16][16]`` array to a # ``[16/4][16][4]`` array the access pattern of B will be sequential when # grabbing the corresponding value from the packed array. # # To accomplish this, we are going to have to start with a new default # schedule, taking into account the new packing of B. It's worth taking a # moment to comment on this: TE is a powerful and expressive language for # writing optimized operators, but it often requires some knowledge of the # underlying algorithm, data structures, and hardware target that you are # writing for. Later in the tutorial, we will discuss some of the options for # letting TVM take that burden. Regardless, let's move on with the new # optimized schedule. # We have to re-write the algorithm slightly. packedB = te.compute((N / bn, K, bn), lambda x, y, z: B[y, x * bn + z], name="packedB") C = te.compute( (M, N), lambda x, y: te.sum(A[x, k] * packedB[y // bn, k, tvm.tir.indexmod(y, bn)], axis=k), name="C", ) s = te.create_schedule(C.op) xo, yo, xi, yi = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn) (k,) = s[C].op.reduce_axis ko, ki = s[C].split(k, factor=4) s[C].reorder(xo, yo, ko, xi, ki, yi) s[C].vectorize(yi) x, y, z = s[packedB].op.axis s[packedB].vectorize(z) s[packedB].parallel(x) evaluate_operation(s, [A, B, C], target=target, name="mmult", optimization="array packing", log=log) # Here is the generated IR after array packing. 
print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################ # Optimization 5: Optimizing Block Writing Through Caching # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Up to this point all of our optimizations have focused on efficiently # accessing and computing the data from the `A` and `B` matrices to compute the # `C` matrix. After the blocking optimization, the operator will write result # to `C` block by block, and the access pattern is not sequential. We can # address this by using a sequential cache array, using a combination of # `cache_write`, `compute_at`, and `unroll`to hold the block results and write # to `C` when all the block results are ready. s = te.create_schedule(C.op) # Allocate write cache CC = s.cache_write(C, "global") xo, yo, xi, yi = s[C].tile(C.op.axis[0], C.op.axis[1], bn, bn) # Write cache is computed at yo s[CC].compute_at(s[C], yo) # New inner axes xc, yc = s[CC].op.axis (k,) = s[CC].op.reduce_axis ko, ki = s[CC].split(k, factor=4) s[CC].reorder(ko, xc, ki, yc) s[CC].unroll(ki) s[CC].vectorize(yc) x, y, z = s[packedB].op.axis s[packedB].vectorize(z) s[packedB].parallel(x) evaluate_operation(s, [A, B, C], target=target, name="mmult", optimization="block caching", log=log) # Here is the generated IR after write cache blocking. print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################ # Optimization 6: Parallelization # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # So far, our computation is only designed to use a single core. Nearly all # modern processors have multiple cores, and computation can benefit from # running computations in parallel. The final optimization is to take advantage # of thread-level parallelization. # parallel s[C].parallel(xo) x, y, z = s[packedB].op.axis s[packedB].vectorize(z) s[packedB].parallel(x) evaluate_operation( s, [A, B, C], target=target, name="mmult", optimization="parallelization", log=log ) # Here is the generated IR after parallelization. print(tvm.lower(s, [A, B, C], simple_mode=True)) ################################################################################ # Summary of Matrix Multiplication Example # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # After applying the above simple optimizations with only 18 lines of code, our # generated code can begin to approach the performance of `numpy` with the Math # Kernel Library (MKL). Since we've been logging the performance as we've been # working, we can compare the results. baseline = log[0][1] print("%s\t%s\t%s" % ("Operator".rjust(20), "Timing".rjust(20), "Performance".rjust(20))) for result in log: print( "%s\t%s\t%s" % (result[0].rjust(20), str(result[1]).rjust(20), str(result[1] / baseline).rjust(20)) ) ################################################################################ # Note that the outputs on the web page reflect the running times on a # non-exclusive Docker container, and should be considered unreliable. It is # highly encouraged to run the tutorial by yourself to observe the performance # gain achieved by TVM, and to carefully work through each example to # understand the iterative improvements that are made to the matrix # multiplication operation. 
################################################################################ # Final Notes and Summary # ----------------------- # As mentioned earlier, how to apply optimizations using TE and scheduling # primitives can require some knowledge of the underlying architecture and # algorithms. However, TE was designed to act as a foundation for more complex # algorithms that can search the potential optimization. With the knowledge you # have from this introduction to TE, we can now begin to explore how TVM can # automate the schedule optimization process. # # This tutorial provided a walk-through of TVM Tensor Expression (TE) workflow # using a vector add and a matrix multiplication examples. The general workflow # is # # - Describe your computation via a series of operations. # - Describe how we want to compute use schedule primitives. # - Compile to the target function we want. # - Optionally, save the function to be loaded later. # # Upcoming tutorials expand on the matrix multiplication example, and show how # you can build generic templates of the matrix multiplication and other # operations with tunable parameters that allows you to automatically optimize # the computation for specific platforms.
https://github.com/zk-ml/tachikoma
gallery/tutorial/tensor_ir_blitz_course.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tir_blitz: Blitz Course to TensorIR ======================== **Author**: `Siyuan Feng <https://github.com/Hzfengsy>`_ TensorIR is a domain specific language for deep learning programs serving two broad purposes: - An implementation for transforming and optimizing programs on various hardware backends. - An abstraction for automatic _tensorized_ program optimization. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore import tvm from tvm.ir.module import IRModule from tvm.script import tir as T import numpy as np ################################################################################################ # IRModule # -------- # An IRModule is the central data structure in TVM, which contains deep learning programs. # It is the basic object of interest of IR transformation and model building. # # .. image:: https://raw.githubusercontent.com/tlc-pack/web-data/main/images/design/tvm_life_of_irmodule.png # :align: center # :width: 85% # # This is the life cycle of an IRModule, which can be created from TVMScript. TensorIR schedule # primitives and passes are two major ways to transform an IRModule. Also, a sequence of # transformations on an IRModule is acceptable. Note that we can print an IRModule at **ANY** stage # to TVMScript. After all transformations and optimizations are complete, we can build the IRModule # to a runnable module to deploy on target devices. # # Based on the design of TensorIR and IRModule, we are able to create a new programming method: # # 1. Write a program by TVMScript in a python-AST based syntax. # # 2. Transform and optimize a program with python api. # # 3. Interactively inspect and try the performance with an imperative style transformation API. ################################################################################################ # Create an IRModule # ------------------ # IRModule can be created by writing TVMScript, which is a round-trippable syntax for TVM IR. # # Different than creating a computational expression by Tensor Expression # (:ref:`tutorial-tensor-expr-get-started`), TensorIR allow users to program through TVMScript, # a language embedded in python AST. The new method makes it possible to write complex programs # and further schedule and optimize it. # # Following is a simple example for vector addition. # @tvm.script.ir_module class MyModule: @T.prim_func def main(a: T.handle, b: T.handle): # We exchange data between function by handles, which are similar to pointer. T.func_attr({"global_symbol": "main", "tir.noalias": True}) # Create buffer from handles. 
A = T.match_buffer(a, (8,), dtype="float32") B = T.match_buffer(b, (8,), dtype="float32") for i in range(8): # A block is an abstraction for computation. with T.block("B"): # Define a spatial block iterator and bind it to value i. vi = T.axis.spatial(8, i) B[vi] = A[vi] + 1.0 ir_module = MyModule print(type(ir_module)) print(ir_module.script()) ################################################################################################ # Besides, we can also use tensor expression DSL to write simple operators, and convert them # to an IRModule. # from tvm import te A = te.placeholder((8,), dtype="float32", name="A") B = te.compute((8,), lambda *i: A(*i) + 1.0, name="B") func = te.create_prim_func([A, B]) ir_module_from_te = IRModule({"main": func}) print(ir_module_from_te.script()) ################################################################################################ # Build and Run an IRModule # ------------------------- # We can build the IRModule into a runnable module with specific target backends. # mod = tvm.build(ir_module, target="llvm") # The module for CPU backends. print(type(mod)) ################################################################################################ # Prepare the input array and output array, then run the module. # a = tvm.nd.array(np.arange(8).astype("float32")) b = tvm.nd.array(np.zeros((8,)).astype("float32")) mod(a, b) print(a) print(b) ################################################################################################ # Transform an IRModule # --------------------- # The IRModule is the central data structure for program optimization, which can be transformed # by :code:`Schedule`. # A schedule contains multiple primitive methods to interactively transform the program. # Each primitive transforms the program in certain ways to bring additional performance optimizations. # # .. image:: https://raw.githubusercontent.com/tlc-pack/web-data/main/images/design/tvm_tensor_ir_opt_flow.png # :align: center # :width: 100% # # The image above is a typical workflow for optimizing a tensor program. First, we need to create a # schedule on the initial IRModule created from either TVMScript or Tensor Expression. Then, a # sequence of schedule primitives will help to improve the performance. And at last, we can lower # and build it into a runnable module. # # Here we just demonstrate a very simple transformation. First we create schedule on the input `ir_module`. sch = tvm.tir.Schedule(ir_module) print(type(sch)) ################################################################################################ # Tile the loop into 3 loops and print the result. # Get block by its name block_b = sch.get_block("B") # Get loops surrounding the block (i,) = sch.get_loops(block_b) # Tile the loop nesting. i_0, i_1, i_2 = sch.split(i, factors=[2, 2, 2]) print(sch.mod.script()) ################################################################################################ # We can also reorder the loops. Now we move loop `i_2` to outside of `i_1`. sch.reorder(i_0, i_2, i_1) print(sch.mod.script()) ################################################################################################ # Transform to a GPU program # ~~~~~~~~~~~~~~~~~~~~~~~~~~ # If we want to deploy models on GPUs, thread binding is necessary. Fortunately, we can # also use primitives and do incrementally transformation. 
# sch.bind(i_0, "blockIdx.x") sch.bind(i_2, "threadIdx.x") print(sch.mod.script()) ################################################################################################ # After binding the threads, we can now build the IRModule for the :code:`cuda` backend. ctx = tvm.cuda(0) cuda_mod = tvm.build(sch.mod, target="cuda") cuda_a = tvm.nd.array(np.arange(8).astype("float32"), ctx) cuda_b = tvm.nd.array(np.zeros((8,)).astype("float32"), ctx) cuda_mod(cuda_a, cuda_b) print(cuda_a) print(cuda_b)
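################################################################################################
# As a small additional check (a sketch assuming a CUDA-capable device is available, just like
# the build above), we can verify the GPU result numerically rather than only printing it. The
# kernel computes ``B[vi] = A[vi] + 1.0``, so the output should equal the input plus one.

np.testing.assert_allclose(cuda_b.numpy(), cuda_a.numpy() + 1.0, rtol=1e-5)
print("GPU result matches the expected A + 1.0")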
https://github.com/zk-ml/tachikoma
gallery/tutorial/tvmc_command_line_driver.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Compiling and Optimizing a Model with TVMC ========================================== **Authors**: `Leandro Nunes <https://github.com/leandron>`_, `Matthew Barrett <https://github.com/mbaret>`_, `Chris Hoge <https://github.com/hogepodge>`_ In this section, we will work with TVMC, the TVM command line driver. TVMC is a tool that exposes TVM features such as auto-tuning, compiling, profiling and execution of models through a command line interface. Upon completion of this section, we will have used TVMC to accomplish the following tasks: * Compile a pre-trained ResNet-50 v2 model for the TVM runtime. * Run a real image through the compiled model, and interpret the output and model performance. * Tune the model on a CPU using TVM. * Re-compile an optimized model using the tuning data collected by TVM. * Run the image through the optimized model, and compare the output and model performance. The goal of this section is to give you an overview of TVM and TVMC's capabilities, and set the stage for understanding how TVM works. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore ################################################################################ # Using TVMC # ---------- # # TVMC is a Python application, part of the TVM Python package. # When you install TVM using a Python package, you will get TVMC as # as a command line application called ``tvmc``. The location of this command # will vary depending on your platform and installation method. # # Alternatively, if you have TVM as a Python module on your # ``$PYTHONPATH``,you can access the command line driver functionality # via the executable python module, ``python -m tvm.driver.tvmc``. # # For simplicity, this tutorial will mention TVMC command line using # ``tvmc <options>``, but the same results can be obtained with # ``python -m tvm.driver.tvmc <options>``. # # You can check the help page using: # # .. code-block:: bash # # tvmc --help # # The main features of TVM available to ``tvmc`` are from subcommands # ``compile``, and ``run``, and ``tune``. To read about specific options under # a given subcommand, use ``tvmc <subcommand> --help``. We will cover each of # these commands in this tutorial, but first we need to download a pre-trained # model to work with. # ################################################################################ # Obtaining the Model # ------------------- # # For this tutorial, we will be working with ResNet-50 v2. ResNet-50 is a # convolutional neural network that is 50 layers deep and designed to classify # images. The model we will be using has been pre-trained on more than a # million images with 1000 different classifications. 
The network has an input # image size of 224x224. If you are interested exploring more of how the # ResNet-50 model is structured, we recommend downloading `Netron # <https://netron.app>`_, a freely available ML model viewer. # # For this tutorial we will be using the model in ONNX format. # # .. code-block:: bash # # wget https://github.com/onnx/models/raw/b9a54e89508f101a1611cd64f4ef56b9cb62c7cf/vision/classification/resnet/model/resnet50-v2-7.onnx # ################################################################################ # .. admonition:: Supported model formats # # TVMC supports models created with Keras, ONNX, TensorFlow, TFLite # and Torch. Use the option ``--model-format`` if you need to # explicitly provide the model format you are using. See ``tvmc # compile --help`` for more information. # ################################################################################ # .. admonition:: Adding ONNX Support to TVM # # TVM relies on the ONNX python library being available on your system. You can # install ONNX using the command ``pip3 install --user onnx onnxoptimizer``. You # may remove the ``--user`` option if you have root access and want to install # ONNX globally. The ``onnxoptimizer`` dependency is optional, and is only used # for ``onnx>=1.9``. # ################################################################################ # Compiling an ONNX Model to the TVM Runtime # ------------------------------------------ # # Once we've downloaded the ResNet-50 model, the next step is to compile it. To # accomplish that, we are going to use ``tvmc compile``. The output we get from # the compilation process is a TAR package of the model compiled to a dynamic # library for our target platform. We can run that model on our target device # using the TVM runtime. # # .. code-block:: bash # # # This may take several minutes depending on your machine # tvmc compile \ # --target "llvm" \ # --input-shapes "data:[1,3,224,224]" \ # --output resnet50-v2-7-tvm.tar \ # resnet50-v2-7.onnx # # Let's take a look at the files that ``tvmc compile`` creates in the module: # # .. code-block:: bash # # mkdir model # tar -xvf resnet50-v2-7-tvm.tar -C model # ls model # # You will see three files listed. # # * ``mod.so`` is the model, represented as a C++ library, that can be loaded # by the TVM runtime. # * ``mod.json`` is a text representation of the TVM Relay computation graph. # * ``mod.params`` is a file containing the parameters for the pre-trained # model. # # This module can be directly loaded by your application, and the model can be # run via the TVM runtime APIs. ################################################################################ # .. admonition:: Defining the Correct Target # # Specifying the correct target (option ``--target``) can have a huge # impact on the performance of the compiled module, as it can take # advantage of hardware features available on the target. For more # information, please refer to :ref:`Auto-tuning a convolutional network for # x86 CPU <tune_relay_x86>`. We recommend identifying which CPU you are # running, along with optional features, and set the target appropriately. ################################################################################ # Running the Model from The Compiled Module with TVMC # ---------------------------------------------------- # # Now that we've compiled the model to this module, we can use the TVM runtime # to make predictions with it. 
TVMC has the TVM runtime built in to it, # allowing you to run compiled TVM models. To use TVMC to run the model and # make predictions, we need two things: # # - The compiled module, which we just produced. # - Valid input to the model to make predictions on. # # Each model is particular when it comes to expected tensor shapes, formats and # data types. For this reason, most models require some pre and # post-processing, to ensure the input is valid and to interpret the output. # TVMC has adopted NumPy's ``.npz`` format for both input and output data. This # is a well-supported NumPy format to serialize multiple arrays into a file. # # As input for this tutorial, we will use the image of a cat, but you can feel # free to substitute this image for any of your choosing. # # .. image:: https://s3.amazonaws.com/model-server/inputs/kitten.jpg # :height: 224px # :width: 224px # :align: center ################################################################################ # Input pre-processing # ~~~~~~~~~~~~~~~~~~~~ # # For our ResNet-50 v2 model, the input is expected to be in ImageNet format. # Here is an example of a script to pre-process an image for ResNet-50 v2. # # You will need to have a supported version of the Python Image Library # installed. You can use ``pip3 install --user pillow`` to satisfy this # requirement for the script. # # .. code-block:: python # :caption: preprocess.py # :name: preprocess.py # # #!python ./preprocess.py # from tvm.contrib.download import download_testdata # from PIL import Image # import numpy as np # # img_url = "https://s3.amazonaws.com/model-server/inputs/kitten.jpg" # img_path = download_testdata(img_url, "imagenet_cat.png", module="data") # # # Resize it to 224x224 # resized_image = Image.open(img_path).resize((224, 224)) # img_data = np.asarray(resized_image).astype("float32") # # # ONNX expects NCHW input, so convert the array # img_data = np.transpose(img_data, (2, 0, 1)) # # # Normalize according to ImageNet # imagenet_mean = np.array([0.485, 0.456, 0.406]) # imagenet_stddev = np.array([0.229, 0.224, 0.225]) # norm_img_data = np.zeros(img_data.shape).astype("float32") # for i in range(img_data.shape[0]): # norm_img_data[i, :, :] = (img_data[i, :, :] / 255 - imagenet_mean[i]) / imagenet_stddev[i] # # # Add batch dimension # img_data = np.expand_dims(norm_img_data, axis=0) # # # Save to .npz (outputs imagenet_cat.npz) # np.savez("imagenet_cat", data=img_data) # ################################################################################ # Running the Compiled Module # ~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # With both the model and input data in hand, we can now run TVMC to make a # prediction: # # .. code-block:: bash # # tvmc run \ # --inputs imagenet_cat.npz \ # --output predictions.npz \ # resnet50-v2-7-tvm.tar # # Recall that the ``.tar`` model file includes a C++ library, a description of # the Relay model, and the parameters for the model. TVMC includes the TVM # runtime, which can load the model and make predictions against input. When # running the above command, TVMC outputs a new file, ``predictions.npz``, that # contains the model output tensors in NumPy format. # # In this example, we are running the model on the same machine that we used # for compilation. In some cases we might want to run it remotely via an RPC # Tracker. To read more about these options please check ``tvmc run --help``. 
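################################################################################
# .. admonition:: Quick Sanity Check of the Output (sketch)
#
#   Before the full post-processing covered in the next section, a minimal
#   sketch like the one below can confirm the run produced an output tensor of
#   the expected shape. The ``output_0`` key is the name used for the first
#   output tensor in this tutorial's post-processing script; ResNet-50 v2
#   produces a ``(1, 1000)`` score tensor.
#
#   .. code-block:: python
#
#     import numpy as np
#
#     with np.load("predictions.npz") as data:
#         print(data["output_0"].shape)  # expect (1, 1000) for ResNet-50 v2
#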
################################################################################ # Output Post-Processing # ~~~~~~~~~~~~~~~~~~~~~~ # # As previously mentioned, each model will have its own particular way of # providing output tensors. # # In our case, we need to run some post-processing to render the outputs from # ResNet-50 v2 into a more human-readable form, using the lookup-table provided # for the model. # # The script below shows an example of the post-processing to extract labels # from the output of our compiled module. # # .. code-block:: python # :caption: postprocess.py # :name: postprocess.py # # #!python ./postprocess.py # import os.path # import numpy as np # # from scipy.special import softmax # # from tvm.contrib.download import download_testdata # # # Download a list of labels # labels_url = "https://s3.amazonaws.com/onnx-model-zoo/synset.txt" # labels_path = download_testdata(labels_url, "synset.txt", module="data") # # with open(labels_path, "r") as f: # labels = [l.rstrip() for l in f] # # output_file = "predictions.npz" # # # Open the output and read the output tensor # if os.path.exists(output_file): # with np.load(output_file) as data: # scores = softmax(data["output_0"]) # scores = np.squeeze(scores) # ranks = np.argsort(scores)[::-1] # # for rank in ranks[0:5]: # print("class='%s' with probability=%f" % (labels[rank], scores[rank])) # # Running this script should produce the following output: # # .. code-block:: bash # # python postprocess.py # # class='n02123045 tabby, tabby cat' with probability=0.610553 # # class='n02123159 tiger cat' with probability=0.367179 # # class='n02124075 Egyptian cat' with probability=0.019365 # # class='n02129604 tiger, Panthera tigris' with probability=0.001273 # # class='n04040759 radiator' with probability=0.000261 # # Try replacing the cat image with other images, and see what sort of # predictions the ResNet model makes. ################################################################################ # Automatically Tuning the ResNet Model # ------------------------------------- # # The previous model was compiled to work on the TVM runtime, but did not # include any platform specific optimization. In this section, we will show you # how to build an optimized model using TVMC to target your working platform. # # In some cases, we might not get the expected performance when running # inferences using our compiled module. In cases like this, we can make use of # the auto-tuner, to find a better configuration for our model and get a boost # in performance. Tuning in TVM refers to the process by which a model is # optimized to run faster on a given target. This differs from training or # fine-tuning in that it does not affect the accuracy of the model, but only # the runtime performance. As part of the tuning process, TVM will try running # many different operator implementation variants to see which perform best. # The results of these runs are stored in a tuning records file, which is # ultimately the output of the ``tune`` subcommand. # # In the simplest form, tuning requires you to provide three things: # # - the target specification of the device you intend to run this model on # - the path to an output file in which the tuning records will be stored, and # finally # - a path to the model to be tuned. # # The example below demonstrates how that works in practice: # # .. 
code-block:: bash # # # The default search algorithm requires xgboost, see below for further # # details on tuning search algorithms # pip install xgboost # # tvmc tune \ # --target "llvm" \ # --output resnet50-v2-7-autotuner_records.json \ # resnet50-v2-7.onnx # # In this example, you will see better results if you indicate a more specific # target for the ``--target`` flag. For example, on an Intel i7 processor you # could use ``--target llvm -mcpu=skylake``. For this tuning example, we are # tuning locally on the CPU using LLVM as the compiler for the specified # achitecture. # # TVMC will perform a search against the parameter space for the model, trying # out different configurations for operators and choosing the one that runs # fastest on your platform. Although this is a guided search based on the CPU # and model operations, it can still take several hours to complete the search. # The output of this search will be saved to the # ``resnet50-v2-7-autotuner_records.json`` file, which will later be used to # compile an optimized model. # # .. admonition:: Defining the Tuning Search Algorithm # # By default this search is guided using an ``XGBoost Grid`` algorithm. # Depending on your model complexity and amount of time avilable, you might # want to choose a different algorithm. A full list is available by # consulting ``tvmc tune --help``. # # The output will look something like this for a consumer-level Skylake CPU: # # .. code-block:: bash # # tvmc tune \ # --target "llvm -mcpu=broadwell" \ # --output resnet50-v2-7-autotuner_records.json \ # resnet50-v2-7.onnx # # [Task 1/24] Current/Best: 9.65/ 23.16 GFLOPS | Progress: (60/1000) | 130.74 s Done. # # [Task 1/24] Current/Best: 3.56/ 23.16 GFLOPS | Progress: (192/1000) | 381.32 s Done. # # [Task 2/24] Current/Best: 13.13/ 58.61 GFLOPS | Progress: (960/1000) | 1190.59 s Done. # # [Task 3/24] Current/Best: 31.93/ 59.52 GFLOPS | Progress: (800/1000) | 727.85 s Done. # # [Task 4/24] Current/Best: 16.42/ 57.80 GFLOPS | Progress: (960/1000) | 559.74 s Done. # # [Task 5/24] Current/Best: 12.42/ 57.92 GFLOPS | Progress: (800/1000) | 766.63 s Done. # # [Task 6/24] Current/Best: 20.66/ 59.25 GFLOPS | Progress: (1000/1000) | 673.61 s Done. # # [Task 7/24] Current/Best: 15.48/ 59.60 GFLOPS | Progress: (1000/1000) | 953.04 s Done. # # [Task 8/24] Current/Best: 31.97/ 59.33 GFLOPS | Progress: (972/1000) | 559.57 s Done. # # [Task 9/24] Current/Best: 34.14/ 60.09 GFLOPS | Progress: (1000/1000) | 479.32 s Done. # # [Task 10/24] Current/Best: 12.53/ 58.97 GFLOPS | Progress: (972/1000) | 642.34 s Done. # # [Task 11/24] Current/Best: 30.94/ 58.47 GFLOPS | Progress: (1000/1000) | 648.26 s Done. # # [Task 12/24] Current/Best: 23.66/ 58.63 GFLOPS | Progress: (1000/1000) | 851.59 s Done. # # [Task 13/24] Current/Best: 25.44/ 59.76 GFLOPS | Progress: (1000/1000) | 534.58 s Done. # # [Task 14/24] Current/Best: 26.83/ 58.51 GFLOPS | Progress: (1000/1000) | 491.67 s Done. # # [Task 15/24] Current/Best: 33.64/ 58.55 GFLOPS | Progress: (1000/1000) | 529.85 s Done. # # [Task 16/24] Current/Best: 14.93/ 57.94 GFLOPS | Progress: (1000/1000) | 645.55 s Done. # # [Task 17/24] Current/Best: 28.70/ 58.19 GFLOPS | Progress: (1000/1000) | 756.88 s Done. # # [Task 18/24] Current/Best: 19.01/ 60.43 GFLOPS | Progress: (980/1000) | 514.69 s Done. # # [Task 19/24] Current/Best: 14.61/ 57.30 GFLOPS | Progress: (1000/1000) | 614.44 s Done. # # [Task 20/24] Current/Best: 10.47/ 57.68 GFLOPS | Progress: (980/1000) | 479.80 s Done. 
# # [Task 21/24] Current/Best: 34.37/ 58.28 GFLOPS | Progress: (308/1000) | 225.37 s Done. # # [Task 22/24] Current/Best: 15.75/ 57.71 GFLOPS | Progress: (1000/1000) | 1024.05 s Done. # # [Task 23/24] Current/Best: 23.23/ 58.92 GFLOPS | Progress: (1000/1000) | 999.34 s Done. # # [Task 24/24] Current/Best: 17.27/ 55.25 GFLOPS | Progress: (1000/1000) | 1428.74 s Done. # # Tuning sessions can take a long time, so ``tvmc tune`` offers many options to customize your tuning # process, in terms of number of repetitions (``--repeat`` and ``--number``, for example), the tuning # algorithm to be used, and so on. Check ``tvmc tune --help`` for more information. # ################################################################################ # Compiling an Optimized Model with Tuning Data # ---------------------------------------------- # # As an output of the tuning process above, we obtained the tuning records # stored in ``resnet50-v2-7-autotuner_records.json``. This file can be used in # two ways: # # - As input to further tuning (via ``tvmc tune --tuning-records``). # - As input to the compiler # # The compiler will use the results to generate high performance code for the # model on your specified target. To do that we can use ``tvmc compile # --tuning-records``. Check ``tvmc compile --help`` for more information. # # Now that tuning data for the model has been collected, we can re-compile the # model using optimized operators to speed up our computations. # # .. code-block:: bash # # tvmc compile \ # --target "llvm" \ # --tuning-records resnet50-v2-7-autotuner_records.json \ # --output resnet50-v2-7-tvm_autotuned.tar \ # resnet50-v2-7.onnx # # Verify that the optimized model runs and produces the same results: # # .. code-block:: bash # # tvmc run \ # --inputs imagenet_cat.npz \ # --output predictions.npz \ # resnet50-v2-7-tvm_autotuned.tar # # python postprocess.py # # Verifying that the predictions are the same: # # .. code-block:: bash # # # class='n02123045 tabby, tabby cat' with probability=0.610550 # # class='n02123159 tiger cat' with probability=0.367181 # # class='n02124075 Egyptian cat' with probability=0.019365 # # class='n02129604 tiger, Panthera tigris' with probability=0.001273 # # class='n04040759 radiator' with probability=0.000261 ################################################################################ # Comparing the Tuned and Untuned Models # -------------------------------------- # # TVMC gives you tools for basic performance benchmarking between the models. # You can specify a number of repetitions and that TVMC report on the model run # time (independent of runtime startup). We can get a rough idea of how much # tuning has improved the model performance. For example, on a test Intel i7 # system, we see that the tuned model runs 47% faster than the untuned model: # # .. code-block:: bash # # tvmc run \ # --inputs imagenet_cat.npz \ # --output predictions.npz \ # --print-time \ # --repeat 100 \ # resnet50-v2-7-tvm_autotuned.tar # # # Execution time summary: # # mean (ms) max (ms) min (ms) std (ms) # # 92.19 115.73 89.85 3.15 # # tvmc run \ # --inputs imagenet_cat.npz \ # --output predictions.npz \ # --print-time \ # --repeat 100 \ # resnet50-v2-7-tvm.tar # # # Execution time summary: # # mean (ms) max (ms) min (ms) std (ms) # # 193.32 219.97 185.04 7.11 # ################################################################################ # Final Remarks # ------------- # # In this tutorial, we presented TVMC, a command line driver for TVM. 
We # demonstrated how to compile, run, and tune a model. We also discussed the # need for pre- and post-processing of inputs and outputs. After the tuning # process, we demonstrated how to compare the performance of the unoptimized # and optimized models. # # Here we presented a simple example using ResNet-50 v2 locally. However, TVMC # supports many more features, including cross-compilation, remote execution, and # profiling/benchmarking. # # To see what other options are available, please have a look at ``tvmc # --help``. # # In the next tutorial, `Compiling and Optimizing a Model with the Python # Interface <auto_tuning_with_pyton>`_, we will cover the same compilation # and optimization steps using the Python interface.
https://github.com/zk-ml/tachikoma
gallery/tutorial/tvmc_python.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Getting Starting using TVMC Python: a high-level API for TVM ============================================================= **Author**: `Jocelyn Shiue <https://github.com/CircleSpin>`_ Hi! Here we explain the scripting tool designed for the complete TVM beginner. πŸ™‚ Before we get started let's get an example model if you don't already have one. Follow the steps to download a resnet model via the terminal: .. code-block:: python mkdir myscripts cd myscripts wget https://github.com/onnx/models/raw/b9a54e89508f101a1611cd64f4ef56b9cb62c7cf/vision/classification/resnet/model/resnet50-v2-7.onnx mv resnet50-v2-7.onnx my_model.onnx touch tvmcpythonintro.py Let's start editing the python file in your favorite text editor. """ # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore ################################################################################ # Step 0: Imports # ~~~~~~~~~~~~~~~ # # .. code-block:: python # # from tvm.driver import tvmc # # ################################################################################ # Step 1: Load a model # ~~~~~~~~~~~~~~~~~~~~ # # Let's import our model into tvmc. This step converts a machine learning model from # a supported framework into TVM's high level graph representation language called Relay. # This is to have a unified starting point for all models in tvm. The frameworks we currently # support are: Keras, ONNX, Tensorflow, TFLite, and PyTorch. # # .. code-block:: python # # model = tvmc.load('my_model.onnx') #Step 1: Load # # If you'd like to see the Relay, you can run: # ``model.summary()`` # # All frameworks support overwriting the input shapes with a shape_dict argument. # For most frameworks this is optional, but for Pytorch this is necessary as # TVM cannot automatically search for it. # # .. code-block:: python # # #model = tvmc.load('my_model.onnx', shape_dict={'input1' : [1, 2, 3, 4], 'input2' : [1, 2, 3, 4]}) #Step 1: Load + shape_dict # # A suggested way to see the model's input/shape_dict is via `netron <https://netron.app/>`_. After opening the model, # click the first node to see the name(s) and shape(s) in the inputs section. ################################################################################ # Step 2: Compile # ~~~~~~~~~~~~~~~ # # Now that our model is in Relay, our next step is to compile it to a desired # hardware to run on. We refer to this hardware as a target. This compilation process # translates the model from Relay into a lower-level language that the # target machine can understand. # # In order to compile a model a tvm.target string is required. 
# To learn more about tvm.targets and their options look at the `documentation <https://tvm.apache.org/docs/api/python/target.html>`_. # Some examples include: # # 1. cuda (Nvidia GPU) # 2. llvm (CPU) # 3. llvm -mcpu=cascadelake (Intel CPU) # # .. code-block:: python # # package = tvmc.compile(model, target="llvm") #Step 2: Compile # # # The compilation step returns a package. # ################################################################################ # Step 3: Run # ~~~~~~~~~~~ # # The compiled package can now be run on the hardware target. The device # input options are: CPU, Cuda, CL, Metal, and Vulkan. # # .. code-block:: python # # result = tvmc.run(package, device="cpu") #Step 3: Run # # And you can print the results: # ``print(result)`` # ################################################################################ # Step 1.5: Tune [Optional & Recommended] # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Run speed can further be improved by tuning. This optional step uses # machine learning to look at each operation within a model (a function) and # tries to find a faster way to run it. We do this through a cost model, and # benchmarking possible schedules. # # The target is the same as compile. # # .. code-block:: python # # tvmc.tune(model, target="llvm") #Step 1.5: Optional Tune # # The terminal output should look like: # # .. code-block:: python # # [Task 1/13] Current/Best: 82.00/ 106.29 GFLOPS | Progress: (48/769) | 18.56 s # [Task 1/13] Current/Best: 54.47/ 113.50 GFLOPS | Progress: (240/769) | 85.36 s # ..... # # There may be UserWarnings that can be ignored. # This should make the end result faster, but it can take hours to tune. # # See the section 'Saving the Tuning Results' below. Be sure to pass the tuning # results into compile if you want the results to apply. # # .. code-block:: python # # #tvmc.compile(model, target="llvm", tuning_records = "records.log") #Step 2: Compile ################################################################################ # Save and then start the process in the terminal: # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # .. code-block:: python # # python my_tvmc_script.py # # Note: Your fans may become very active # ################################################################################ # Example results: # ~~~~~~~~~~~~~~~~ # # .. code-block:: python # # Time elapsed for training: 18.99 s # Execution time summary: # mean (ms) max (ms) min (ms) std (ms) # 25.24 26.12 24.89 0.38 # # # Output Names: # ['output_0'] # ################################################################################ # Additional TVMC Functionalities # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # ################################################################################ # Saving the model # ~~~~~~~~~~~~~~~~ # # To make things faster for later, after loading the model (Step 1) save the Relay version. # The model will then appear where you saved it for later in the coverted syntax. # # .. code-block:: python # # model = tvmc.load('my_model.onnx') #Step 1: Load # model.save(desired_model_path) # # ################################################################################ # Saving the package # ~~~~~~~~~~~~~~~~~~ # # After the model has been compiled (Step 2) the package also is also saveable. # # .. 
code-block:: python # # tvmc.compile(model, target="llvm", package_path="whatever") #Step 2: Compile # # new_package = tvmc.TVMCPackage(package_path="whatever") # result = tvmc.run(new_package, device="cpu") #Step 3: Run # # ################################################################################ # Using Autoscheduler # ~~~~~~~~~~~~~~~~~~~ # # Use the auto-scheduler, TVM's next-generation tuning, to enable potentially faster run speeds. # The schedule search space is generated automatically, unlike # before, when it had to be written by hand. (Learn more: # `1 <https://tvm.apache.org/2021/03/03/intro-auto-scheduler>`_, # `2 <https://arxiv.org/abs/2006.06762>`_) # # .. code-block:: python # # tvmc.tune(model, target="llvm", enable_autoscheduler=True) # # ################################################################################ # Saving the tuning results # ~~~~~~~~~~~~~~~~~~~~~~~~~ # # The tuning results can be saved in a file for later reuse. # # Method 1: # .. code-block:: python # # log_file = "hello.json" # # # Run tuning # tvmc.tune(model, target="llvm", tuning_records=log_file) # # ... # # # Later run tuning and reuse tuning results # tvmc.tune(model, target="llvm", prior_records=log_file) # # Method 2: # .. code-block:: python # # # Run tuning # tuning_records = tvmc.tune(model, target="llvm") # # ... # # # Later run tuning and reuse tuning results # tvmc.tune(model, target="llvm", prior_records=tuning_records) # ################################################################################ # Tuning a more complex model: # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # If you notice T's being printed that look like ``.........T.T..T..T..T.T.T.T.T.T.``, # increase the search time frame: # # .. code-block:: python # # tvmc.tune(model, trials=10000, timeout=10) # ################################################################################ # Compiling a model for a remote device: # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # A remote procedure call (RPC) is useful when you would like to compile for hardware # that is not on your local machine. The tvmc methods support this. # To set up the RPC server, take a look at the 'Set up RPC Server on Device' # section in this `document <https://tvm.apache.org/docs/tutorials/get_started/cross_compilation_and_rpc.html>`_. # # Within the TVMC script, include the following and adjust accordingly: # # .. code-block:: python # # tvmc.tune( # model, # target=target, # Compilation target as string // Device to compile for # target_host=target_host, # Host processor # hostname=host_ip_address, # The IP address of an RPC tracker, used when benchmarking remotely. # port=port_number, # The port of the RPC tracker to connect to. Defaults to 9090. # rpc_key=your_key, # The RPC tracker key of the target device. Required when rpc_tracker is provided # ) #
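################################################################################
# Putting it all together
# ~~~~~~~~~~~~~~~~~~~~~~~
#
# The steps above can be collected into one short script. This is a minimal
# sketch that assumes the ``my_model.onnx`` file downloaded earlier, a plain
# ``llvm`` (CPU) target, and ``records.json`` as the tuning log file name;
# adjust the paths, target, and file names for your setup.
#
# .. code-block:: python
#
#   from tvm.driver import tvmc
#
#   model = tvmc.load("my_model.onnx")                                # Step 1: Load
#   tvmc.tune(model, target="llvm", tuning_records="records.json")    # Step 1.5: Tune (optional)
#   package = tvmc.compile(model, target="llvm",
#                          tuning_records="records.json")             # Step 2: Compile
#   result = tvmc.run(package, device="cpu")                          # Step 3: Run
#   print(result)
#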
https://github.com/zk-ml/tachikoma
gallery/tutorial/uma.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ .. _tutorial-uma: Making your Hardware Accelerator TVM-ready with UMA =================================================== **Authors**: `Michael J. Klaiber <https://github.com/MichaelJKlaiber>`_, `Christoph Gerum <https://github.com/cgerum>`_, `Paul Palomero Bernardo <https://github.com/PaulPalomeroBernardo/>`_ """ ###################################################################### # This is an introductory tutorial to the **Universal Modular Accelerator Interface** (UMA). # UMA provides an easy-to-use API to integrate new hardware accelerators into TVM. # # This tutorial gives you step-by-step guidance on how to use UMA to # make your hardware accelerator TVM-ready. # While there is no one-size-fits-all solution for this problem, UMA aims to provide a stable and Python-only # API to integrate a number of hardware accelerator classes into TVM. # # # In this tutorial you will get to know the UMA API in three use cases of increasing complexity. # In these use cases, the three mock accelerators # **Vanilla**, **Strawberry** and **Chocolate** are introduced and # integrated into TVM using UMA. # # sphinx_gallery_start_ignore from tvm import testing testing.utils.install_request_hook(depth=3) # sphinx_gallery_end_ignore ###################################################################### # Vanilla # ------------- # **Vanilla** is a simple accelerator consisting of a MAC array and has no internal memory. # It can ONLY process Conv2D layers; all other layers are executed on a CPU, which also orchestrates **Vanilla**. # Both the CPU and Vanilla use a shared memory. # ###################################################################### # .. image:: https://raw.githubusercontent.com/apache/tvm-site/main/images/tutorial/uma_vanilla_block_diagram.png # :width: 100% # :alt: A block diagram of Vanilla # ###################################################################### # **Vanilla** has a C interface ``vanilla_conv2dnchw(...)`` for carrying out a Conv2D operation (including same-padding), # which accepts pointers to the input feature map, weights and result, # as well as the dimensions of `Conv2D`: `oc`, `iw`, `ih`, `ic`, `kh`, `kw`. # # .. code-block:: c++ # # int vanilla_conv2dnchw(float* ifmap, float* weights, float* result, int oc, int iw, int ih, int ic, int kh, int kw); ################################################################################ # The script `uma_cli` creates code skeletons with API calls into the UMA API for new accelerators. # # For **Vanilla** we use it as follows: (``--tutorial vanilla`` adds all the additional files required for this part of the tutorial) # # ..
code-block:: bash # # pip install inflection # cd $TVM_HOME/apps/uma # python uma_cli.py --add_hardware vanilla_accelerator --tutorial vanilla # ################################################################################ # uma_cli.py generates these files in the directory ``vanilla_accelerator``, which we are going to revisit. # # .. code-block:: bash # # backend.py # codegen.py # conv2dnchw.cc # passes.py # patterns.py # run.py # strategies.py ################################################################################ # Vanilla backend # # The generated backend for vanilla is found in `vanilla_accelerator/backend.py`: ###################################################################### # # .. code-block:: python # # class VanillaAcceleratorBackend(UMABackend): # """UMA backend for VanillaAccelerator.""" # # def __init__(self): # super().__init__() # # self._register_pattern("conv2d", conv2d_pattern()) # self._register_tir_pass(PassPhase.TIR_PHASE_0, VanillaAcceleratorConv2DPass()) # self._register_codegen(fmt="c", includes=gen_includes) # # @property # def target_name(self): # return "vanilla_accelerator" ################################################################################ # Define offloaded patterns # # To specify that `Conv2D` is offloaded to **Vanilla**, it is described as a Relay dataflow pattern # (`DFPattern <https://tvm.apache.org/docs/reference/langref/relay_pattern.html>`_) in `vanilla_accelerator/patterns.py` ################################################################################ # # .. code-block:: python # # def conv2d_pattern(): # pattern = is_op("nn.conv2d")(wildcard(), wildcard()) # pattern = pattern.has_attr({"strides": [1, 1]}) # return pattern ################################################################################ # To map **Conv2D** operations from the input graph to **Vanilla**'s # low-level function call ``vanilla_conv2dnchw(...)``, the TIR pass # *VanillaAcceleratorConv2DPass* (that will be discussed later in this tutorial) # is registered in `VanillaAcceleratorBackend`. ################################################################################ # Codegen ################################################################################ # The file ``vanilla_accelerator/codegen.py`` defines static C code that is added in ``gen_includes`` to the # resulting C code generated by TVM's C codegen. # Here C code is added to include **Vanilla**'s low-level library ``vanilla_conv2dnchw()``. # # .. code-block:: python # # def gen_includes() -> str: # topdir = pathlib.Path(__file__).parent.absolute() # # includes = "" # includes += f'#include "{topdir}/conv2dnchw.cc"' # return includes ################################################################################ # As shown above in `VanillaAcceleratorBackend`, it is registered with UMA via # `self._register_codegen` # # .. code-block:: python # # self._register_codegen(fmt="c", includes=gen_includes) ########################################################### # Building the Neural Network and running it on Vanilla # # To demonstrate UMA's functionality, we will generate C code for a single Conv2D layer and run it on # the Vanilla accelerator. # The file ``vanilla_accelerator/run.py`` provides a demo running a Conv2D layer # making use of Vanilla's C-API. # # # ..
code-block:: python # # def main(): # mod, inputs, output_list, runner = create_conv2d() # # uma_backend = VanillaAcceleratorBackend() # uma_backend.register() # mod = uma_backend.partition(mod) # target = tvm.target.Target("vanilla_accelerator", host=tvm.target.Target("c")) # # export_directory = tvm.contrib.utils.tempdir(keep_for_debug=True).path # print(f"Generated files are in {export_directory}") # compile_and_run( # AOTModel(module=mod, inputs=inputs, outputs=output_list), # runner, # interface_api="c", # use_unpacked_api=True, # target=target, # test_dir=str(export_directory), # ) # # # main() ############################################################ # By running ``vanilla_accelerator/run.py`` the output files are generated in the model library format (MLF). # ########################################################### # Output: # # .. code-block:: bash # # Generated files are in /tmp/tvm-debug-mode-tempdirs/2022-07-13T13-26-22___x5u76h0p/00000 ########################################################### # Let's examine the generated files: # # # Output: # # .. code-block:: bash # # cd /tmp/tvm-debug-mode-tempdirs/2022-07-13T13-26-22___x5u76h0p/00000 # cd build/ # ls -1 # # codegen # lib.tar # metadata.json # parameters # runtime # src ########################################################### # To evaluate the generated C code go to ``codegen/host/src/default_lib2.c`` # # .. code-block:: bash # # cd codegen/host/src/ # ls -1 # # default_lib0.c # default_lib1.c # default_lib2.c # ########################################################### # In `default_lib2.c` you can now see that the generated code calls # into Vanilla's C-API and executes a Conv2D layer: # # .. code-block:: c++ # # TVM_DLL int32_t tvmgen_default_vanilla_accelerator_main_0(float* placeholder, float* placeholder1, float* conv2d_nchw, uint8_t* global_workspace_1_var) { # vanilla_accelerator_conv2dnchw(placeholder, placeholder1, conv2d_nchw, 32, 14, 14, 32, 3, 3); # return 0; # } # ########################################################### # Strawberry # --------------- # Coming soon ... ########################################################### # Chocolate # -------------- # Coming soon ... # ###################################################################### # Request for Community Input # ----------------------------- # If this tutorial **did not** fit to your accelerator, lease add your requirements to the UMA thread in # the TVM discuss forum: `Link <https://discuss.tvm.apache.org/t/rfc-uma-universal-modular-accelerator-interface/12039>`_. # We are eager to extend this tutorial to provide guidance on making further classes of AI hardware # accelerators TVM-ready using the UMA interface. # ###################################################################### # References # ----------- # [UMA-RFC] `UMA: Universal Modular Accelerator Interface <https://github.com/apache/tvm-rfcs/blob/main/rfcs/0060_UMA_Unified_Modular_Accelerator_Interface.md>`_, # TVM RFC, June 2022. # # [DFPattern] `Pattern Matching in Relay <https://tvm.apache.org/docs/reference/langref/relay_pattern.html>`_ #
https://github.com/zk-ml/tachikoma
golang/sample/deploy.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Get Started with TVM Go ======================= """ from __future__ import absolute_import, print_function import tvm from tvm import te import numpy as np # Global declarations of environment. tgt = "llvm" ###################################################################### # Describe the Computation # ------------------------ n = te.var("n") A = te.placeholder((n,), name="A") B = te.placeholder((n,), name="B") C = te.compute(A.shape, lambda i: A[i] + B[i], name="C") ###################################################################### # Schedule the Computation # ------------------------ s = te.create_schedule(C.op) ###################################################################### # Compilation # ----------- fadd = tvm.build(s, [A, B, C], tgt, name="myadd") ###################################################################### # Save Compiled Module # -------------------- from tvm.contrib import cc from tvm.contrib import utils fadd.save("deploy.o") cc.create_shared("deploy.so", ["deploy.o"])
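######################################################################
# Verify the Module
# -----------------
# As a quick sanity check (a small illustrative sketch, not required for the
# Go sample), run the freshly built function on random NumPy inputs and
# compare the result against NumPy's own addition. The vector length of 1024
# is an arbitrary choice for this check.

dev = tvm.device(tgt, 0)
n_elems = 1024
a = tvm.nd.array(np.random.uniform(size=n_elems).astype(A.dtype), dev)
b = tvm.nd.array(np.random.uniform(size=n_elems).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros(n_elems, dtype=C.dtype), dev)
fadd(a, b, c)  # run the compiled kernel
np.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())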
https://github.com/zk-ml/tachikoma
golang/sample/gen_mobilenet_lib.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os from tvm import relay, transform, runtime from tvm.contrib.download import download_testdata ################################################ # Utils for downloading and extracting zip files # ---------------------------------------------- def extract(path): import tarfile if path.endswith("tgz") or path.endswith("gz"): dir_path = os.path.dirname(path) tar = tarfile.open(path) tar.extractall(path=dir_path) tar.close() else: raise RuntimeError("Could not decompress the file: " + path) ################################### # Download TFLite pre-trained model # --------------------------------- model_url = "https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz" model_path = download_testdata(model_url, "mobilenet_v2_1.4_224.tgz", module=["tf", "official"]) model_dir = os.path.dirname(model_path) extract(model_path) # now we have mobilenet_v2_1.4_224.tflite on disk model_file = os.path.join(model_dir, "mobilenet_v2_1.4_224.tflite") # get TFLite model from buffer tflite_model_buf = open(model_file, "rb").read() try: import tflite tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0) except AttributeError: import tflite.Model tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0) ############################## # Load Neural Network in Relay # ---------------------------- # TFLite input tensor name, shape and type input_tensor = "input" input_shape = (1, 224, 224, 3) input_dtype = "float32" # parse TFLite model and convert into Relay computation graph mod, params = relay.frontend.from_tflite( tflite_model, shape_dict={input_tensor: input_shape}, dtype_dict={input_tensor: input_dtype} ) ############# # Compilation # ----------- target = "llvm" # Build with Relay with transform.PassContext(opt_level=3): graph, lib, params = relay.build_module.build(mod, target, params=params) ############################################### # Save the graph, lib and parameters into files # --------------------------------------------- lib.export_library("./mobilenet.so") print("lib export succeefully") with open("./mobilenet.json", "w") as fo: fo.write(graph) with open("./mobilenet.params", "wb") as fo: fo.write(runtime.save_param_dict(params))
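################################################
# Sanity-check the exported artifacts (optional)
# ----------------------------------------------
# A minimal sketch that loads the files saved above back into Python and runs
# a single inference on a random input, just to confirm that the exported
# graph, library and parameters are consistent with each other.
import numpy as np
import tvm
from tvm.contrib import graph_executor

loaded_lib = tvm.runtime.load_module("./mobilenet.so")
loaded_graph = open("./mobilenet.json").read()
loaded_params = bytearray(open("./mobilenet.params", "rb").read())

dev = tvm.device(target, 0)
module = graph_executor.create(loaded_graph, loaded_lib, dev)
module.load_params(loaded_params)
module.set_input(input_tensor, np.random.uniform(size=input_shape).astype(input_dtype))
module.run()
print("output shape:", module.get_output(0).numpy().shape)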
https://github.com/zk-ml/tachikoma
golang/src/gotvm.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \brief gotvm native interface declaration. * \file gotvm.h * * These declarations are in cgo interface definition while calling API * across golang and native C boundaries. */ #ifndef GOTVM_GOTVM_H_ #define GOTVM_GOTVM_H_ #ifdef __cplusplus extern "C" { #endif #include <dlpack/dlpack.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <tvm/runtime/c_runtime_api.h> // Some type definitions for golang "C" typedef void* native_voidp; // Version extern char* _TVM_VERSION(void); // Wrappers : For incompatible cgo API. // To handle array of strings wrapped into __gostring__ extern int _TVMFuncListGlobalNames(void*); // To handle TVMValue slice to/from native sequential TVMValue array. extern void _TVMValueNativeSet(void* to, void* from, int index); extern void _TVMValueNativeGet(void* to, void* from, int index); // Callbacks extern int _ConvertFunction(void* fptr, void* funp); #ifdef __cplusplus } #endif #endif // GOTVM_GOTVM_H_
https://github.com/zk-ml/tachikoma
include/tvm/arith/analyzer.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/arith/analyzer.h * \brief Algebra expression simplifications. */ #ifndef TVM_ARITH_ANALYZER_H_ #define TVM_ARITH_ANALYZER_H_ #include <tvm/arith/int_set.h> #include <tvm/ir/expr.h> #include <tvm/support/with.h> #include <limits> #include <memory> #include <unordered_map> #include <vector> namespace tvm { /*! \brief namespace of arithmetic analysis. */ namespace arith { //------------------------------------------------------- // Base integer analysis API. // // We have multiple type of analyzers to do relaxed // integer set analysis(bound analysis, modulo) and // equivalence checking and simplification. // // Importantly, each analyzer may need result from // another analyzer. //------------------------------------------------------- // Forward declare Analyzer class Analyzer; using tir::Var; enum DivMode { /*! \brief Truncated division. */ kTruncDiv, /*! \brief Floor division. */ kFloorDiv }; /*! * \brief Constant integer up and lower bound(inclusive). * Useful for value bound analysis. * * set = [min_value, max_value] */ class ConstIntBoundNode : public Object { public: int64_t min_value; int64_t max_value; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("min_value", &min_value); v->Visit("max_value", &max_value); } bool SEqualReduce(const ConstIntBoundNode* other, SEqualReducer equal) const { return equal(min_value, other->min_value) && equal(max_value, other->max_value); } /*! \brief Number to represent +inf */ static const constexpr int64_t kPosInf = std::numeric_limits<int64_t>::max(); /*! * \brief Number to represent -inf * \note We can make use the of fact that -kPosInf == kNegInf in the project. */ static const constexpr int64_t kNegInf = -kPosInf; static constexpr const char* _type_key = "arith.ConstIntBound"; TVM_DECLARE_FINAL_OBJECT_INFO(ConstIntBoundNode, Object); }; /*! * \brief reference class to ConstIntBoundNode * \sa ConstIntBoundNode */ class ConstIntBound : public ObjectRef { public: /*! * \brief constructor by fields. * \param min_value The mininum value. * \param max_value The maximum value. */ TVM_DLL ConstIntBound(int64_t min_value, int64_t max_value); static const constexpr int64_t kPosInf = ConstIntBoundNode::kPosInf; static const constexpr int64_t kNegInf = ConstIntBoundNode::kNegInf; TVM_DEFINE_OBJECT_REF_METHODS(ConstIntBound, ObjectRef, ConstIntBoundNode); }; /*! * \brief Analyzer to get constant integer bound over expression. */ class ConstIntBoundAnalyzer { public: using BoundMapType = std::unordered_map<PrimExpr, ConstIntBound, ObjectPtrHash, ObjectPtrEqual>; /*! * \brief analyze the expr * \param expr The expression of interest. * \return the result of the analysis. */ TVM_DLL ConstIntBound operator()(const PrimExpr& expr) const; /*! 
* \brief analyze the expr with the intermediate memorized to avoid redundant computation * \param expr The expression of interest. * \param bound The lookup table to store the intermediate results * \return the result of the analysis. */ TVM_DLL ConstIntBound operator()(const PrimExpr& expr, BoundMapType* bound); /*! * \brief Update constant int bound information of var. * * \param var The variable of interest. * \param info The bound information. * \param allow_override whether we allow override of existing information. */ TVM_DLL void Update(const Var& var, const ConstIntBound& info, bool allow_override = false); /*! * \brief Bind variable to a range. * * \param var The variable. * \param range The range we bind to. * \param allow_override Whether we allow overriding an existing var's range. */ TVM_DLL void Bind(const Var& var, const Range& range, bool allow_override = false); private: friend class Analyzer; friend class ConstraintContext; explicit ConstIntBoundAnalyzer(Analyzer* parent); TVM_DLL ~ConstIntBoundAnalyzer(); /*! * \brief Update the internal state to enter constraint. * \param constraint A constraint expression. * * \return an exit function that must be called to cleanup the constraint can be nullptr. */ std::function<void()> EnterConstraint(const PrimExpr& constraint); struct Entry; class Impl; /*! \brief Internal impl */ Impl* impl_; }; /*! * \brief Range of a linear integer function. * Use to do specify the possible index values. * * set = { coeff * x + base | x in Z } * * When coeff != 0, it can also be written as * set = { n | n % coeff == base } * * This is useful to decide if the index is dividable by certain value. * For example, if index = 0 + 4 x, then we know it can be divided by 4. */ class ModularSetNode : public Object { public: /*! \brief linear co-efficient */ int64_t coeff; /*! \brief The base */ int64_t base; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("coeff", &coeff); v->Visit("base", &base); } bool SEqualReduce(const ModularSetNode* other, SEqualReducer equal) const { return equal(coeff, other->coeff) && equal(base, other->base); } static constexpr const char* _type_key = "arith.ModularSet"; TVM_DECLARE_FINAL_OBJECT_INFO(ModularSetNode, Object); }; /*! * \brief reference of ModularSetNode * \sa ModularSetNode */ class ModularSet : public ObjectRef { public: TVM_DLL ModularSet(int64_t coeff, int64_t base); TVM_DEFINE_OBJECT_REF_METHODS(ModularSet, ObjectRef, ModularSetNode); }; /*! * \brief Analyzer to get modular information over expression. */ class ModularSetAnalyzer { public: /*! * \brief analyze the expr * \param expr The expression of interest. * \return the result of the analysis. */ TVM_DLL ModularSet operator()(const PrimExpr& expr); /*! * \brief Update constant int bound information of var. * * \param var The variable of interest. * \param info The bound information. * \param allow_override whether we allow override of existing information. */ TVM_DLL void Update(const Var& var, const ModularSet& info, bool allow_override = false); private: friend class Analyzer; friend class ConstraintContext; explicit ModularSetAnalyzer(Analyzer* parent); TVM_DLL ~ModularSetAnalyzer(); /*! * \brief Update the internal state to enter constraint. * \param constraint A constraint expression. * * \return an exit function that must be called to cleanup the constraint can be nullptr. */ std::function<void()> EnterConstraint(const PrimExpr& constraint); struct Entry; class Impl; /*! \brief Internal impl */ Impl* impl_; }; /*! 
* \brief Rewrite-rule based simplifier. */ class RewriteSimplifier { public: /*! * \brief analyze the expr * \param expr The expression of interest. * \return the result of the analysis. */ TVM_DLL PrimExpr operator()(const PrimExpr& expr); /*! * \brief Update binding of var to a new expression. * * \param var The variable of interest. * \param new_expr * \param allow_override Whether we allow override of existing information. */ TVM_DLL void Update(const Var& var, const PrimExpr& new_expr, bool allow_override = false); /*! * \brief Update the internal state to enter constraint. * \param constraint A constraint expression. * * \return an exit function that must be called to cleanup the constraint can be nullptr. */ TVM_DLL std::function<void()> EnterConstraint(const PrimExpr& constraint); /*! \brief Flags to enable more computationally-intensive simplifications * * These simplifications may be required for specific schedules, but * would impose too high a compile-time cost to enable by default. * They can be enabled on an as-needed basis by calling * `RewriteSimplifier::SetEnabledExtensions` prior to using * `RewriteSimplifier::operator()`. * * Flags are defined as powers of two to allow future expansion. To * enable multiple extensions, a user should pass a bitwise OR of the * flags for each desired extension. */ enum Extension { // No extensions enabled kNone = 0, /* When simplifying an inequality, attempt to use scope-based knowns. * * Example: * if_then_else(i<j && j<k, i<k, false) => if_then_else(i<j && j<k, true, false) */ kTransitivelyProveInequalities = (1 << 0), /* When simplifying a boolean expression, convert to an AND of ORs * (conjunctive normal form). * * Example: * (a && b) || c => (a || c) && (b || c) */ kConvertBooleanToAndOfOrs = (1 << 1), /* When simplifying a boolean AND or a boolean OR, simplify each * branch under the assumption that the other branch does not * already dominate the result. That is, simplify each branch of * (A && B) under the assumption that the other branch is true, * and simplify each branch of (A || B) under the assumption that * the other branch is false. * * Example: * (n < 10) && (n < 5) => (n < 10) * (n < 10) || (n < 5) => (n < 5) */ kApplyConstraintsToBooleanBranches = (1 << 2), }; /*! \brief Enable an optional extension or extensions * * \param flags A bitwise OR of all optional extensions that should * be enabled. */ TVM_DLL void SetEnabledExtensions(Extension flags); /*! \brief Return the currently enabled extensions */ TVM_DLL Extension GetEnabledExtensions() const; private: friend class Analyzer; friend class ConstraintContext; friend class CanonicalSimplifier; explicit RewriteSimplifier(Analyzer* parent); TVM_DLL ~RewriteSimplifier(); class Impl; /*! \brief Internal impl */ Impl* impl_; }; /*! * \brief Canonical-form based simplifier. */ class CanonicalSimplifier { public: /*! * \brief analyze the expr * \param expr The expression of interest. * \return the result of the analysis. */ TVM_DLL PrimExpr operator()(const PrimExpr& expr); /*! * \brief Update binding of var to a new expression. * * \param var The variable of interest. * \param new_expr * \param allow_override whether we allow override of existing information. */ TVM_DLL void Update(const Var& var, const PrimExpr& new_expr, bool allow_override = false); private: friend class Analyzer; friend class ConstraintContext; explicit CanonicalSimplifier(Analyzer* parent); TVM_DLL ~CanonicalSimplifier(); class Impl; /*! \brief Internal impl */ Impl* impl_; }; /*! 
\brief Structure for representing result of known * * Values are assigned to allow these flags to be used in bitwise * operations. */ enum class CompareResult : int { kInconsistent = 0, kEQ = 1, kLT = 2, kLE = 3, kGT = 4, kGE = 5, kNE = 6, kUnknown = 7 }; inline constexpr CompareResult operator&(CompareResult lhs, CompareResult rhs) { return CompareResult(static_cast<int>(lhs) & static_cast<int>(rhs)); } inline constexpr CompareResult operator|(CompareResult lhs, CompareResult rhs) { return CompareResult(static_cast<int>(lhs) | static_cast<int>(rhs)); } /*! * \brief Using previously specified knowns, compare the expressions provided * * Given known expressions [(a OP b), (b OP c), ..., (y OP z)], search * for a known result for `(a OP z)`. */ class TransitiveComparisonAnalyzer { public: /* \brief Using previously specified knowns, compare the expressions provided * * \param lhs The left-hand side of the comparison * * \param rhs The right-hand side of the comparison * * \param propagate_inequalities If true, attempt to find a sequence * of transitive inequalities that allow the lhs and rhs to be * compared. If false, only use the known comparison that have been * directly provided. Using `propagate_inequalities = false` is * roughly equivalent to comparing against all known inequality * expressions using `ExprDeepEqual`, but also allows for constant * offsets on either side of the inequality. * * \return The most specific result that can be proven about the * comparison. If nothing can be proven, returns kUnknown. */ TVM_DLL CompareResult TryCompare(const PrimExpr& lhs, const PrimExpr& rhs, bool propagate_inequalities = true); /*! \brief Bind a variable as being equal to a known expression * * \param var The variable of interest. * \param expr The bound expression * \param allow_override Whether to allow override of existing information. */ TVM_DLL void Bind(const Var& var, const PrimExpr& expr, bool allow_override = false); /*! \brief Bind a variable as being within a specified range * * \param var The variable of interest. * \param range The known range * \param allow_override Whether to allow override of existing information. */ TVM_DLL void Bind(const Var& var, const Range& range, bool allow_override = false); /*! * \brief Update the internal state to enter constraint. * \param constraint A constraint expression. * * \return an exit function that must be called to cleanup the constraint can be nullptr. */ TVM_DLL std::function<void()> EnterConstraint(const PrimExpr& constraint); private: friend class Analyzer; friend class ConstraintContext; TransitiveComparisonAnalyzer(); TVM_DLL ~TransitiveComparisonAnalyzer(); class Impl; /*! \brief Internal impl */ std::unique_ptr<Impl> impl_; }; /*! * \brief Constraint context. * * \code * * Var("x"); * arith::Analyzer analyzer; * { * With<arith::ConstraintContext> scope(&analyzer, x % 3 == 0); * ICHECK_EQ(analyzer.modular_set(x)->coeff, 3); * } * // constraint no longer in effect. * ICHECK_NE(analyzer.modular_set(x)->coeff, 3); * * \endcode */ class ConstraintContext { private: // declare friend to enable with. friend class With<ConstraintContext>; /*! * \brief Construct a constraint context. * \param analyzer The analyzer. * \param constraint The constraint to be applied. */ ConstraintContext(Analyzer* analyzer, PrimExpr constraint) : analyzer_(analyzer), constraint_(constraint) {} // enter the scope. void EnterWithScope(); // exit the scope. void ExitWithScope(); /*! \brief The analyzer */ Analyzer* analyzer_; /*! 
\brief The constraint */ PrimExpr constraint_; /*! \brief function to be called in recovery */ std::vector<std::function<void()>> recovery_functions_; }; /*! * \brief Integer set analyzer. */ class IntSetAnalyzer { public: /*! * \brief Find a symbolic integer set that contains all possible values of * expr given the domain of each variables. * * \param expr The expression of interest. * \param dom_map The domain map to indicate which variable to relax. * \return the result of the analysis. */ TVM_DLL IntSet operator()(const PrimExpr& expr, const Map<Var, IntSet>& dom_map); /*! * \brief Find a symbolic integer set that contains all possible * values of expr given the domain of each variables, using * the domain map defined by bound variables. * * \param expr The expression of interest. * \return the result of the analysis. */ TVM_DLL IntSet operator()(const PrimExpr& expr); /*! * \brief Update binding of var to a new expression. * * \param var The variable of interest. * \param new_interval_set The set of allowed values for this var. * \param allow_override whether we allow override of existing information. */ TVM_DLL void Update(const Var& var, const IntSet& new_interval_set, bool allow_override = false); /*! * \brief Update binding of var to a new expression. * * \param var The variable of interest. * \param new_range The range of allowed values for this var. * \param allow_override whether we allow override of existing information. */ TVM_DLL void Bind(const Var& var, const Range& new_range, bool allow_override = false); std::function<void()> EnterConstraint(const PrimExpr& constraint); private: friend class Analyzer; explicit IntSetAnalyzer(Analyzer* parent); TVM_DLL ~IntSetAnalyzer(); class Impl; /*! \brief Internal impl */ Impl* impl_; }; /*! * \brief Analyzer that contains bunch of sub-analyzers. * * Each sub-analyzer can make use of another sub-analyzer * by weak reference of this. * * NOTE for sub-analyzer developers: * If the analyzer uses memoization, we need to clear the internal * cache when information about a Var has been overridden. */ class TVM_DLL Analyzer { public: /* * Disable copy constructor. */ Analyzer(const Analyzer&) = delete; Analyzer& operator=(const Analyzer&) = delete; /*! \brief sub-analyzer: const integer bound */ ConstIntBoundAnalyzer const_int_bound; /*! \brief sub-analyzer: modular set */ ModularSetAnalyzer modular_set; /*! \brief sub-analyzer rewrite simplify */ RewriteSimplifier rewrite_simplify; /*! \brief sub-analyzer canonical simplify */ CanonicalSimplifier canonical_simplify; /*! \brief sub-analyzer: int set */ IntSetAnalyzer int_set; /*! \brief sub-analyzer transitive comparisons */ TransitiveComparisonAnalyzer transitive_comparisons; /*! \brief constructor */ Analyzer(); /*! * \brief Notify all the sub-analyzers that var * is created and binded to expr. * * Each var can only be bound once. * * \param var The variable. * \param expr The expression we bind to. * \param allow_override Whether we allow overriding an existing var's * expression. This option should not be used if there is any dependency * between variables. */ void Bind(const Var& var, const PrimExpr& expr, bool allow_override = false); /*! * \brief Notify all the sub-analyzers that var * is created and binded to a range. * * Each var can only be binded once. * * \param var The variable. * \param range The range we bind to. * \param allow_override Whether we allow overriding an existing var's * expression. This option should not be used if there is any dependency * between variables. 
*/ void Bind(const Var& var, const Range& range, bool allow_override = false); /*! * \brief Bind all the vars in the Map * * \param variables The {variable -> range} map. * \param allow_override Whether we allow overriding an existing var's * expression. This option should not be used if there is any dependency * between variables. */ void Bind(const Map<Var, Range>& variables, bool allow_override = false); /*! * \brief Whether can we prove expr >= val. * Non-negative proof is very useful in integer analysis * to lower divisions and mods given difference in trunc and ceil mode. * * \param expr The expression. * \param lower_bound The lower bound. * \return Whether we can prove it. * * \note Analyzer will call into sub-analyzers to get the result. */ bool CanProveGreaterEqual(const PrimExpr& expr, int64_t lower_bound); /*! * \brief Whether can we prove expr < val. * Non-negative proof is very useful in integer analysis * to lower divisions and mods given difference in trunc and ceil mode. * * \param expr The expression. * \param upper_bound The upper bound. * \return Whether we can prove it. * * \note Analyzer will call into sub-analyzers to get the result. */ bool CanProveLess(const PrimExpr& expr, int64_t upper_bound); /*! * \brief Whether can we prove lhs == rhs. * * \param lhs The input lhs. * \param rhs The input rhs. * \return Whether we can prove lhs == rhs. * * \note Analyzer will call into sub-analyzers to get the result. */ bool CanProveEqual(const PrimExpr& lhs, const PrimExpr& rhs); /*! * \brief Whether can we prove condition. * * \param cond The expression to be proved. * \return The result. * * \note Analyzer will call into sub-analyzers to get the result. */ bool CanProve(const PrimExpr& cond); /*! * \brief Simplify expr. * * \param expr The expression to be simplified. * \param steps The simplification runs in the order of * rewrite_simplify (step 1) -> canonical_simplify (step 2) -> * rewrite_simplify (step 3) -> canonical_simplify (step 4) -> ... * param steps controls how many steps to run. * Default is 2, i.e., rewrite_simplify + canonical_simplify. * \return The result. * * \note Analyzer will call into sub-analyzers to get the result. */ PrimExpr Simplify(const PrimExpr& expr, int steps = 2); }; } // namespace arith } // namespace tvm #endif // TVM_ARITH_ANALYZER_H_
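/*!
 * \brief Example usage of Analyzer (an illustrative sketch only; it is not
 *  part of the original header, and minor details may differ between TVM
 *  versions).
 *
 * \code
 *
 *  tvm::arith::Analyzer analyzer;
 *  tvm::tir::Var x("x");
 *  // Bind x to the range [0, 16).
 *  analyzer.Bind(x, tvm::Range::FromMinExtent(0, 16));
 *  // With that bound, floordiv(x, 16) simplifies to 0 ...
 *  tvm::PrimExpr simplified = analyzer.Simplify(tvm::floordiv(x, 16));
 *  // ... and x < 16 can be proven to hold.
 *  bool provable = analyzer.CanProve(x < 16);
 *
 * \endcode
 */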
https://github.com/zk-ml/tachikoma
include/tvm/arith/bound.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/arith/bound.h * \brief Bound deducers. */ #ifndef TVM_ARITH_BOUND_H_ #define TVM_ARITH_BOUND_H_ #include <tvm/arith/int_set.h> #include <tvm/ir/expr.h> #include <tvm/tir/expr.h> #include <tvm/tir/stmt.h> #include <unordered_map> namespace tvm { namespace arith { using tir::Region; using tir::Stmt; using tir::Var; using tir::VarNode; /*! * \brief Deduce the bound of the target variable in an expression, * given the domain of each variable. Return an undefined IntSet to * represent failure. * * \note The returned set may be smaller than the set that * contains all possible values of v that satisfy the condition. * * \param v The target variable to be deduced. * \param cond The conditional expression. * \param hint_map The domain of variables, used to help deduce. * \param relax_map The domain of each variable, used to relax the domain, * The deduced bound must imply e for all values in relax_map * \return An integer set that always satisfies the condition. */ IntSet DeduceBound(PrimExpr v, PrimExpr cond, const Map<Var, IntSet>& hint_map, const Map<Var, IntSet>& relax_map); /*! * \brief Same as DeduceBound with unordered_map signature. * * \param v The target variable to be deduced. * \param cond The conditional expression. * \param hint_map The domain of variables, used to help deduce. * \param relax_map The domain of each variable, used to relax the domain, * The deduced bound must imply e for all values in relax_map * \return An integer set that always satisfies the condition. */ IntSet DeduceBound(PrimExpr v, PrimExpr cond, const std::unordered_map<const VarNode*, IntSet>& hint_map, const std::unordered_map<const VarNode*, IntSet>& relax_map); /*! * \brief Infer a regular domain that covers all the calls or provides within the given statement. * \param body The given statement. * \param buffer The buffer to check the access info. * \param consider_loads If loads are considered. * \param consider_stores If stores are considered. * \return The domain that covers all the calls or provides within the given statement. */ Region DomainTouched(const Stmt& body, const tir::Buffer& buffer, bool consider_loads, bool consider_stores); } // namespace arith } // namespace tvm #endif  // TVM_ARITH_BOUND_H_
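/*!
 * \brief Example usage of DeduceBound (an illustrative sketch only; it is not
 *  part of the original header). Given that y lies in [0, 16], we ask for a
 *  set of x values satisfying 4 * x < y, passing y's range as a hint.
 *
 * \code
 *
 *  using namespace tvm;
 *  tir::Var x("x"), y("y");
 *  Map<tir::Var, arith::IntSet> hint_map, relax_map;
 *  hint_map.Set(y, arith::IntSet::Interval(0, 16));
 *  arith::IntSet bound = arith::DeduceBound(x, 4 * x < y, hint_map, relax_map);
 *
 * \endcode
 */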
https://github.com/zk-ml/tachikoma
include/tvm/arith/int_set.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/arith/int_set.h * \brief Integer set */ #ifndef TVM_ARITH_INT_SET_H_ #define TVM_ARITH_INT_SET_H_ #include <tvm/ir/expr.h> #include <tvm/tir/expr.h> #include <unordered_map> namespace tvm { namespace arith { using tir::IterVar; using tir::Var; using tir::VarNode; class Analyzer; //----------------------------------------------- // Integer set data structure. // // This is a API build on top of the base // integer analysis API to provide set analysis. //------------------------------------------------ /*! * \brief Sign type of an integer expression. */ enum SignType { kPositive, kNegative, kZero, kUnknown }; /*! * \brief Base class of all Integer set containers. * represent a set of integers in one dimension. * \sa IntSet */ class IntSetNode : public Object { public: static constexpr const char* _type_key = "IntSet"; static constexpr bool _type_has_method_sequal_reduce = false; TVM_DECLARE_BASE_OBJECT_INFO(IntSetNode, Object); }; /*! * \brief Managed reference to IntSetNode. * \sa IntSetNode */ class IntSet : public ObjectRef { public: /*! * \brief Find a range that covers the region. * \param max_range The range to be covered. * \return The covering range. */ Range CoverRange(Range max_range) const; /*! \return Lower bound of the set */ PrimExpr min() const; /*! \return upper bound of the set */ PrimExpr max() const; /*! \return The sign of the elements in the integer set */ SignType GetSignType() const; /*! \return Whether the set represent nothing */ bool IsNothing() const; /*! \return Whether the set represent everything */ bool IsEverything() const; /*! \return Whether the set is a single point */ bool IsSinglePoint() const; /*! \return Whether the set is proved to be bigger than 0 */ bool CanProvePositive() const; /*! \return Whether the set is proved to be smaller than 0 */ bool CanProveNegative() const; /*! \return Whether the set is proved to be smaller than or equal to 0 */ bool CanProveNonPositive() const; /*! \return Whether the set is proved to be larger than or equal to 0 */ bool CanProveNonNegative() const; /*! \return Whether the set has upper bound. */ bool HasUpperBound() const; /*! \return Whether the set has lower bound. */ bool HasLowerBound() const; /*! * \brief The single point value, call only if IsSinglePoint is true * \return The point value. */ PrimExpr PointValue() const; /*! * \brief Try to match IntSet with range r. * * \note It is guanrateed that IntSet::FromRange(r).MatchRange(r) == true * \return true if we can prove they are the same. */ bool MatchRange(const tvm::Range& r) const; /*! \return The set contains nothing */ static IntSet Nothing(); /*! \return The set contains everything */ static IntSet Everything(); /*! * \brief construct a point set. 
* \param point The point in the set. * \return construct a single point set */ static IntSet SinglePoint(PrimExpr point); /*! * \brief construct a integer set from vector expression. * \param vec The vector expression, can also be single point. * \return The result set containing the indices in the vector. */ static IntSet Vector(PrimExpr vec); /*! * \brief Construct a set representing a range [min, min + extent). * \param min The minimum of the range range * \param extent The extent of the range. * \return The constructed set. */ static IntSet FromMinExtent(PrimExpr min, PrimExpr extent); /*! * \brief Construct a set representing a range. * \param r The range * \return The constructed set. */ static IntSet FromRange(tvm::Range r); /*! * \brief Construct a set representing a interval. * \param min The minimum value of the interval. * \param max The maximum value of the interval. * \return The constructed set. */ static IntSet Interval(PrimExpr min, PrimExpr max); TVM_DEFINE_OBJECT_REF_METHODS(IntSet, ObjectRef, IntSetNode); }; //----------------------------------------------- // Integer set legacy API. //------------------------------------------------ /*! * \brief Convert std::unordered_map<const VarNode*, IntSet> to Map<Var, IntSet> * * \param dom_map The domain map to convert. * \return The converted map. */ Map<Var, IntSet> ConvertDomMap(const std::unordered_map<const VarNode*, IntSet>& dom_map); /*! * \brief Find an symbolic integer set that contains all possible values of * e given the domain of each iteration variables. * * \param e The expression to be evaluated. * \param dom_map The domain of each variable. * \return An integer set that can cover all the possible values of e. */ IntSet EvalSet(PrimExpr e, const Map<IterVar, IntSet>& dom_map); /*! * \brief Find an symbolic integer set that contains all possible values of * e given the domain of each variables. * * \param e The expression to be evaluated. * \param dom_map The domain of each variable. * \return An integer set that can cover all the possible values of e. */ IntSet EvalSet(PrimExpr e, const Map<Var, IntSet>& dom_map); /*! * \brief Same as EvalSet, but takes unordered_map * * \param e The expression to be evaluated. * \param dom_map The domain of each variable. * \return An integer set that can cover all the possible values of e. */ IntSet EvalSet(PrimExpr e, const std::unordered_map<const tir::VarNode*, IntSet>& dom_map); /*! * \brief Find an symbolic integer set that contains is union over * all the possible conditional values in dom_map. * * \param r The initial range. * \param dom_map The domain of each variable. * \return An integer set that can cover all the possible values. */ IntSet EvalSet(Range r, const Map<IterVar, IntSet>& dom_map); /*! * \brief Find an symbolic integer set that contains is union over * all the possible conditional values in dom_map. * * \param s The initial set. * \param dom_map The domain of each variable. * \return An integer set that can cover all the possible values. */ IntSet EvalSet(IntSet s, const std::unordered_map<const VarNode*, IntSet>& dom_map); /*! * \brief Same as EvalSet, but takes unordered_map * * \param r The range to be evaluated. * \param dom_map The domain of each variable. * \return An integer set that can cover all the possible values of e. */ IntSet EvalSet(Range r, const std::unordered_map<const VarNode*, IntSet>& dom_map); /*! * \brief Same as EvalSet, but takes Array<Range> * * \param region The range to be evaluated. * \param dom_map The domain of each variable. 
* \return An array of integer sets that can cover all the possible values. */ Array<IntSet> EvalSet(const Array<Range>& region, const Map<Var, IntSet>& dom_map); /*! \brief Map from Expr to IntSet */ using ExprIntSetMap = std::unordered_map<PrimExpr, IntSet, ObjectPtrHash, ObjectPtrEqual>; /*! * \brief Find the integer set of every sub-expression, given the * domain of each iteration variables. * * \param e The expression to be evaluated. * \param dom_map The domain of each variable. * \return the map from the expression to its possible value. */ ExprIntSetMap EvalSetForEachSubExpr(PrimExpr e, const std::unordered_map<const VarNode*, IntSet>& dom_map); /*! * \brief Create a union set of all sets, possibly relaxed * \param sets The sets to be combined * \return the set after union */ IntSet Union(const Array<IntSet>& sets); /*! * \brief The union of N-dimensional integer sets * \param nd_int_sets A list of N-dimensional integer sets * \return An N-dimensional integer set as the result of union */ Array<IntSet> UnionRegion(const Array<Array<IntSet>>& nd_int_sets); /*! * \brief Create a lower-bound of union set, where some of the segments may be dropped * \param sets The sets to be combined * \return the set after union */ IntSet UnionLowerBound(const Array<IntSet>& sets); /*! * \brief The union of N-dimensional integer sets * \param nd_int_sets A list of N-dimensional integer sets * \return An N-dimensional integer set as the result of union */ Array<IntSet> UnionRegionLowerBound(const Array<Array<IntSet>>& nd_int_sets); /*! * \brief Create an intersected set of all sets * \param sets The sets to be intersected * \return the set after intersected */ IntSet Intersect(const Array<IntSet>& sets); /*! * \brief Converts the Ranges to IntSets * \param var_dom The ranges of variables * \return The integer sets of the variables */ Map<Var, arith::IntSet> AsIntSet(const Map<Var, Range>& var_dom); /*! * \brief Analyze the region with affine map, given the domain of variables and their predicate. * The result should be strict, i.e. no region is discarded or relaxed. * \param region The region to be analyzed * \param var_dom The ranges of the variables * \param predicate The predicate for the affine map * \param analyzer The analyzer used * \return NullOpt if the detection fails, or an array of arith::IntSet as the result of analysis */ TVM_DLL Optional<Array<IntSet>> EstimateRegionStrictBound(const Array<Range>& region, const Map<Var, Range>& var_dom, const PrimExpr& predicate, arith::Analyzer* analyzer); /*! * \brief Analyze the region with affine map, given the domain of variables and their predicate. * Some subregion may be discarded during the lower-bound analysis. * \param region The region to be analyzed * \param var_dom The ranges of the variables * \param predicate The predicate for the affine map * \param analyzer The analyzer used * \return NullOpt if the detection fails, or an array of arith::IntSet as the result of analysis */ TVM_DLL Optional<Array<IntSet>> EstimateRegionLowerBound(const Array<Range>& region, const Map<Var, Range>& var_dom, const PrimExpr& predicate, arith::Analyzer* analyzer); /*! * \brief Analyze the region with affine map, given the domain of variables and their predicate * Relaxation of the region may be used in upper-bound analysis, i.e. some extra region may be added * to the result. 
* \param region The region to be analyzed * \param var_dom The ranges of the variables * \param predicate The predicate for the affine map * \param analyzer The analyzer used * \return an array of arith::IntSet as the result of analysis */ TVM_DLL Array<IntSet> EstimateRegionUpperBound(const Array<Range>& region, const Map<Var, Range>& var_dom, const PrimExpr& predicate, arith::Analyzer* analyzer); } // namespace arith } // namespace tvm #endif // TVM_ARITH_INT_SET_H_
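// Example (sketch): one way the IntSet factory functions and EvalSet declared
// above might be used. This is a standalone illustration, not part of the
// upstream header; the variable names are chosen only for the example.
#include <tvm/arith/int_set.h>
#include <tvm/tir/op.h>

inline void IntSetUsageSketch() {
  using namespace tvm;
  using namespace tvm::arith;
  tir::Var i("i");
  // `i` iterates over [0, 16).
  Map<tir::Var, IntSet> dom;
  dom.Set(i, IntSet::FromMinExtent(0, 16));
  // The set of values that the expression i * 2 + 1 can take under `dom`.
  IntSet values = EvalSet(i * 2 + 1, dom);
  // Union of two adjacent intervals: together they cover [0, 15].
  IntSet merged = Union({IntSet::Interval(0, 7), IntSet::Interval(8, 15)});
  (void)values;
  (void)merged;
}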
https://github.com/zk-ml/tachikoma
include/tvm/arith/int_solver.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/arith/int_solver.h * \brief integer constraints data structures and solvers */ #ifndef TVM_ARITH_INT_SOLVER_H_ #define TVM_ARITH_INT_SOLVER_H_ #include <tvm/ir/expr.h> #include <tvm/tir/expr.h> #include <tvm/tir/op.h> #include <unordered_map> #include <utility> #include <vector> #include "analyzer.h" namespace tvm { namespace arith { using tir::IterVar; using tir::Var; using tir::VarNode; // According to experiments two best simplifications orders were can->rw and rw->can->rw, // but rw->can->rw is better for a couple of cases. // Also we should end with rw because it factors multipliers out. constexpr int kSimplifyRewriteCanonicalRewrite = 3; /*! * \brief Represent integer grouped bounds which are classified into * lower bounds (inclusive), upper bounds (inclusive) and equalities. * It also contains coefficient as a multiplier for the bounds, i.e., * coef * var >= lower * coef * var == equal * coef * var <= upper * \sa IntGroupBounds */ class IntGroupBoundsNode : public Object { public: PrimExpr coef; Array<PrimExpr> lower; Array<PrimExpr> equal; Array<PrimExpr> upper; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("coef", &coef); v->Visit("lower", &lower); v->Visit("equal", &equal); v->Visit("upper", &upper); } bool SEqualReduce(const IntGroupBoundsNode* other, SEqualReducer eq) const { return eq(coef, other->coef) && eq(lower, other->lower) && eq(equal, other->equal) && eq(upper, other->upper); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(coef); hash_reduce(lower); hash_reduce(equal); hash_reduce(upper); } static constexpr const bool _type_has_method_sequal_reduce = true; static constexpr const char* _type_key = "arith.IntGroupBounds"; TVM_DECLARE_FINAL_OBJECT_INFO(IntGroupBoundsNode, Object); }; /*! * \brief Managed reference to IntGroupBoundsNode. * \sa IntGroupBoundsNode */ class IntGroupBounds : public ObjectRef { public: /*! * \brief Constructor by fields * \param coef The coefficient. Must be integer. * coef * var >= lower * coef * var == equal * coef * var >= upper * \param lower the lower bounds (include) * \param equal equalities * \param upper the upper bounds (include) */ TVM_DLL IntGroupBounds(PrimExpr coef, Array<PrimExpr> lower, Array<PrimExpr> equal, Array<PrimExpr> upper); /*! * \brief Construct bounds from a range. * \param r The range * \return constructed bounds. */ static IntGroupBounds FromRange(const Range& r); /*! * \brief Perform substitution on all components of the struct. */ IntGroupBounds Substitute(const Map<Var, PrimExpr>& subst) const; /*! * \brief Find the best range from the grouped bounds. * \param vranges_addl additional variable ranges that help infer the best range. 
* \return The best range (has the least difference between the lower bound and upper bound). * undefined if (-inf, +inf). */ Range FindBestRange(const Map<Var, Range>& vranges_addl = {}) const; /*! * \brief Combine the bounds with another range. * \param r range to be combined. * \return combined bounds. */ IntGroupBounds operator+(const Range& r); TVM_DEFINE_OBJECT_REF_METHODS(IntGroupBounds, ObjectRef, IntGroupBoundsNode); }; /*! * \brief Represent integer constrains including (integer) variables, their ranges and * the relations between them (either equations or inequalities). * \sa LinearSystem */ class IntConstraintsNode : public Object { public: // e.g., \alpha, \beta, must be integers Array<Var> variables; // e.g., 1 <= \alpha <= N, etc. // it is absolutely ok to include ranges for parameters // (variables that are not in this->variables) in this map Map<Var, Range> ranges; // linear equalities or inequalities // e.g., A \alpha = \beta or A \alpha <= \beta Array<PrimExpr> relations; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("variables", &variables); v->Visit("ranges", &ranges); v->Visit("relations", &relations); } bool SEqualReduce(const IntConstraintsNode* other, SEqualReducer equal) const { return equal(variables, other->variables) && equal(ranges, other->ranges) && equal(relations, other->relations); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(variables); hash_reduce(ranges); hash_reduce(relations); } static constexpr const bool _type_has_method_sequal_reduce = true; static constexpr const char* _type_key = "arith.IntConstraints"; TVM_DECLARE_FINAL_OBJECT_INFO(IntConstraintsNode, Object); }; /*! * \brief Managed reference to IntConstraintsNode. * \sa IntConstraintsNode */ class IntConstraints : public ObjectRef { public: /*! * \brief Constructor by fields * \param variables The variables in the constraints, must be integers. * \param ranges The ranges of the variables. * \param relations The linear relations between the variables * (either equations or inequalities) */ TVM_DLL IntConstraints(Array<Var> variables, Map<Var, Range> ranges, Array<PrimExpr> relations); TVM_DEFINE_OBJECT_REF_METHODS(IntConstraints, ObjectRef, IntConstraintsNode); }; /*! * \brief We can have different set of variables to represent the same constraints. * For example, the following two systems are equivalent, * {a + b = 0 | a >= 0, b >= 0} and * {m - n = 0 | m >= 0, n <= 0} * This data structure represents the transformation * between two equivalent linear systems. 
* In the above example, * src : {a + b = 0 | a >= 0, b >= 0} * dst : {m - n = 0 | m >= 0, n <= 0} * src_to_dst : {a -> m, b -> -n} * dst_to_src : {m -> a, n -> -b} * \sa IntConstraintsTransform */ class IntConstraintsTransformNode : public Object { public: IntConstraints src; IntConstraints dst; Map<Var, PrimExpr> src_to_dst; Map<Var, PrimExpr> dst_to_src; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("src", &src); v->Visit("dst", &dst); v->Visit("src_to_dst", &src_to_dst); v->Visit("dst_to_src", &dst_to_src); } bool SEqualReduce(const IntConstraintsTransformNode* other, SEqualReducer equal) const { return equal(src, other->src) && equal(dst, other->dst) && equal(src_to_dst, other->src_to_dst) && equal(dst_to_src, other->dst_to_src); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(src); hash_reduce(dst); hash_reduce(src_to_dst); hash_reduce(dst_to_src); } static constexpr const bool _type_has_method_sequal_reduce = true; static constexpr const char* _type_key = "arith.IntConstraintsTransform"; TVM_DECLARE_FINAL_OBJECT_INFO(IntConstraintsTransformNode, Object); }; /*! * \brief Managed reference to IntConstraintsTransformNode. * \sa IntConstraintsTransformNode */ class IntConstraintsTransform : public ObjectRef { public: /*! * \brief Constructor by fields * \param src source integer constraints, e.g., {a + b = 0 | a >= 0, b >= 0} * \param dst integer constraints equivalent to the source, * e.g., {m - n = 0 | m >= 0, n <= 0} * \param src_to_dst mapping from variables in the \p src to the variables in the \p dst, * e.g., {a -> m, b -> -n} * \param dst_to_src mapping from variables in the \p dst to the variables in the \p src, * e.g., {m -> a, n -> -b} */ TVM_DLL IntConstraintsTransform(IntConstraints src, IntConstraints dst, Map<Var, PrimExpr> src_to_dst, Map<Var, PrimExpr> dst_to_src); /*! * \brief Chain-compose two IntConstraintsTransform together. * this->dst must be the same as other->src. * @param other another IntConstraintsTransform whose src is same as this->dst. * @return composed IntConstraintsTransform(this->src, other->dst) * with its variables and ranges are properly modified. */ IntConstraintsTransform operator+(const IntConstraintsTransform& other) const; TVM_DEFINE_OBJECT_REF_METHODS(IntConstraintsTransform, ObjectRef, IntConstraintsTransformNode); }; typedef std::pair<Map<Var, IntGroupBounds>, Array<PrimExpr>> PartialSolvedInequalities; /*! * \brief Obtain Smith Normal Form of linear equation A x = y. * Smith Normal Form of matrix A_{mxn} is S_{mxn} = U_{mxm} A_{mxn} V_{nxn}, * in which S_{mxn} is diag(s1, s2, ..., sr, 0, ..., 0) and r is the rank of A. * NOTE: Although in standard Smith Normal Form the diagonal elements satisfy * s_i | s_{i+1} (| means divides), the implement here does not guarantee it. * TODO(yzhliu): From sergei-grechanik: * computing the proper Smith normal form may improve stability of automatic * differentiation (generating the same gradient code for slightly different but equivalent input * code U_{mxm} and V_{nxn} are invertible matrices. This function modifies \p S to be S_{mxn}, \p V * to be V_{nxn}, \p y to be U_{mxm} y_{mx1} and \p x to be V^{-1} x. \param S the original * A_{mxn}, it will be modified to S_{mxn} \param V an identity matrix, it will be modified to * V_{nxn} \param x the x in A x = y. it will be modified to V^{-1}_{nxn} x_{nx1} \param y the y * in A x = y. 
it will be modified to U_{mxm} y_{mx1} */ void SmithNormalFormDiag(std::vector<std::vector<int64_t>>* S, std::vector<std::vector<int64_t>>* V, std::vector<PrimExpr>* x, std::vector<PrimExpr>* y); /*! * \brief Solve linear equations. * \param system_to_solve the variables to solve, their ranges, and a list of equations. * \return A new linear system, with less variables (if \p system_to_solve is NOT of full rank), * or no variable (if \p system_to_solve is of full rank), * or an empty linear system (if \p system_to_solve is unsolvable). * It also provides the ranges of the variables in the new system, * as well as inequalities inferred from the \p system_to_solve. * You can get the mapping from the original variables to the solution via ret->src_to_dst. */ IntConstraintsTransform SolveLinearEquations(const IntConstraints& system_to_solve); /*! * \brief Solve linear inequalities. * \param system_to_solve the variables to solve, their ranges, and a list of inequalities. * The inequalities are rewritten using Fourier-Motzkin elimination. * This function takes an array of (in)equalities and an array of variables, and essentially * rewrites the (in)equalities into an array of (in)equalities of the following form, * * x0 >= f0(x1, x2, ..., xn) * x0 <= g0(x1, x2, ..., xn) * x1 >= f1(x2, ..., xn) * x1 <= g1(x2, ..., xn) * ... * xn >= fn() // just a constant * xn <= gn() // just a constant * * \return A map of variables and their solved bounds, * and constrains that cannot be solved to bounds. */ PartialSolvedInequalities SolveLinearInequalities(const IntConstraints& system_to_solve); /*! * \brief Combine the information into an array of (in)equalities. * \param variables The variables in \p bounds. * It is used to determine the iteration order to avoid indeterministic results. * \param bounds grouped boundary of the variables. * \param relations other relations. */ Array<PrimExpr> AsConditions(const Array<Var>& variables, const Map<Var, IntGroupBounds>& bounds, const Array<PrimExpr>& relations); /*! * \brief Solve linear inequalities and infer the range of each variable. * \param system_to_solve the variables to solve, their ranges, and a list of inequalities. * \return The result ranges for each variables. * The returned IntConstraints(variables, ranges, relations) contains, * 1. variables - the variables that have been solved. * 2. ranges - the best range of each variable. * 3. relations - constraints that cannot be transformed to * Range will be stored in relations. */ IntConstraints SolveInequalitiesToRange(const IntConstraints& system_to_solve); /*! * \brief Solve linear inequalities and deskew the ranges towards zero. * \param system_to_solve the variables to solve, their ranges, and a list of inequalities. * \return A transform (src IntConstraints -> dst IntConstraints) * from original variables to a set of new variables. * The ranges of new variables always start from zero, * their extents are solved from \p system_to_solve. * src IntConstraints is the same as \p system_to_solve. * dst IntConstraints(variables, ranges, relations) contains, * 1. variables - the variables that have been solved. * 2. ranges - the best range (start from zero) of each variable. * 3. relations - constraints that cannot be transformed to * Range will be stored in relations. * Variable mapping can be obtained from * IntConstraintsTransform.src_to_dst and IntConstraintsTransform.dst_to_src. 
*/ IntConstraintsTransform SolveInequalitiesDeskewRange(const IntConstraints& system_to_solve); } // namespace arith } // namespace tvm #endif // TVM_ARITH_INT_SOLVER_H_
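// Example (sketch): solving a tiny linear system with the API declared above.
// Standalone illustration only; the variables and the single relation are made up.
#include <tvm/arith/int_solver.h>

inline void IntSolverUsageSketch() {
  using namespace tvm;
  using namespace tvm::arith;
  tir::Var x("x"), y("y");
  Map<tir::Var, Range> ranges;
  ranges.Set(x, Range::FromMinExtent(0, 20));
  ranges.Set(y, Range::FromMinExtent(0, 20));
  // One equality relation: x + y == 10.
  Array<PrimExpr> relations{tir::EQ(x + y, 10)};
  IntConstraints system({x, y}, ranges, relations);
  // The returned transform maps the original variables to a smaller set of
  // new variables; src_to_dst / dst_to_src hold the variable mappings.
  IntConstraintsTransform solution = SolveLinearEquations(system);
  (void)solution;
}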
https://github.com/zk-ml/tachikoma
include/tvm/arith/iter_affine_map.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/arith/iter_affine_map.h * \brief Iterator quasi-affine mapping patterns. * * This file defines a collection of mapping patterns * maps a collection of independent iterators to another * collection of independent iterators. * * There are two main kinds of mapping patterns: * * - Fuse: fuse a collection of iterators into a single one * * domain(x0) = [0, 4), domain(x1) = [0, 3), domain(x2) = [0, 2) * fuse(x0, x1, x2): y = x2 * 12 + x1 * 4 + x0 * domain(y) = [0, 24) * * - Split: split an iterator into multiple ones * * domain(x) = [0, 24) * split(x, 3, 12): [y0, y1, y2] = [x % 3, (x % 12) / 3, x / 12] * domain(y0) = [0, 3), domain(y1) = [0, 4), domain(y2) = [0, 2) * * We use the name "(quasi)affine" to be consistent with * the terminology used in the polyhedral compilation. * Notably, fuse is an affine transformation, * while split corresponds to additional floordiv/mod operations * that can appear in quasi-affine transformations. */ #ifndef TVM_ARITH_ITER_AFFINE_MAP_H_ #define TVM_ARITH_ITER_AFFINE_MAP_H_ #include <tvm/arith/analyzer.h> #include <tvm/ir/diagnostic.h> #include <tvm/ir/expr.h> #include <tvm/tir/var.h> namespace tvm { namespace arith { /*! * \brief Base class of all iter map expressions. * * An IterMapExpr is a special expression to store * the result of IterMapDetection. * It should not appear in a legal TIR PrimFunc. */ class IterMapExprNode : public PrimExprNode { public: // overrides void VisitAttrs(tvm::AttrVisitor* v) {} static constexpr const char* _type_key = "arith.IterMapExpr"; static constexpr const uint32_t _type_child_slots = 3; TVM_DECLARE_BASE_OBJECT_INFO(IterMapExprNode, PrimExprNode); }; /*! * \brief Managed reference to IterMapExprNode. * \sa IterMapExprNode */ class IterMapExpr : public PrimExpr { public: TVM_DEFINE_OBJECT_REF_METHODS(IterMapExpr, PrimExpr, IterMapExprNode); }; /*! * \brief Mark the source as an iterator in [0, extent). * * IterMark is used to mark source expression as a valid * iterator to make future analysis easy. */ class IterMarkNode : public Object { public: /*! * \brief The source expression, can either be * a IterSumExpr or a Var. */ PrimExpr source; /*! * \brief The extent of the iteration. 
*/ PrimExpr extent; // overrides void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("source", &source); v->Visit("extent", &extent); } bool SEqualReduce(const IterMarkNode* other, SEqualReducer equal) const { equal->MarkGraphNode(); return equal(source, other->source) && equal(extent, other->extent); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce->MarkGraphNode(); hash_reduce(source); hash_reduce(extent); } static constexpr const bool _type_has_method_sequal_reduce = true; static constexpr const bool _type_has_method_shash_reduce = true; static constexpr const char* _type_key = "arith.IterMark"; TVM_DECLARE_FINAL_OBJECT_INFO(IterMarkNode, Object); }; /*! * \brief Managed reference to IterMarkExprNode. * \sa IterMarkExprNode */ class IterMark : public ObjectRef { public: /*! * \brief constructor. * \param source The source expression. * \param extent The extent of the iterator. */ TVM_DLL IterMark(PrimExpr source, PrimExpr extent); TVM_DEFINE_OBJECT_REF_METHODS(IterMark, ObjectRef, IterMarkNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(IterMarkNode); }; /*! * \brief Split of an iterator. * * result = floormod(floordiv(source, lower_factor), extent) * scale */ class IterSplitExprNode : public IterMapExprNode { public: /*! \brief The source marked iterator. */ IterMark source; /*! \brief The lower factor to split the source. */ PrimExpr lower_factor; /*! \brief The extent of the split. */ PrimExpr extent; /*! \brief Additional scale. */ PrimExpr scale; // overrides void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("source", &source); v->Visit("lower_factor", &lower_factor); v->Visit("extent", &extent); v->Visit("scale", &scale); } bool SEqualReduce(const IterSplitExprNode* other, SEqualReducer equal) const { return equal(source, other->source) && equal(lower_factor, other->lower_factor) && equal(extent, other->extent) && equal(scale, other->scale); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(source); hash_reduce(lower_factor); hash_reduce(extent); hash_reduce(scale); } static constexpr const char* _type_key = "arith.IterSplitExpr"; TVM_DECLARE_FINAL_OBJECT_INFO(IterSplitExprNode, IterMapExprNode); }; /*! * \brief Managed reference to IterSplitExprNode. * \sa IterSplitExprNode */ class IterSplitExpr : public IterMapExpr { public: /*! * \brief constructor from just source. * \param source The source expression. */ TVM_DLL explicit IterSplitExpr(IterMark source); /*! * \brief constructor from just source. * \param source The source expression. * \param scale The additional scaling factor. */ TVM_DLL explicit IterSplitExpr(IterMark source, PrimExpr scale); /*! * \brief constructor * \param source The source expression. * \param lower_factor The lower factor to split the source. * \param extent The extent of the split. * \param scale The additional scaling factor. */ TVM_DLL explicit IterSplitExpr(IterMark source, PrimExpr lower_factor, PrimExpr extent, PrimExpr scale); TVM_DEFINE_OBJECT_REF_METHODS(IterSplitExpr, IterMapExpr, IterSplitExprNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(IterSplitExprNode); }; /*! * \brief Fuse multiple iterators by summing them with scaling. * * result = sum(args) + base */ class IterSumExprNode : public IterMapExprNode { public: /*! \brief The args to the sum. */ Array<IterSplitExpr> args; /*! \brief The base offset. 
*/ PrimExpr base; // overrides void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("args", &args); v->Visit("base", &base); } bool SEqualReduce(const IterSumExprNode* other, SEqualReducer equal) const { return equal(args, other->args) && equal(base, other->base); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(args); hash_reduce(base); } static constexpr const char* _type_key = "arith.IterSumExpr"; TVM_DECLARE_FINAL_OBJECT_INFO(IterSumExprNode, IterMapExprNode); }; /*! * \brief Managed reference to IterSumExprNode. * \sa IterSumExprNode */ class IterSumExpr : public IterMapExpr { public: /*! * \brief constructor. * \param args The args to the sum. * \param base The base offset. */ TVM_DLL IterSumExpr(Array<IterSplitExpr> args, PrimExpr base); TVM_DEFINE_OBJECT_REF_METHODS(IterSumExpr, IterMapExpr, IterSumExprNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(IterSumExprNode); }; /*! \brief Mapping level for iterators. */ enum IterMapLevel { // Require the mapping to be bijective. Bijective = 0, // Require the mapping to be surjective. Surjective = 1, // No mapping safety check. NoCheck = 3 }; /*! * \brief Result of DetectIterMap. */ class IterMapResultNode : public Object { public: // The detected pattern if a match exists. Array<IterSumExpr> indices; // Any errors that occurred while converting the input indices. If // the array is empty, the conversion was successful. Array<String> errors; /*! \brief Boolean expression indicating if a specific value w * * `padding_predicate` evaluates to true for a set of indices that * are outside the bounds of the provided index iterators, but * inside the bounds of the returned index iterators. This * expression is in terms of the variables provided in * `input_iters`. */ PrimExpr padding_predicate; // overrides void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("errors", &errors); v->Visit("indices", &indices); v->Visit("padding_predicate", &padding_predicate); } static constexpr const char* _type_key = "arith.IterMapResult"; TVM_DECLARE_FINAL_OBJECT_INFO(IterMapResultNode, Object); }; /*! * \brief Managed reference to IterMapResultNode. * \sa IterMapResultNode */ class IterMapResult : public ObjectRef { public: // constructor IterMapResult() { data_ = make_object<IterMapResultNode>(); } /*! \return mutable pointers to the node. */ IterMapResultNode* operator->() const { return static_cast<IterMapResultNode*>(get_mutable()); } }; /*! * \brief Detect if indices can be written as * [y_0 + c_0, y_1 + c_1, ..., y_n + c_n] * * Here y = some-quasi-affine-iter-map(input_iters) * and c are symbolic constants. * * We also requires that y_i and y_j to be independent for i != j. * * For returned value rv, the following is always true: * - rv[i]->args.size() <=1: only one iterator per element. * * \param indices The indices to detect pattern for. * \param input_iters Map from variable to iterator's range. * \param predicate The predicate constraints on the input iterators * \param check_level The iter mapping checking level. * \param analyzer Analyzer used to get context information. * \param simplify_trivial_iterators If true, iterators with extent of * 1 will be replaced with a constant value. * * \return The detected iteration result. * The return object's .indices is empty on failure. */ IterMapResult DetectIterMap(const Array<PrimExpr>& indices, const Map<Var, Range>& input_iters, const PrimExpr& predicate, IterMapLevel check_level, arith::Analyzer* analyzer, bool simplify_trivial_iterators = true); /*! 
* \brief Use IterVarMap detector to rewrite and simplify the indices * * \param indices The indices to detect pattern for. * \param input_iters Map from variable to iterator's range. * \param input_pred The predicate constraints on the input iterators * \param check_level The iter mapping checking level. * \param simplify_trivial_iterators If true, iterators with unit extents are simplified * \return The indices after rewrite */ Array<PrimExpr> IterMapSimplify(const Array<PrimExpr>& indices, const Map<Var, Range>& input_iters, const PrimExpr& input_pred, IterMapLevel check_level, bool simplify_trivial_iterators = true); /*! * \brief Apply the inverse of the affine transformation to the outputs. * * Similar to the back-propagation, starting from the outputs, it visits the DAG of the expressions * in reverse topology order and applies the inverse of the affine transformation until it reaches * the input. The affine iter map is required to be bijective. * * For example, iter_map = [l0 // 16, l0 % 16], outputs = [output_0, output_1], * the affine transformation specified by `iter_map` will be applied to `outputs` and the result * will be {l0: ((output_0*16) + output_1)}. * * The range of `outputs` should be the same as the output range of the affine transmation. * * \sa DetectIterMap * * \param iter_map The bijective affine iter map. * \param outputs The outputs of the affine transformation. * * \return The map from the input to the transformed result. */ Map<Var, PrimExpr> InverseAffineIterMap(const Array<IterSumExpr>& iter_map, const Array<PrimExpr> outputs); /*! * \brief Detect if bindings can be written as * [a_0*e_0 + b_0 + c_0, a_1*e_1 + b_1, ..., a_n*e_n + b_n] * * where a = some-quasi-affine-iter-map(input_iters set_minus sub_iters) * b = some-quasi-affine-iter-map(sub_iters) * c is constant symbols * e is the extent of b * * For example, z*12 + y*3 + x + c = (z*4+y)*3 + x, if sub_iters={x} * * \param bindings The input bindings * \param input_iters Map from variable to iterator's range. * \param sub_iters Iterators of subspace. * \param predicate The predicate constraints on the input iterators * \param check_level The iter mapping checking level. * \param analyzer Analyzer used to get context information. * * \return The result list has length len(bindings) + 1 [0, len(bindings)): The iter map matching result. The inner list is of length 2. The first expr is the basis of the quotient space. The second expr is the basis of the subspace. len(bindings): the predicate of outer space and inner space Empty array if no match can be found. */ Array<Array<IterMark>> SubspaceDivide(const Array<PrimExpr>& bindings, const Map<Var, Range>& input_iters, const Array<Var>& sub_iters, const PrimExpr& predicate, IterMapLevel check_level, arith::Analyzer* analyzer); /*! * \brief Given an expression that may contain IterMapExpr, transform it to normal PrimExpr. * \param expr The input expression, which may contain IterMapExpr. * \return The corresponding normal PrimExpr. */ PrimExpr NormalizeIterMapToExpr(const PrimExpr& expr); } // namespace arith } // namespace tvm #endif // TVM_ARITH_ITER_AFFINE_MAP_H_
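// Example (sketch): detecting a fused-iterator pattern with DetectIterMap.
// Standalone illustration; the iterators and extents below are made up.
#include <tvm/arith/analyzer.h>
#include <tvm/arith/iter_affine_map.h>
#include <tvm/tir/op.h>

inline void IterMapUsageSketch() {
  using namespace tvm;
  using namespace tvm::arith;
  tir::Var x("x"), y("y");
  Map<tir::Var, Range> input_iters;
  input_iters.Set(x, Range::FromMinExtent(0, 8));
  input_iters.Set(y, Range::FromMinExtent(0, 4));
  Analyzer analyzer;
  // y * 8 + x fuses (y, x) into a single iterator over [0, 32).
  IterMapResult res = DetectIterMap({y * 8 + x}, input_iters,
                                    /*predicate=*/tir::const_true(),
                                    IterMapLevel::Bijective, &analyzer);
  // res->indices is non-empty iff the detection succeeded;
  // res->errors describes the failure otherwise.
  (void)res;
}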
https://github.com/zk-ml/tachikoma
include/tvm/arith/pattern.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/arith/pattern.h * \brief Expression pattern detectors. */ #ifndef TVM_ARITH_PATTERN_H_ #define TVM_ARITH_PATTERN_H_ #include <tvm/ir/expr.h> #include <tvm/tir/expr.h> namespace tvm { namespace arith { /*! * \brief Detect if e can be rewritten as e = sum_{i=0}^{n-1} var[i] * coeff[i] + coeff[n] * Where coeff[i] and base are invariant of var[j] for all i and j. * * \param e The expression to be detected. * \param vars List of variables to be used in detection. * \return [coeff[i]] if it is possible, empty array if it is not. */ Array<PrimExpr> DetectLinearEquation(const PrimExpr& e, const Array<tir::Var>& vars); /*! * \brief Detect if expression corresponds to clip bound of the vars * * \param e The expression to be detected. * \param vars List of variables to be used in detection. * \return concat([min_value[i], max_value[i]]), None is returned if there is no min or max value * return empty if the e does not match the pattern. */ Array<PrimExpr> DetectClipBound(const PrimExpr& e, const Array<tir::Var>& vars); } // namespace arith } // namespace tvm #endif // TVM_ARITH_PATTERN_H_
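// Example (sketch): recovering the coefficients of a linear expression with
// DetectLinearEquation. Standalone illustration; the variables are made up.
#include <tvm/arith/pattern.h>
#include <tvm/tir/op.h>

inline void PatternUsageSketch() {
  using namespace tvm;
  tir::Var x("x"), y("y");
  // For x * 4 + y * 2 + 7 over vars {x, y}, the expected result is {4, 2, 7}:
  // one coefficient per variable followed by the constant term.
  Array<PrimExpr> coeffs = arith::DetectLinearEquation(x * 4 + y * 2 + 7, {x, y});
  // An empty array means the expression is not linear in {x, y}.
  (void)coeffs;
}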
https://github.com/zk-ml/tachikoma
include/tvm/auto_scheduler/auto_schedule.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/auto_scheduler/auto_schedule.h * \brief The user interface of the auto scheduler. */ #ifndef TVM_AUTO_SCHEDULER_AUTO_SCHEDULE_H_ #define TVM_AUTO_SCHEDULER_AUTO_SCHEDULE_H_ #include <tvm/auto_scheduler/measure.h> #include <tvm/auto_scheduler/search_policy.h> #include <utility> namespace tvm { namespace auto_scheduler { /*! \brief Tuning and measurement options. */ class TuningOptionsNode : public Object { public: /*! \brief The number of total measurement trials. */ int num_measure_trials; /*! \brief Stops the tuning early if no improvement after n measurements. */ int early_stopping; /*! \brief The number of programs to be measured at each search round. */ int num_measures_per_round; /*! \brief Verbosity level. 0 for silent, 1 to output information during schedule searching. */ int verbose; /*! \brief ProgramBuilder which builds the program */ ProgramBuilder builder; /*! \brief ProgramRunner which runs the program and measures time costs */ ProgramRunner runner; /*! \brief MeasureCallback functions to be called after each measure batch */ Optional<Array<MeasureCallback>> measure_callbacks; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("num_measure_trials", &num_measure_trials); v->Visit("early_stopping", &early_stopping); v->Visit("num_measures_per_round", &num_measures_per_round); v->Visit("verbose", &verbose); v->Visit("builder", &builder); v->Visit("runner", &runner); v->Visit("measure_callbacks", &measure_callbacks); } static constexpr const char* _type_key = "auto_scheduler.TuningOptions"; TVM_DECLARE_FINAL_OBJECT_INFO(TuningOptionsNode, Object); }; /*! * \brief Managed reference to TuningOptionsNode. * \sa TuningOptionsNode */ class TuningOptions : public ObjectRef { public: /*! * \brief The constructor * \param num_measure_trials The number of total measurement trials. * \param early_stopping Stops the tuning early if no improvement after n measurements. * \param num_measures_per_round The number of programs to be measured at each search round. * \param verbose Verbosity level. 0 for silent, 1 to output information during schedule * search. * \param builder ProgramBuilder which builds the program. * \param runner ProgramRunner which runs the program and measure time costs. * \param measure_callbacks MeasureCallback functions to be called after each measure batch. */ TuningOptions(int num_measure_trials, int early_stopping, int num_measures_per_round, int verbose, ProgramBuilder builder, ProgramRunner runner, Optional<Array<MeasureCallback>> measure_callbacks); TVM_DEFINE_OBJECT_REF_METHODS(TuningOptions, ObjectRef, TuningOptionsNode); }; /*! * \brief Run schedule search for a given compute declaration. * \param search_policy The search policy. 
* \param tuning_options Tuning and measurement options. * \return A `te::Schedule` and an Array of `te::Tensor` to be used in `tvm.lower` or * `tvm.build`. */ TVM_DLL std::pair<te::Schedule, Array<te::Tensor>> AutoSchedule(SearchPolicy search_policy, TuningOptions tuning_options); } // namespace auto_scheduler } // namespace tvm #endif  // TVM_AUTO_SCHEDULER_AUTO_SCHEDULE_H_
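// Example (sketch): a minimal C++ driver around AutoSchedule, assuming a
// search policy, builder, and runner have already been constructed elsewhere.
// The trial counts below are arbitrary illustration values.
#include <tvm/auto_scheduler/auto_schedule.h>

inline tvm::te::Schedule RunAutoScheduleSketch(tvm::auto_scheduler::SearchPolicy policy,
                                               tvm::auto_scheduler::ProgramBuilder builder,
                                               tvm::auto_scheduler::ProgramRunner runner) {
  using namespace tvm::auto_scheduler;
  // 64 measurement trials, early stopping disabled (-1), 16 programs per round, verbose output.
  TuningOptions options(/*num_measure_trials=*/64, /*early_stopping=*/-1,
                        /*num_measures_per_round=*/16, /*verbose=*/1, builder, runner,
                        /*measure_callbacks=*/tvm::NullOpt);
  // Returns the best schedule found plus the tensor arguments for tvm.lower / tvm.build.
  auto result = AutoSchedule(policy, options);
  return result.first;
}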
https://github.com/zk-ml/tachikoma
include/tvm/auto_scheduler/compute_dag.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/auto_scheduler/compute_dag.h * \brief The auto-scheduler's computational graph and related program analyses. * * We convert a compute declaration described by `tvm.compute` (could be a single operator or a * subgraph) to a ComputeDAG. It keeps the input/output tensors, all operations in the DAG, and * some static analysis results for the DAG (e.g. the total float operation count, consumer/producer * relations of operations, whether an operation stage should be tiled/compute inlined ...). * These analyses can help the search policy to make decisions during the search. * ComputeDAG is also responsible for the interaction between auto-scheduler's `LoopState` and * TVM schedule (e.g. applying the `LoopState` transform steps to a TVM schedule, providing * `LoopState` with extra information obtained from the TVM schedule ...). */ #ifndef TVM_AUTO_SCHEDULER_COMPUTE_DAG_H_ #define TVM_AUTO_SCHEDULER_COMPUTE_DAG_H_ #include <tvm/auto_scheduler/loop_state.h> #include <tvm/runtime/c_runtime_api.h> #include <tvm/te/schedule.h> #include <unordered_map> #include <unordered_set> #include <utility> #include <vector> namespace tvm { namespace auto_scheduler { /*! \brief Static analyzer for a ComputeDAG */ class AccessAnalyzerNode : public Object { public: template <class T> using OperationMap = std::unordered_map<te::Operation, T, ObjectPtrHash, ObjectPtrEqual>; /*! \brief Map an operation to all operations it reads from. * For each operation pair, use a two-dimensional array for multiple multi-dimensional accesses * The inner vector represents the indices of multi-dimensional access.*/ OperationMap<OperationMap<std::vector<std::vector<PrimExpr>>>> read_from; /*! \brief Map an operation to all operations it is read by. * For each operation pair, use a two-dimensional array for multiple multi-dimensional accesses * The inner vector represents the indices of multi-dimensional access.*/ OperationMap<OperationMap<std::vector<std::vector<PrimExpr>>>> read_by; /*! \brief Store the number of common outer iterators for operation pairs that have * read-write relations. */ OperationMap<OperationMap<int>> num_common_outer_iterators; /*! \brief Store whether the operation is an op with only simple access. * (e.g., injective, broadcast and elementwise ops without reduction) */ OperationMap<bool> is_simple_access; /*! \brief Store whether the operation is strictly inlineable * (e.g., injective, broadcast and elementwise without reduction, branch or expensive operations) */ OperationMap<bool> is_strictly_inlineable; /*! \brief Store whether the operation needs multi-level tiling * (e.g., computation-intensive ops with data reuse opportunity like matmul, conv2d) */ OperationMap<bool> needs_multi_level_tiling; /*!
\brief Store whether the operation is an output operation */ OperationMap<bool> is_output; /*! \brief Store the topological order of operations */ Array<te::Operation> ops_topo_order; static constexpr const char* _type_key = "auto_scheduler.AccessAnalyzer"; TVM_DECLARE_FINAL_OBJECT_INFO(AccessAnalyzerNode, Object); }; /*! * \brief Managed reference to AccessAnalyzerNode. * \sa AccessAnalyzerNode */ class AccessAnalyzer : public ObjectRef { public: explicit AccessAnalyzer(const Array<te::Tensor>& tensors); /*! * \brief Return whether this operation is an op with simple access * (e.g., injective, broadcast and elementwise ops without reduction) * \param op The operation */ TVM_DLL bool IsSimpleAccess(const te::Operation& op) const; /*! * \brief Return whether this operation is strictly inlineable * (e.g., injective, broadcast and elementwise without reduction, branch or expensive operations) * \param op The operation */ TVM_DLL bool IsStrictlyInlineable(const te::Operation& op) const; /*! * \brief Return whether this operation needs multi-level tiling * (e.g., computation-intensive ops with data reuse opportunity like matmul, conv2d) * \param op The operation */ TVM_DLL bool NeedsMultiLevelTiling(const te::Operation& op) const; /*! * \brief Return whether this operation is an output operation * \param op The operation */ TVM_DLL bool IsOutput(const te::Operation& op) const; /*! * \brief Get all consumers of an operation * \param state The current loop state * \param op The operation * \return The set of consumers * \note This function propagates the relation for inlined ops */ TVM_DLL std::unordered_set<te::Operation, ObjectHash, ObjectEqual> GetConsumers( const State& state, const te::Operation& op) const; /*! * \brief Get all producers of an operation * \param state The current loop state * \param op The operation * \return The set of producers * \note This function propagates the relation for inlined ops */ TVM_DLL std::unordered_set<te::Operation, ObjectHash, ObjectEqual> GetProducers( const State& state, const te::Operation& op) const; /*! * \brief Get all direct producers of an operation * \param op The operation * \return The set of direct producers * \note This function DOES NOT propagate the relation for inlined ops */ TVM_DLL std::unordered_set<te::Operation, ObjectHash, ObjectEqual> GetDirectProducers( const te::Operation& op) const; /*! * \brief Get the number of common outer iterators. * \param op The operation * \param target_op The target operation * \note This function propagates the relation for chains with multiple ops. */ TVM_DLL int GetNumCommonOuterIterator(const te::Operation& op, const te::Operation& target_op) const; /*! * \brief Return whether two operations are elementwise-matched * (e.g. conv2d and relu are elementwise-matched) * \note This function propagates the relation for chains with multiple ops. */ TVM_DLL bool ElementWiseMatch(const te::Operation& op, const te::Operation& target_op) const; TVM_DEFINE_OBJECT_REF_METHODS(AccessAnalyzer, ObjectRef, AccessAnalyzerNode); }; /*! \brief The auto-scheduler's computational graph and related program analyses. */ class ComputeDAGNode : public Object { public: /*! * \brief Input and output tensors. * This is used as the input of `tvm.lower` or `tvm.build`. */ Array<te::Tensor> tensors; /*! \brief All used operations in topo order. */ Array<te::Operation> ops; /*! \brief The number of float operations in this ComputeDAG. */ double flop_ct; /*! \brief The initial state without any transform steps. 
*/ State init_state; /*! \brief The static read-write access analyzer. */ AccessAnalyzer access_analyzer; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("tensors", &tensors); v->Visit("ops", &ops); v->Visit("flop_ct", &flop_ct); v->Visit("init_state", &init_state); v->Visit("access_analyzer", &access_analyzer); } static constexpr const char* _type_key = "auto_scheduler.ComputeDAG"; TVM_DECLARE_FINAL_OBJECT_INFO(ComputeDAGNode, Object); }; /*! * \brief Options for applying layout rewrite. * This is an optimization to rewrite the layout of input tensors according to the schedule we get. */ enum class LayoutRewriteOption : int { /*! \brief Do not perform layout rewrite. */ NoRewrite = 0, /*! \brief Insert layout transformation stages for input placeholders in the compute DAG */ InsertTransformStage = 1, /*! * \brief Do not insert layout transformation stages and assume the input placeholders * are pre-transformed. * \note The lowered function with this option does not accept the origial input shapes, * so this option must be used along with `AutoSchedulerLayoutRewrite` pass in Relay. */ RewriteForPreTransformed = 2, }; /*! * \brief Managed reference to ComputeDAGNode. * \sa ComputeDAGNode */ class ComputeDAG : public ObjectRef { public: /*! \brief Construct a DAG from a list of output tensors. * \param tensors `te::Tensor`s for a compute declaration. */ TVM_DLL explicit ComputeDAG(Array<te::Tensor> tensors); /*! \brief Construct a DAG based on a schedule. * \param sch `te::Schedule`s for a compute declaration. */ TVM_DLL explicit ComputeDAG(const te::Schedule& sch); /*! * \brief Rewrite the layout of placeholder specified by attr `layout_free_placeholders` * according to the loop nest derived with `transform_steps`. * \param transform_steps Transform steps of a state. * \param layout_rewrite Different options in layout rewrite. * \return The updated ComputeDAG after layout rewrite. */ ComputeDAG RewriteLayout(Array<Step>* transform_steps, LayoutRewriteOption layout_rewrite) const; /*! * \brief Apply the history transform steps to get a TVM schedule. * \param transform_steps Transform steps of a state. * \param stages The list of stages after applying the steps. * Pass a valid pointer if this information needs to be used outside this function. * \param stage_to_axes The map that stores all axes for one stage. * Pass a valid pointer if this information needs to be used outside this function. * \param layout_rewrite Rewrite the layout of placeholders specified by * attr `layout_free_placeholders`. * \return A `te.schedule` and the an Array of `te.Tensor` to be used in `tvm.lower` * or `tvm.build`. */ std::pair<te::Schedule, Array<te::Tensor>> ApplySteps( const Array<Step>& transform_steps, Array<te::Stage>* stages = nullptr, StageToAxesMap* stage_to_axes = nullptr, LayoutRewriteOption layout_rewrite = LayoutRewriteOption::NoRewrite) const; /*! * \brief Print transform steps as equivalent python schedule API. * This can be used for debugging. * \param transform_steps Transform steps of a state. * \return The Python schedule code. */ String PrintStepsAsPython(const Array<Step>& transform_steps) const; /*! * \brief Print the compute DAG to a string. This is also used to generate the ComputeDAG hash. * \param simple_mode Simple mode will only include the op names and brief compute. * \return The ComputeDAG in a string. */ String PrintDAG(bool simple_mode = false) const; /*! * \brief Fill the correct bound information for a given state by calling ir_pass::InferBound. 
* The states can lose complete bound information after some transform steps (e.g., compute_at). * We can call this function to infer and fill all the bound information. * This function calls TVM InferBound pass internally to get the bound. * The returned state of this function is guaranteed to have complete bound information. * \param state The input state. * \return The State with complete bound information */ State InferBound(const State& state) const; /*! * \brief Fill the correct bound information for the given states by calling ir_pass::InferBound. * The states can lose complete bound information after some transform steps (e.g., compute_at). * We can call this function to infer and fill all the bound information. * This function calls TVM InferBound pass internally to get the bound. * The returned state of this function is guaranteed to have complete bound information. * \param states The input states. * \return The States with complete bound information. * \note The returned array will contain an empty State if bound inference fails on some * states. */ Array<State> InferBound(const Array<State>& states) const; /*! * \brief Since some steps may change the ComputeDAG (e.g. CacheRead/CacheWrite), the initial * ComputeDAG may not be up-to-date. This function replays the given transform steps from the * initial state and returns an up-to-date ComputeDAG. * \param steps The steps to be replayed. Usually we'll filter out the unused steps to speed up * the replay process, since we only intend to get a ComputeDAG with the up-to-date op stage * structure. * \return The up-to-date ComputeDAG. */ ComputeDAG ReplayAndGetDAG(const Array<Step>& steps) const; static constexpr const char* layout_free_placeholders_key = "layout_free_placeholders"; TVM_DEFINE_OBJECT_REF_METHODS(ComputeDAG, ObjectRef, ComputeDAGNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(ComputeDAGNode); }; /*! * \brief Get the original shape from a rewritten layout string. * \param rewritten_layout The layout after auto-scheduler's layout rewrite. * \param axis_names Specify the names of axes. * \return The original shape. */ Array<PrimExpr> GetShapeFromRewrittenLayout(String rewritten_layout, Array<String> axis_names); } // namespace auto_scheduler } // namespace tvm #endif  // TVM_AUTO_SCHEDULER_COMPUTE_DAG_H_
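// Example (sketch): building a ComputeDAG from a te compute declaration and
// lowering its initial state back to a TVM schedule. Standalone illustration;
// the caller is assumed to supply the output tensors of a compute declaration.
#include <tvm/auto_scheduler/compute_dag.h>

inline void ComputeDAGUsageSketch(const tvm::Array<tvm::te::Tensor>& tensors) {
  using namespace tvm::auto_scheduler;
  // Construction runs the static analyses (flop count, access analyzer, ...).
  ComputeDAG dag(tensors);
  // The initial state has no transform steps; InferBound fills in loop extents.
  State state = dag->init_state;
  state = dag.InferBound(state);
  // Replay the state's transform steps to obtain a te::Schedule plus the
  // tensor arguments for tvm.lower / tvm.build.
  auto sch_and_args = dag.ApplySteps(state->transform_steps);
  (void)sch_and_args;
}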
https://github.com/zk-ml/tachikoma
include/tvm/auto_scheduler/cost_model.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file auto_scheduler/cost_model.h * \brief Cost models that estimate the performance of programs */ #ifndef TVM_AUTO_SCHEDULER_COST_MODEL_H_ #define TVM_AUTO_SCHEDULER_COST_MODEL_H_ #include <tvm/auto_scheduler/compute_dag.h> #include <tvm/auto_scheduler/measure.h> #include <tvm/node/node.h> #include <tvm/runtime/packed_func.h> #include <vector> namespace tvm { namespace auto_scheduler { using runtime::PackedFunc; using runtime::TypedPackedFunc; /*! \brief The base class for cost model */ class CostModelNode : public Object { public: /*! * \brief Update the cost model according to new measurement results (training data). * \param inputs The measure inputs * \param results The measure results */ virtual void Update(const Array<MeasureInput>& inputs, const Array<MeasureResult>& results) = 0; /*! * \brief Predict the scores of states * \param task The search task of states * \param states The input states * \param scores The predicted scores for all states */ virtual void Predict(const SearchTask& task, const Array<State>& states, std::vector<float>* scores) = 0; /*! * \brief Predict the scores of all stages in states. This is the breakdown version of `Predict` * \param task The search task * \param states The input states * \param state_scores The predicted scores for all states * \param stage_scores The predicted scores for all stages in all stages */ virtual void PredictStages(const SearchTask& task, const Array<State>& states, std::vector<float>* state_scores, std::vector<std::vector<float>>* stage_scores) { LOG(FATAL) << "Not implemented"; } /*! * \brief Default virtual destructor */ virtual ~CostModelNode() {} static constexpr const char* _type_key = "auto_scheduler.CostModel"; TVM_DECLARE_BASE_OBJECT_INFO(CostModelNode, Object); }; /*! * \brief Managed reference to CostModelNode. * \sa CostModelNode */ class CostModel : public ObjectRef { public: TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(CostModel, ObjectRef, CostModelNode); }; /*! \brief The cost model returning random value for all predictions */ class RandomModelNode : public CostModelNode { public: /*! \brief Pointer to a random number generator function */ const TypedPackedFunc<void(size_t, void*)>* random_number_func; void Update(const Array<MeasureInput>& inputs, const Array<MeasureResult>& results) final; void Predict(const SearchTask& task, const Array<State>& states, std::vector<float>* scores) final; static constexpr const char* _type_key = "auto_scheduler.RandomModel"; TVM_DECLARE_FINAL_OBJECT_INFO(RandomModelNode, CostModelNode); }; /*! * \brief Managed reference to RandomModelNode. 
* \sa RandomModelNode */ class RandomModel : public CostModel { public: RandomModel(); explicit RandomModel(::tvm::runtime::ObjectPtr<::tvm::runtime::Object> n) : CostModel(n) {} RandomModelNode* operator->() const { return static_cast<RandomModelNode*>(data_.get()); } TVM_DEFINE_DEFAULT_COPY_MOVE_AND_ASSIGN(RandomModel); using ContainerType = RandomModelNode; }; /*! \brief A wrapper for cost model defined by python code * This class will call functions defined in the python */ class PythonBasedModelNode : public CostModelNode { public: /*! \brief Pointer to the update function in python */ PackedFunc update_func; /*! \brief Pointer to the predict function in python */ PackedFunc predict_func; /*! \brief Pointer to the predict function in python */ PackedFunc predict_stage_func; void Update(const Array<MeasureInput>& inputs, const Array<MeasureResult>& results) final; void Predict(const SearchTask& task, const Array<State>& states, std::vector<float>* scores) final; void PredictStages(const SearchTask& task, const Array<State>& states, std::vector<float>* state_scores, std::vector<std::vector<float>>* stage_scores) final; static constexpr const char* _type_key = "auto_scheduler.PythonBasedModel"; TVM_DECLARE_FINAL_OBJECT_INFO(PythonBasedModelNode, CostModelNode); }; /*! * \brief Managed reference to PythonBasedModelNode. * \sa PythonBasedModelNode */ class PythonBasedModel : public CostModel { public: /*! * \brief The constructor. * \param update_func The pointer to the update function defined in python * \param predict_func The pointer to the prediction function defined in python * \param predict_stage_func The pointer to the prediction function defined in python */ PythonBasedModel(PackedFunc update_func, PackedFunc predict_func, PackedFunc predict_stage_func); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(PythonBasedModel, CostModel, PythonBasedModelNode); }; } // namespace auto_scheduler } // namespace tvm #endif // TVM_AUTO_SCHEDULER_COST_MODEL_H_
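// Example (sketch): querying a cost model for state scores. RandomModel is the
// simplest concrete CostModel; a real search would use a learned model and call
// Update() with measured results first. Standalone illustration; note that
// RandomModel relies on a random-fill function registered on the Python side.
#include <tvm/auto_scheduler/cost_model.h>

#include <vector>

inline void CostModelUsageSketch(const tvm::auto_scheduler::SearchTask& task,
                                 const tvm::Array<tvm::auto_scheduler::State>& states) {
  using namespace tvm::auto_scheduler;
  RandomModel model;
  std::vector<float> scores;
  // scores[i] is the predicted score of states[i]; RandomModel returns random values.
  model->Predict(task, states, &scores);
}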
https://github.com/zk-ml/tachikoma
include/tvm/auto_scheduler/feature.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file auto_scheduler/feature.h * \brief Feature extraction for the cost model. * We extract one feature vector per BufferStoreNode statement in a TIR Stmt, * so we call this feature as "per-store" feature. * The cost model also does prediction for each BufferStoreNode statement and aggregates * the predictions as the whole score for a TVM IR (Stmt). * * The feature specification is defined by `src/auto_scheduler/feature.cc:: FeatureSet` */ #ifndef TVM_AUTO_SCHEDULER_FEATURE_H_ #define TVM_AUTO_SCHEDULER_FEATURE_H_ #include <tvm/auto_scheduler/compute_dag.h> #include <tvm/auto_scheduler/measure.h> #include <tvm/tir/function.h> #include <string> #include <vector> namespace tvm { namespace auto_scheduler { /*! * \brief Get per-store features from a TIR PrimFunc * \param func The input lowered TIR PrimFunc * \param cache_line_size The size of cache line in bytes * \param max_n_bufs The maximum number of extracted buffers for one statement * \param ret The returned feature vector * \param log_scale Should the outputs be scaled by log2(1+x). */ void GetPerStoreFeature(const PrimFunc& func, int cache_line_size, int max_n_bufs, std::vector<float>* ret, bool log_scale = true); /* * \brief Get the names of elements in the feature vector. Use this for debug and inspection. * \param max_n_bufs The maximum number of extracted buffers for one statement * \param ret The returned names. */ void GetPerStoreFeatureName(int max_n_bufs, std::vector<std::string>* ret); /*! * \brief Get per-store feature from states of the same task * \param states The input states * \param task The same search task for all states * \param skip_first_n_feature_extraction Skip feature extraction for the first n states * \param max_n_bufs The maximum number of extracted buffers for one statement * \param features The returned feature vector. The innermost vector contains the * feature vectors for all BufferStoreNode statements */ void GetPerStoreFeaturesFromStates(const Array<State>& states, const SearchTask& task, int skip_first_n_feature_extraction, int max_n_bufs, std::vector<std::vector<float>>* features); /*! * \brief Get per-store feature from states of different tasks * \param states The input states * \param tasks The search tasks corresponding to the input states * \param skip_first_n_feature_extraction Skip feature extraction for the first n states * \param max_n_bufs The maximum number of extracted buffers for one statement * \param features The returned feature vector. 
The innermost vector contains the * feature vectors for all BufferStoreNode statements */ void GetPerStoreFeaturesFromStates(const Array<State>& states, const std::vector<SearchTask>& tasks, int skip_first_n_feature_extraction, int max_n_bufs, std::vector<std::vector<float>>* features); /*! * \brief Get per-store features from a log file * \param filename The name of log file * \param max_lines Only read the first n lines of the file * \param max_n_bufs The maximum number of extracted buffers for one statement * \param features The returned feature vector. The innermost vector contains the * feature vectors for all BufferStoreNode statements * \param normalized_throughputs The normalized throughputs for all states * \param task_ids The task ids for all states */ void GetPerStoreFeaturesFromFile(const std::string& filename, int max_lines, int max_n_bufs, std::vector<std::vector<float>>* features, std::vector<float>* normalized_throughputs, std::vector<int>* task_ids); /*! * \brief Get per-store features from measurement input/result pairs * \param inputs The measurement inputs * \param results The measurement results * \param skip_first_n_feature_extraction Skip feature extraction for the first n measurement pairs * \param max_n_bufs The maximum number of extracted buffers for one statement * \param features The returned feature vector. The innermost vector contains the * feature vectors for all BufferStoreNode statements * \param normalized_throughputs The normalized throughputs for all states * \param task_ids The task ids for all states */ void GetPerStoreFeaturesFromMeasurePairs(const Array<MeasureInput>& inputs, const Array<MeasureResult>& results, int skip_first_n_feature_extraction, int max_n_bufs, std::vector<std::vector<float>>* features, std::vector<float>* normalized_throughputs, std::vector<int>* task_ids); } // namespace auto_scheduler } // namespace tvm #endif // TVM_AUTO_SCHEDULER_FEATURE_H_
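A minimal sketch (not part of the header): loading per-store features and labels from a measurement log with GetPerStoreFeaturesFromFile declared above. The file name, the line limit, and the buffer cap of 5 are illustrative values chosen for this example, not defaults mandated by the header.

#include <tvm/auto_scheduler/feature.h>

#include <string>
#include <vector>

void LoadTrainingData(const std::string& log_file) {
  std::vector<std::vector<float>> features;   // one flattened feature matrix per record
  std::vector<float> throughputs;             // normalized throughputs, usable as labels
  std::vector<int> task_ids;                  // which task each record belongs to
  tvm::auto_scheduler::GetPerStoreFeaturesFromFile(log_file, /*max_lines=*/10000,
                                                   /*max_n_bufs=*/5, &features,
                                                   &throughputs, &task_ids);
}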
https://github.com/zk-ml/tachikoma
include/tvm/auto_scheduler/loop_state.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file auto_scheduler/loop_state.h * \brief The definition of the "state" in the search. * * Each LoopState corresponds to a schedule for its ComputeDAG. * A LoopState consists of: 1. a current loop structure; 2. a list of transformation steps used to * construct the loop structure. * The loop structure keeps a preview of how the schedule will finally look like after lowering the * current state (e.g. number of iterators, the extent of each iterator, the compute_at locations * ...). * During the schedule search process, the loop structure can provide search policy with necessary * information on how to manipulate the current state. * The transform history is a sequence of `TransformStep` which will finally be mapped to TVM * schedule primitives. The steps are also used for the serialization of a state. * * The LoopState can be seen as a lightweight loop structure IR specifically for schedule search. * We don't use the existing TVM IR but to extend a new structure on it is because: * 1. We want fast incremental change to the loop structures. The search policy needs to get the * immediate loop structures update rather than after TVM lowering; * 2. We want serializable transform history for replay, backtracking, and mutation; * 3. We may create some macro schedule primitives that represent the combination of several * TVM schedule primitives. * * When the search is finished, we will lower the state to TVM IR with TVM's schedule primitives. * Since we share a lot of common objects during search, the transformation is implemented in * copy on write style. All objects are immutable, which is similar to TVM IR. */ #ifndef TVM_AUTO_SCHEDULER_LOOP_STATE_H_ #define TVM_AUTO_SCHEDULER_LOOP_STATE_H_ #include <dmlc/common.h> #include <tvm/auto_scheduler/transform_step.h> #include <functional> #include <unordered_map> #include <utility> #include <vector> namespace tvm { namespace auto_scheduler { using namespace tvm::tir; class ComputeDAG; /*! \brief The type of a stage. */ enum class StageKind : int { /*! \brief A placeholder stage. */ kPlaceholder = 0, /*! \brief A compute stage. */ kCompute = 1 }; /*! \brief The type of compute location. */ enum class ComputeAtKind : int { /*! \brief Compute at root. */ kRoot = 0, /*! \brief Compute inlined. */ kInlined = 1, /*! \brief Compute at some iterator. */ kIter = 2, }; /*! \brief Stage-level attributes. */ struct StageAttributes { /*! \brief The maximum steps for the pragma `auto_unroll_max_step`. */ int auto_unroll_max_step; /*! \brief The storage offset for the schedule primitive `storage_align`. */ int storage_offset; }; /*! * \brief A op stage in the compute declaration. * Similar to te::Stage in `include/tvm/te/schedule.h`. */ class StageNode : public Object { public: /*! 
\brief The operator of this stage */ te::Operation op; /*! \brief The iterators in this stage. */ Array<Iterator> iters; /*! \brief The type of this stage. */ StageKind op_type; /*! \brief The compute location of this stage. */ ComputeAtKind compute_at; /*! \brief Other stage-level attributes. */ StageAttributes attrs; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("op", &op); v->Visit("iters", &iters); v->Visit("op_type", &op_type); v->Visit("compute_at", &compute_at); } static constexpr const char* _type_key = "auto_scheduler.Stage"; TVM_DECLARE_FINAL_OBJECT_INFO(StageNode, Object); }; /*! * \brief Managed reference to StageNode. * \sa StageNode */ class Stage : public ObjectRef { public: /*! * \brief The constructor. * \param op A `te::Operation`. */ explicit Stage(te::Operation op); /*! * \brief The constructor. * \param op The source operation * \param op_type The stage type of this op. * \param iters The iterators of this op. * \param compute_at The compute at type of this op. * \param attrs Other stage-level attributes. */ Stage(te::Operation op, StageKind op_type, const Array<Iterator>& iters, ComputeAtKind compute_at, StageAttributes attrs); TVM_DEFINE_OBJECT_REF_METHODS(Stage, ObjectRef, StageNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(StageNode); }; /*! \brief Use stage_id to represent a stage. */ using StageKey = int; /*! \brief Use stage_id and iter_id to represent a iterator. */ using IterKey = std::pair<int, int>; /*! * \brief stores the compute_at relation between stages * This stores a bi-directional mapping from stages and iter: * 1. Stage to its attached iterator * 2. Iterator to the stage attached to it * You can use AttachMapNode::stage_to_attach_iter and AttachMapNode::iter_to_attached_stages * to query the relations */ class AttachMapNode : public Object { public: struct IterKeyHash { std::size_t operator()(const IterKey& k) const { return ::dmlc::HashCombine(std::hash<int>()(k.first), std::hash<int>()(k.second)); } }; /*! \brief A Map to store the mapping of stage to its attached iterator. */ std::unordered_map<StageKey, IterKey> stage_to_attach_iter; /*! \brief A Map to store the mapping of iterator to the stages attached to it. */ std::unordered_map<IterKey, std::vector<StageKey>, IterKeyHash> iter_to_attached_stages; static constexpr const char* _type_key = "auto_scheduler.AttachMap"; TVM_DECLARE_FINAL_OBJECT_INFO(AttachMapNode, Object); }; /*! * \brief Managed reference to AttachMapNode. * \sa AttachMapNode */ class AttachMap : public ObjectRef { public: /*! * \brief Process the stage/iterator mapping after compute at. * \param stage_id The index of the source stage of computed at. * \param target_stage_id The index of stage that this step will compute at to. * \param target_iter_id The index of target iterator in the target stage. */ void SetComputeAtIter(int stage_id, int target_stage_id, int target_iter_id); /*! * \brief Delete the entry of a specific stage. This is a public wrapper of `DeleteStageEntry`. * \param stage_id The index of the stage to be deleted. */ void DeleteStage(int stage_id); /*! * \brief Find the relations of original iterators in AttachMap, and update them with the new * iterators. Both `stage_to_attach_iter` and `iter_to_attached_stages` will be updated. * \param original_iters The original IterKey. * \param new_iters The new IterKey for replacing the old ones. */ void UpdateIters(const std::vector<IterKey>& original_iters, const std::vector<IterKey>& new_iters); /*! 
* \brief Traverse through `stage_to_attach_iter` and `iter_to_attached_stages` map, add offset * to stage indexes that are larger than the start_id. Used for steps that insert new stages to * ComputeDAG (e.g., CacheRead/CacheWrite step). * \param start_id The index threshold. This function only adds offset for stages * with indices larger then this threshold. * \param offset The index offset to be added to the stage index. * \return The updated AttachMap after applying stage index offset. */ AttachMap ApplyStageIdOffset(int start_id, int offset = 1) const; TVM_DEFINE_OBJECT_REF_METHODS(AttachMap, ObjectRef, AttachMapNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(AttachMapNode); private: /*! * \brief Delete the entry of a specific stage. This will remove the items related to this * stage in both `stage_to_attach_iter` and `iter_to_attached_stages` map. * \param pnode A mutable pointer to AttachMapNode. * \param stage_id The index of stage that will be removed from the map. */ static void DeleteStageEntry(AttachMapNode* pnode, int stage_id); }; /*! * \brief A state in the search process. * It consists of the current loop structure and a list of transformation steps used to construct * it. * Each State corresponds to a specific schedule for its ComputeDAG. */ class StateNode : public Object { public: /*! \brief Current stages and loop structures. */ Array<Stage> stages; /*! \brief History transformation steps. */ Array<Step> transform_steps; /*! * \brief The attach relations of stages and iterators. This is used to track the compute at * operation. */ AttachMap attach_map; /*! \brief The up-to-date ComputeDAG of this state. The default value is an empty NullOpt, * meaning the dag of this state is the same as the original ComputeDAG in the SearchTask. * Otherwise, the stored value is the up-to-date ComputeDAG for this state, meaning some steps * (e.g., CacheReadStep/CacheWriteStep) have modified the ComputeDAG. */ Optional<ObjectRef> current_compute_dag; /*! * \brief Indicate whether this state has unfilled tile sizes. A concrete state means that all * tile sizes of the state is filled. Only concrete state can be apply to TVM schedule. */ bool concrete; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("stages", &stages); v->Visit("transform_steps", &transform_steps); v->Visit("concrete", &concrete); } static constexpr const char* _type_key = "auto_scheduler.State"; TVM_DECLARE_FINAL_OBJECT_INFO(StateNode, Object); }; /*! * \brief Managed reference to StateNode. * \sa StateNode */ class State : public ObjectRef { public: /*! * \brief The constructor. * \param ops `te::Operation`s for a compute declaration. */ explicit State(const Array<te::Operation>& ops); /*! * \brief Pretty-print the state to a human readable string. * \param delete_trivial_loop True for skipping the trivial loops. * (undefined or extent == 1, default set to True) * \return The human readable string. */ String ToStr(bool delete_trivial_loop = true) const; /********** Step APIs working on a single stage **********/ /*! * \brief The schedule primitive corresponding to `te::Stage::bind`. * \param stage_id The index of the stage to be binded. * \param it The iterator to be binded. * \param thread_type The thread type. * \return The new iterator after binding. */ TVM_DLL Iterator bind(int stage_id, const Iterator& it, IteratorAnnotation thread_type); /*! * \brief The schedule primitive corresponding to `te::Stage::parallel`. * \param stage_id The index of the stage to be paralleled. * \param it The iterator to be paralleled. 
* \return The new iterator after parallel. */ TVM_DLL Iterator parallel(int stage_id, const Iterator& it); /*! * \brief The schedule primitive corresponding to `te::Stage::unroll`. * \param stage_id The index of the stage to be unrolled. * \param it The iterator to be unrolled. * \param max_unroll The max unroll limit. Iterator with extent larger than this limit will be * skipped. * \return The new iterator after unroll. */ TVM_DLL Iterator unroll(int stage_id, const Iterator& it, int max_unroll = -1); /*! * \brief The schedule primitive corresponding to `te::Stage::vectorize`. * \param stage_id The index of the stage to be vectorized. * \param it The iterator to be vectorized. * \return The new iterator after vectorization. */ TVM_DLL Iterator vectorize(int stage_id, const Iterator& it); /*! * \brief The schedule primitive corresponding to `te::Stage::fuse`. * \param stage_id The index of the stage to be fused. * \param iters The iterators to be fused. * \return The iterator result after fuse. * \note If the iterators to be fused have stages attached at them(by compute_at), the fused * result will become the new attach point. */ TVM_DLL Iterator fuse(int stage_id, const Array<Iterator>& iters); /*! * \brief The schedule primitive corresponding to `te.Stage.pragma`. * \param stage_id The index of the stage to add pragma. * \param it The iterator to add pragma. * \param pragma_type The pragma string. */ TVM_DLL void pragma(int stage_id, const Iterator& it, const String& pragma_type); /*! * \brief The schedule primitive corresponding to `te::Stage::reorder`. * \param stage_id The index of the stage to be reordered. * \param order The expected iterator order. */ TVM_DLL void reorder(int stage_id, const Array<Iterator>& order); /*! * \brief The schedule primitive corresponding to `te::Stage::split`. * \param stage_id The index of the stage to be split. * \param it The iterator to be split. * \param lengths The multiple split factors. Can be None to be filled by search policy. * \param inner_to_outer Whether the factors go from inner to outer, or from outer to inner. * \return The new iterator after splitting. * \note If we do split on an iterator which has stages attached at it(by compute_at), the inner * most iterator of split results will become the new attach point. */ TVM_DLL Array<Iterator> split(int stage_id, const Iterator& it, const Array<Optional<Integer>>& lengths, bool inner_to_outer = true); /*! * \brief The schedule primitive similar to split, but uses split factors from previous steps. * \param stage_id The index of the stage to be split. * \param it The iterator to be split. * \param src_step_id The index of the split step to be followed in the history. * \param n_split The number of split level. * \return The split new Iterators. */ TVM_DLL Array<Iterator> follow_split(int stage_id, const Iterator& it, int src_step_id, int n_split); /*! * \brief The schedule primitive similar to split, but uses split factors from * fused previous steps. * \param stage_id The index of the stage to be split. * \param it The iterator to be split. * \param src_step_ids The indices of the split steps to be followed in the history. * \param level Use the length in this split level. * \param factor_or_nparts True to use `factor` for split from inner to outer, False to use `nparts` for split from outer to inner. * \return The split new Iterators. */ TVM_DLL Array<Iterator> follow_fused_split(int stage_id, const Iterator& it, const Array<Integer>& src_step_ids, int level, bool factor_or_nparts); /*! 
* \brief The schedule primitive corresponding to `te.Stage.storage_align`. * \param stage_id The index of the stage to be aligned. * \param it The iterator to be aligned. * \param factor The factor in alignment specification. * \param offset The offset in the alignment specification. */ TVM_DLL void storage_align(int stage_id, const Iterator& it, int factor, int offset); /********** Step APIs working on multiple stages **********/ /*! * \brief The schedule primitive corresponding to `te::Stage::compute_at`. * \param stage_id The index of the source stage of computed at. * \param target_stage_id The index of stage that this step will compute at to. * \param target_iter The indiex of the target iterator in the target stage. * \note After compute_at, we need careful dependency analysis to compute the accurate bound * information. However, it is relatively expensive and complicated, so we just fill "None" as * bound for the newly created iterators. * Call ComputeDAG::InferBound on the updated state if you need the complete bound information. */ TVM_DLL void compute_at(int stage_id, int target_stage_id, const Iterator& target_iter); /*! * \brief The schedule primitive corresponding to `te::Stage::compute_inline`. * \param stage_id The index of the stage to be marked compute inlined. */ TVM_DLL void compute_inline(int stage_id); /*! * \brief The schedule primitive corresponding to `te::Stage::compute_root`. * \param stage_id The index of the stage to be marked compute at root. * \note After compute_root, we need careful dependency analysis to compute the accurate bound * information. However, it is relatively expensive and complicated, so we just fill "None" as * bound for the newly created iterators. * Call ComputeDAG::InferBound on the updated state if you need the complete bound information. */ TVM_DLL void compute_root(int stage_id); /********** Step APIs adding new stages **********/ /*! * \brief The schedule primitive corresponding to `te::Schedule::cache_read`. * \param stage_id The index of the stage to be cache_read. * \param scope_name The scope name of the newly added stage. * \param reader_stage_ids The indices of reader stages. * \param dag The original ComputeDAG of this state. * \note Cache read step will add an extra stage to the original ComputeDAG (at the back of the * target stage), an up-to-date ComputeDAG is stored in State's `current_compute_dag`. */ TVM_DLL int cache_read(int stage_id, const String& scope_name, const Array<Integer>& reader_stage_ids, const ComputeDAG& dag); /*! * \brief The schedule primitive corresponding to `te::Schedule::cache_write`. * \param stage_id The index of the stage to be cache_write. * \param scope_name The scope name of the newly added stage. * \param dag The original ComputeDAG of this state. * \note Cache write step will add an extra stage to the original ComputeDAG (in the front of the * target stage), an up-to-date ComputeDAG is stored in State's `current_compute_dag`. * This step will cache write all output tensors of the target stage. */ TVM_DLL int cache_write(int stage_id, const String& scope_name, const ComputeDAG& dag); /*! * \brief The schedule primitive corresponding to `te::Schedule::rfactor`. * \param stage_id The index of the iterator to be factored. * \param it The iterator to be factored. * \param factor_iter_id The position where the new iterator is placed. * \param dag The original ComputeDAG of this state. 
* \note Rfactor step will add an extra stage to the original ComputeDAG (in the front of the * target stage), an up-to-date ComputeDAG is stored in State's `current_compute_dag`. */ TVM_DLL int rfactor(int stage_id, const Iterator& it, int factor_iter_id, const ComputeDAG& dag); TVM_DEFINE_OBJECT_REF_METHODS(State, ObjectRef, StateNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(StateNode); }; } // namespace auto_scheduler } // namespace tvm // Hash and equal function for State namespace std { /*! * \brief The equal_to function for auto_scheduler::State. * This function checks the equality by looking at the lowered string format of states. * If two states with different transform history have the same lowered string format, * they will be considered being equal. */ template <> struct equal_to<::tvm::auto_scheduler::State> { bool operator()(const ::tvm::auto_scheduler::State& lhs, const ::tvm::auto_scheduler::State& rhs) const { return lhs.ToStr() == rhs.ToStr(); } }; /*! \brief The hash function for auto_scheduler::State. */ template <> struct hash<::tvm::auto_scheduler::State> { std::size_t operator()(const ::tvm::auto_scheduler::State& state) const { return tvm::runtime::ObjectHash()(state.ToStr()); } }; } // namespace std #endif // TVM_AUTO_SCHEDULER_LOOP_STATE_H_
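A minimal sketch (not part of the header): building an initial State for a small TE matmul and applying a few of the step APIs declared above. The matmul declaration and the stage index of C (2, since the two placeholders occupy stages 0 and 1) are assumptions of this example; lowering the finished state to a te::Schedule goes through ComputeDAG, which is declared elsewhere.

#include <tvm/auto_scheduler/loop_state.h>
#include <tvm/te/operation.h>

void ManipulateState() {
  using namespace tvm;
  using namespace tvm::auto_scheduler;

  int n = 512;
  te::Tensor A = te::placeholder({n, n}, DataType::Float(32), "A");
  te::Tensor B = te::placeholder({n, n}, DataType::Float(32), "B");
  te::IterVar k = te::reduce_axis(Range(0, n), "k");
  te::Tensor C = te::compute(
      {n, n}, [&](tir::Var i, tir::Var j) { return tvm::sum(A(i, k) * B(k, j), {k}); }, "C");

  // The initial state mirrors the naive loop nest of the compute declaration.
  State state(Array<te::Operation>{A->op, B->op, C->op});
  int c_id = 2;  // stage index of C: placeholders A and B come first

  // Split the outer spatial loop by 32 and parallelize the new outer iterator.
  const Iterator& i0 = state->stages[c_id]->iters[0];
  Array<Iterator> outer_inner = state.split(c_id, i0, {Integer(32)});
  state.parallel(c_id, outer_inner[0]);
}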
https://github.com/zk-ml/tachikoma
include/tvm/auto_scheduler/measure.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file auto_scheduler/measure.h * \brief Distributed measurement infrastructure to measure the runtime costs of tensor programs. * These functions are responsible for building the tvm module, uploading it to remote devices, * recording the running time costs, and checking the correctness of the output. * * The measurement is separated into two steps: build and run. * A builder builds the executable binary files and a runner runs the binary files to get the * measurement results. The flow of data structures is * * `ProgramBuilder` `ProgramRunner` * `MeasureInput` -----------------> `BuildResult` ----------------> `MeasureResult` * * The core functions is implemented in python to utilize python's multiprocessing * and error handling (see also `python/tvm/auto_scheduler/measure.py`). * This c++ file is just a wrapper for the python functions. */ #ifndef TVM_AUTO_SCHEDULER_MEASURE_H_ #define TVM_AUTO_SCHEDULER_MEASURE_H_ #include <tvm/auto_scheduler/loop_state.h> #include <tvm/auto_scheduler/search_task.h> #include <string> #include <unordered_map> #include <unordered_set> #include <utility> namespace tvm { namespace auto_scheduler { class SearchPolicy; class MeasureInput; class MeasureResult; /*! \brief The error code of one measurement */ enum class MeasureErrorNO : int { /*! \brief No error. */ kNoError = 0, /*! \brief Errors happen when apply transform steps from init state. */ kInstantiationError = 1, /*! \brief Errors happen when compiling code on host. (when build module) */ kCompileHostError = 2, /*! \brief Errors happen when compiling code on device. (when load module) */ kCompileDeviceError = 3, /*! \brief Errors happen when run program on device. */ kRuntimeDeviceError = 4, /*! \brief Answer is wrong when compared to a reference output. */ kWrongAnswerError = 5, /*! \brief Timeout during compilation. */ kBuildTimeoutError = 6, /*! \brief Timeout during run. */ kRunTimeoutError = 7, /*! \brief Unknown error. */ kUnknownError = 8, }; // Inputs and results of one measurement /*! \brief Store the input of a measurement */ class MeasureInputNode : public Object { public: /*! \brief The search task. */ SearchTask task; /*! \brief The program state to be measured. */ State state; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("task", &task); v->Visit("state", &state); } /*! \brief Do shallow copy. */ MeasureInput copy() const; static constexpr const char* _type_key = "auto_scheduler.MeasureInput"; TVM_DECLARE_FINAL_OBJECT_INFO(MeasureInputNode, Object); }; /*! * \brief Managed reference to MeasureInputNode. * \sa MeasureInputNode */ class MeasureInput : public ObjectRef { public: /*! * \brief The constructor. * \param task The SearchTask of this measure. * \param state The State to be measured. 
*/ MeasureInput(SearchTask task, State state); TVM_DEFINE_OBJECT_REF_METHODS(MeasureInput, ObjectRef, MeasureInputNode); }; /*! \brief Store the result of a build. */ class BuildResultNode : public Object { public: /*! \brief The filename of built binary file. */ String filename; /*! \brief The arguments. */ Array<te::Tensor> args; /*! \brief The error code. (0 means no error, see MeasureErrorNO) */ int error_no; /*! \brief The error message if there is any error. */ String error_msg; /*! \brief The time cost of build. */ double time_cost; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("filename", &filename); v->Visit("args", &args); v->Visit("error_no", &error_no); v->Visit("error_msg", &error_msg); v->Visit("time_cost", &time_cost); } static constexpr const char* _type_key = "auto_scheduler.BuildResult"; TVM_DECLARE_FINAL_OBJECT_INFO(BuildResultNode, Object); }; /*! * \brief Managed reference to BuildResultNode. * \sa BuildResultNode */ class BuildResult : public ObjectRef { public: /*! * \brief The constructor. * \param filename The filename of built binary file. * \param args The arguments. * \param error_no The error code. * \param error_msg The error message if there is any error. * \param time_cost The time cost of build. */ BuildResult(String filename, Array<te::Tensor> args, int error_no, String error_msg, double time_cost); TVM_DEFINE_OBJECT_REF_METHODS(BuildResult, ObjectRef, BuildResultNode); }; /*! \brief Store the results of a measurement. */ class MeasureResultNode : public Object { public: /*! \brief The time costs of execution. */ Array<PrimExpr> costs; /*! \brief The error code. (0 means no error, see MeasureErrorNO) */ int error_no; /*! \brief The error message if there is any error. */ String error_msg; /*! \brief The time cost of build and run. */ double all_cost; /*! \brief The time stamps of this measurement. */ double timestamp; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("costs", &costs); v->Visit("error_no", &error_no); v->Visit("error_msg", &error_msg); v->Visit("all_cost", &all_cost); v->Visit("timestamp", &timestamp); } /*! \brief Do shallow copy. */ MeasureResult copy() const; static constexpr const char* _type_key = "auto_scheduler.MeasureResult"; TVM_DECLARE_FINAL_OBJECT_INFO(MeasureResultNode, Object); }; /*! * \brief Managed reference to MeasureResultNode. * \sa MeasureResultNode */ class MeasureResult : public ObjectRef { public: /*! * \brief The constructor. * \param costs The time costs of execution. * \param error_no The error code. * \param error_msg The error message if there is any error. * \param all_cost The time cost of build and run. * \param timestamp The time stamps of this measurement. */ MeasureResult(Array<PrimExpr> costs, int error_no, String error_msg, double all_cost, double timestamp); TVM_DEFINE_OBJECT_REF_METHODS(MeasureResult, ObjectRef, MeasureResultNode); }; /*! \brief Bass class of measurement callbacks */ class MeasureCallbackNode : public Object { public: /*! * \brief Callback function that will be called on measurement input/result pairs * after each measurement batch. * \param policy The current search policy. * \param inputs An Array of MeasureInput. * \param results An Array of MeasureResult. */ virtual void Callback(const SearchPolicy& policy, const Array<MeasureInput>& inputs, const Array<MeasureResult>& results) = 0; static constexpr const char* _type_key = "auto_scheduler.MeasureCallback"; TVM_DECLARE_BASE_OBJECT_INFO(MeasureCallbackNode, Object); }; /*! * \brief Managed reference to MeasureCallbackNode. 
* \sa MeasureCallbackNode */ class MeasureCallback : public ObjectRef { public: TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(MeasureCallback, ObjectRef, MeasureCallbackNode); }; /*! \brief A wrapper for measure callback defined by python code * This class will call functions defined in the python */ class PythonBasedMeasureCallbackNode : public MeasureCallbackNode { public: /*! \brief Pointer to the callback function in python */ PackedFunc callback_func; void Callback(const SearchPolicy& policy, const Array<MeasureInput>& inputs, const Array<MeasureResult>& results) final; static constexpr const char* _type_key = "auto_scheduler.PythonBasedMeasureCallback"; TVM_DECLARE_FINAL_OBJECT_INFO(PythonBasedMeasureCallbackNode, MeasureCallbackNode); }; /*! * \brief Managed reference to PythonBasedMeasureCallbackNode. * \sa PythonBasedMeasureCallbackNode */ class PythonBasedMeasureCallback : public MeasureCallback { public: /*! * \brief The constructor. * \param callback_func The pointer to the callback function defined in python */ explicit PythonBasedMeasureCallback(PackedFunc callback_func); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(PythonBasedMeasureCallback, MeasureCallback, PythonBasedMeasureCallbackNode); }; // The base class of ProgramBuilders and ProgramRunners. /*! \brief ProgramBuilder that builds the programs */ class ProgramBuilderNode : public Object { public: /*! \brief The number of build processes to run in parallel */ int n_parallel; /*! \brief Timeout of a build */ int timeout; /*! * \brief Build programs and return results. * \param inputs An Array of MeasureInput. * \param verbose Verbosity level. 0 for silent, 1 to output information during program * building. * \return An Array of MeasureResult. */ virtual Array<BuildResult> Build(const Array<MeasureInput>& inputs, int verbose) = 0; static constexpr const char* _type_key = "auto_scheduler.ProgramBuilder"; TVM_DECLARE_BASE_OBJECT_INFO(ProgramBuilderNode, Object); }; /*! * \brief Managed reference to ProgramBuilderNode. * \sa ProgramBuilderNode */ class ProgramBuilder : public ObjectRef { public: TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(ProgramBuilder, ObjectRef, ProgramBuilderNode); }; /*! \brief ProgramRunner that runs the built programs and measure the time cost. */ class ProgramRunnerNode : public Object { public: /*! \brief Timeout of a run. */ int timeout; /*! \brief The number of times to run the generated code for taking average. */ int number; /*! \brief The number of times to repeat the measurement. */ int repeat; /*! \brief The minimum duration of one repeat in milliseconds. */ int min_repeat_ms; /*! \brief The cool down interval between two measurements. */ double cooldown_interval; /*! \brief Whether to flush cache on CPU between repeated measurements. */ bool enable_cpu_cache_flush; /*! \brief Which device to run on if multiple are avaialble. */ int device; /*! * \brief Run measurement and return results. * \param inputs An Array of MeasureInput. * \param build_results An Array of BuildResult. * \param verbose Verbosity level. 0 for silent, 1 to output information during program * running. * \return An Array of MeasureResult. */ virtual Array<MeasureResult> Run(const Array<MeasureInput>& inputs, const Array<BuildResult>& build_results, int verbose) = 0; static constexpr const char* _type_key = "auto_scheduler.ProgramRunner"; TVM_DECLARE_BASE_OBJECT_INFO(ProgramRunnerNode, Object); }; /*! * \brief Managed reference to ProgramRunnerNode. 
* \sa ProgramRunnerNode */ class ProgramRunner : public ObjectRef { public: TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(ProgramRunner, ObjectRef, ProgramRunnerNode); }; // Implementation of various builders and runners /*! \brief LocalBuilder use local CPU cores to build programs in parallel */ class LocalBuilderNode : public ProgramBuilderNode { public: /*! \brief Build function. */ String build_func; Array<BuildResult> Build(const Array<MeasureInput>& inputs, int verbose) final; static constexpr const char* _type_key = "auto_scheduler.LocalBuilder"; TVM_DECLARE_FINAL_OBJECT_INFO(LocalBuilderNode, ProgramBuilderNode); }; /*! * \brief Managed reference to LocalBuilderNode. * \sa LocalBuilderNode */ class LocalBuilder : public ProgramBuilder { public: /*! * \brief The constructor. * \param timeout The timeout limit (in second) for each build thread. * This will be used in a wrapper of the multiprocessing.Process.join(). * \param n_parallel The number of threads used to build in parallel. * \param build_func The name of the registered build function. */ LocalBuilder(int timeout, int n_parallel, const String& build_func); TVM_DEFINE_OBJECT_REF_METHODS(LocalBuilder, ProgramBuilder, LocalBuilderNode); }; /*! \brief LocalRunner that uses local CPU/GPU to measure the time cost of programs */ class LocalRunnerNode : public ProgramRunnerNode { public: Array<MeasureResult> Run(const Array<MeasureInput>& inputs, const Array<BuildResult>& build_results, int verbose) final; static constexpr const char* _type_key = "auto_scheduler.LocalRunner"; TVM_DECLARE_FINAL_OBJECT_INFO(LocalRunnerNode, ProgramRunnerNode); }; /*! * \brief Managed reference to LocalRunnerNode. * \sa LocalRunnerNode */ class LocalRunner : public ProgramRunner { public: /*! * \brief The constructor. See the corresponding class in python/tvm/auto_scheduler/measure.py * for more detailed parameter explanation. * \param timeout The timeout limit (in second) for each run. * This is used in a wrapper of the multiprocessing.Process.join(). * \param number The number of times to run the generated code for taking average. * \param repeat The number of times to repeat the measurement. * \param min_repeat_ms The minimum duration of one repeat in milliseconds. * \param cooldown_interval The cool down interval between two measurements. * \param enable_cpu_cache_flush Whether to flush cache on CPU between repeated measurements. * \param device Which device to run on if multiple are available. */ LocalRunner(int timeout, int number, int repeat, int min_repeat_ms, double cooldown_interval, bool enable_cpu_cache_flush, int device); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(LocalRunner, ProgramRunner, LocalRunnerNode); }; /*! * \brief RPCRunner that uses RPC call to measures the time cost of programs on remote devices. * Or sometime we may need to use RPC even in local running to insulate the thread environment. * (e.g. running CUDA programs) */ class RPCRunnerNode : public ProgramRunnerNode { public: /*! \brief The key of the device registered in the RPC tracker. */ String key; /*! \brief The host address of the RPC Tracker. */ String host; /*! \brief The port of the RPC Tracker. */ int port; /*! \brief The priority of this run request, larger is more prior. */ int priority; /*! \brief The number of tasks run in parallel. 
*/ int n_parallel; Array<MeasureResult> Run(const Array<MeasureInput>& inputs, const Array<BuildResult>& build_results, int verbose) final; static constexpr const char* _type_key = "auto_scheduler.RPCRunner"; TVM_DECLARE_FINAL_OBJECT_INFO(RPCRunnerNode, ProgramRunnerNode); }; /*! * \brief Managed reference to RPCRunnerNode. * \sa RPCRunnerNode */ class RPCRunner : public ProgramRunner { public: /*! * \brief The constructor. See the corresponding class in python/tvm/auto_scheduler/measure.py * for more detailed parameter explanation. * \param key The key of the device registered in the RPC tracker. * \param host The host address of the RPC Tracker. * \param port The port of RPC Tracker. * \param priority The priority of this run request, larger is more prior. * \param n_parallel The number of tasks run in parallel. * \param timeout Timeout of a run. * \param number The number of times to run the generated code for taking average. * \param repeat The number of times to repeat the measurement. * \param min_repeat_ms The minimum duration of one repeat in milliseconds. * \param cooldown_interval The cool down interval between two measurements. * \param enable_cpu_cache_flush Whether to flush cache on CPU between repeated measurements. * \param device Which device to run on if multiple are available. */ RPCRunner(const String& key, const String& host, int port, int priority, int n_parallel, int timeout, int number, int repeat, int min_repeat_ms, double cooldown_interval, bool enable_cpu_cache_flush, int device); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(RPCRunner, ProgramRunner, RPCRunnerNode); }; /*! * \brief Measurer that measures the time costs of tvm programs * This class combines ProgramBuilder and ProgramRunner, and provides a simpler API */ class ProgramMeasurerNode : public Object { public: /*! \brief Measured programs counter. */ int ct; /*! \brief Continuous error counter. */ int error_ct; /*! \brief Workload key to best flops map. */ std::unordered_map<std::string, double> best_flops; /*! \brief Workload key to best state map. */ std::unordered_map<std::string, State> best_state; /*! \brief Workload key to best state's count index map. */ std::unordered_map<std::string, int> best_ct; /*! \brief The set of workloads that have at least one valid schedule */ std::unordered_set<std::string> has_valid; /*! \brief The ProgramBuilder to build each program. */ ProgramBuilder builder; /*! \brief The ProgramRunner to measure each program. */ ProgramRunner runner; /*! \brief MeasureCallback to be called after each measure batch. */ Optional<Array<MeasureCallback>> callbacks; /*! \brief Verbosity level. 0 for silent, 1 to output information during program measuring. */ int verbose; /*! \brief The number of allowed maximum continuous error before forcely stopping the tuning */ int max_continuous_error; /*! \brief Reset book keeping variables */ void Reset(); /*! * \brief Do measurement. * \param task The current SearchTask. * \param policy The current SearchPolicy. * \param inputs The inputs of measurement. * \param batch_size Number of programs to be measured in one batch. * \return results The results of measurement. */ Array<MeasureResult> Measure(const SearchTask& task, const SearchPolicy& policy, const Array<MeasureInput>& inputs, int batch_size = -1); /*! * \brief Do measurement silently. * This API will not print the measure results to screen. * \param task The current SearchTask. * \param inputs The MeasureInputs. * \param results A pointer to a MeasureResult Array, this is used as output. 
*/ void SilentMeasure(const SearchTask& task, const Array<MeasureInput>& inputs, Array<MeasureResult>* results); /*! \brief The default max continuous error setting. */ static const int DEFAULT_MAX_CONTINUOUS_ERROR = 150; static constexpr const char* _type_key = "auto_scheduler.ProgramMeasurer"; TVM_DECLARE_FINAL_OBJECT_INFO(ProgramMeasurerNode, Object); }; /*! * \brief Managed reference to ProgramMeasurerNode. * \sa ProgramMeasurerNode */ class ProgramMeasurer : public ObjectRef { public: /*! * \brief The constructor. * \param builder The ProgramBuilder to build programs. * \param runner The ProgramRunner to measure programs. * \param callbacks MeasureCallback to be called after each measurement batch. * \param verbose Verbosity level. 0 for silent, 1 to output information during program * measuring. * \param max_continuous_error The number of allowed maximum continuous error before * forcely stopping the tuning. */ ProgramMeasurer(ProgramBuilder builder, ProgramRunner runner, Optional<Array<MeasureCallback>> callbacks, int verbose, int max_continuous_error = -1); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(ProgramMeasurer, ObjectRef, ProgramMeasurerNode); }; } // namespace auto_scheduler } // namespace tvm #endif // TVM_AUTO_SCHEDULER_MEASURE_H_
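A minimal sketch (not part of the header): wiring a LocalBuilder and LocalRunner into a ProgramMeasurer and measuring one batch of candidate states. The task, policy, and states are assumed to exist already; the timeout/repeat numbers and the record file name are illustrative, loosely following the Python-side defaults mentioned above. RecordToFile is declared in measure_record.h, shown further below.

#include <tvm/auto_scheduler/measure_record.h>
#include <tvm/auto_scheduler/search_policy.h>

void MeasureBatch(const tvm::auto_scheduler::SearchTask& task,
                  const tvm::auto_scheduler::SearchPolicy& policy,
                  const tvm::Array<tvm::auto_scheduler::State>& states) {
  using namespace tvm;
  using namespace tvm::auto_scheduler;

  LocalBuilder builder(/*timeout=*/15, /*n_parallel=*/4, /*build_func=*/"default");
  LocalRunner runner(/*timeout=*/10, /*number=*/3, /*repeat=*/1, /*min_repeat_ms=*/100,
                     /*cooldown_interval=*/0.0, /*enable_cpu_cache_flush=*/false,
                     /*device=*/0);

  // Log every measurement so it can be replayed or used for cost-model training later.
  Array<MeasureCallback> callbacks{RecordToFile("measure_records.json")};
  ProgramMeasurer measurer(builder, runner, callbacks, /*verbose=*/1);

  Array<MeasureInput> inputs;
  for (const State& s : states) {
    inputs.push_back(MeasureInput(task, s));
  }
  Array<MeasureResult> results = measurer->Measure(task, policy, inputs);
  (void)results;  // each result carries costs, error_no, and error_msg as declared above
}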
https://github.com/zk-ml/tachikoma
include/tvm/auto_scheduler/measure_record.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/auto_scheduler/measure_record.h * \brief Json serialization format for dumping and loading measurement records. */ #ifndef TVM_AUTO_SCHEDULER_MEASURE_RECORD_H_ #define TVM_AUTO_SCHEDULER_MEASURE_RECORD_H_ #include <tvm/auto_scheduler/measure.h> #include <fstream> #include <string> #include <utility> namespace tvm { namespace auto_scheduler { const std::string AUTO_SCHEDULER_LOG_VERSION = "v0.6"; // NOLINT(*) /*! \brief Callback for logging the input and results of measurements to file */ class RecordToFileNode : public MeasureCallbackNode { public: /*! \brief The name of output file. */ String filename; void Callback(const SearchPolicy& policy, const Array<MeasureInput>& inputs, const Array<MeasureResult>& results) final; static constexpr const char* _type_key = "auto_scheduler.RecordToFile"; TVM_DECLARE_FINAL_OBJECT_INFO(RecordToFileNode, MeasureCallbackNode); }; /*! * \brief Managed reference to RecordToFileNode. * \sa RecordToFileNode */ class RecordToFile : public MeasureCallback { public: /*! * \brief The constructor. * \param filename The name of output file */ explicit RecordToFile(String filename); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(RecordToFile, MeasureCallback, RecordToFileNode); }; /*! \brief Log reader to load step logs from a file.*/ class RecordReaderNode : public Object { public: /*! \brief The name of input file. */ String filename; /*! \brief The reading file stream. */ std::ifstream infile; ~RecordReaderNode(); /*! * \brief Read next line in the log file. * \param inp A pointer to a MeasureInputNode, this is used as output. * \param res A pointer to a MeasureResultNode, this is used as output. * \return Whether the read is successful. */ bool ReadNext(MeasureInputNode* inp, MeasureResultNode* res); /*! * \brief Read multiple lines from the log file. * \param max_size The maximum number of lines. -1 means read all lines. * \param skip_size Skip the first n lines. * \return The MeasureInputs and MeasureResults loaded from the log file. */ std::pair<Array<MeasureInput>, Array<MeasureResult>> ReadLines(int max_size = -1, int skip_size = 0); static constexpr const char* _type_key = "auto_scheduler.RecordReader"; TVM_DECLARE_FINAL_OBJECT_INFO(RecordReaderNode, Object); private: /*! \brief A string storing the current line. */ std::string cur_line_; }; /*! * \brief Managed reference to RecordReaderNode. * \sa RecordReaderNode */ class RecordReader : public ObjectRef { public: /*! * \brief The constructor. * \param filename The name of input file */ explicit RecordReader(String filename); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(RecordReader, ObjectRef, RecordReaderNode); }; /*! * \brief Append measure records to an output stream. * \param os A pointer to a output stream. 
* \param inputs The MeasureInputs to be written. * \param results The MeasureResults to be written. * \param log_version The log version for the given record. */ void WriteMeasureRecords(std::ostream* os, const Array<MeasureInput>& inputs, const Array<MeasureResult>& results, const std::string log_version = AUTO_SCHEDULER_LOG_VERSION); /*! * \brief Read one measure record from a string. * \param str The record string to be parsed. * \param inp A pointer to a MeasureInputNode used to store the return value. * \param res A pointer to a MeasureResultNode used to store the return value. * \param log_version A pointer to a string used to store the log version. */ void ReadMeasureRecord(const std::string& str, MeasureInputNode* inp, MeasureResultNode* res, std::string* log_version); } // namespace auto_scheduler } // namespace tvm #endif // TVM_AUTO_SCHEDULER_MEASURE_RECORD_H_
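A minimal sketch (not part of the header): replaying a measurement log with the RecordReader declared above. The file name is illustrative; ReadLines(-1) loads every record, as documented in the header.

#include <tvm/auto_scheduler/measure_record.h>

#include <utility>

void ReplayLog() {
  using namespace tvm::auto_scheduler;
  RecordReader reader("measure_records.json");
  std::pair<tvm::Array<MeasureInput>, tvm::Array<MeasureResult>> records =
      reader->ReadLines(/*max_size=*/-1, /*skip_size=*/0);
  // records.first[i] and records.second[i] describe the i-th measured schedule and its
  // measured time costs; the pair can feed CostModelNode::Update directly.
  (void)records;
}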
https://github.com/zk-ml/tachikoma
include/tvm/auto_scheduler/search_policy.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/auto_scheduler/search_policy.h * \brief The base class of search policies, including the abstract definition of search policy and * other supporting data structures. * * \note How to add a new search policy. * In design, there's no need for users to implement their own search policy, our formal search * policy(will be brought later) should be enough to cover most use cases. Meanwhile, a custom rule * mechanism will be provided to enable user-defined template search to serve the same functionality * as the current AutoTVM template. * * This guide is for advanced uses who have special requirements. * 1. The only function that must be implemented is Search(), which takes a task as input and * returns the best states found. * 2. Information about the compute declaration of ops/subgraphs can be acquired from SearchTask. * This structure also contains some information about the target device. (e.g. knowing the width * of the device vector unit, we can limit the max vectorize size during schedule search) * 3. SearchCallback provides more flexibility to do extra affairs before/after the search process. * 4. ProgramMeasurer provides a simple but useful api to help check the performance of states got * during the search process. */ #ifndef TVM_AUTO_SCHEDULER_SEARCH_POLICY_H_ #define TVM_AUTO_SCHEDULER_SEARCH_POLICY_H_ #include <tvm/auto_scheduler/measure.h> #include <tvm/auto_scheduler/search_task.h> #include <tvm/node/node.h> #include <string> #include <unordered_set> #include <utility> #include <vector> namespace tvm { namespace auto_scheduler { class ProgramMeasurer; class SearchPolicyNode; /*! * \brief Callback function to be called by the search process. * This interface allows to do extra initializations before schedule search or extra * check during/after the schedule search. */ class SearchCallbackNode : public Object { public: /*! * \brief Run the registered callback function. * \param policy A pointer to a SearchPolicyNode. */ virtual void Callback(SearchPolicyNode* policy) = 0; static constexpr const char* _type_key = "auto_scheduler.SearchCallback"; TVM_DECLARE_BASE_OBJECT_INFO(SearchCallbackNode, Object); }; /*! * \brief Managed reference to SearchCallbackNode. * \sa SearchCallbackNode */ class SearchCallback : public ObjectRef { public: TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(SearchCallback, ObjectRef, SearchCallbackNode); }; /*! \brief Preload measured states from a log file. * This can resume the state of the search policy */ class PreloadMeasuredStatesNode : public SearchCallbackNode { public: /*! \brief The name of the record log file. 
*/ String filename; void Callback(SearchPolicyNode* policy) final; static constexpr const char* _type_key = "auto_scheduler.PreloadMeasuredStates"; TVM_DECLARE_FINAL_OBJECT_INFO(PreloadMeasuredStatesNode, SearchCallbackNode); }; /*! * \brief Managed reference to PreloadMeasuredStatesNode. * \sa PreloadMeasuredStatesNode */ class PreloadMeasuredStates : public SearchCallback { public: /*! * \brief The constructor. * \param filename The name of the record log file. */ explicit PreloadMeasuredStates(String filename); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(PreloadMeasuredStates, SearchCallback, PreloadMeasuredStatesNode); }; /*! \brief Attribute keys of ops used for SearchPolicy. */ struct SearchPolicyKey { /*! \brief Always apply unroll to the inner most iterator of the specificed iterators. */ static constexpr const char* always_unroll_inner = "auto_scheduler_always_unroll_inner"; /*! \brief The specified iterators will be placed in the inner most tile without split. */ static constexpr const char* no_split_at_inner = "auto_scheduler_no_split_at_inner"; /*! \brief The specified iterators are indices of const tensors in "fake reduction". */ static constexpr const char* simplify_const_tensor_indices = "auto_scheduler_simplify_const_tensor_indices"; }; /*! * \brief The base class of search policies. */ class SearchPolicyNode : public Object { public: /*! \brief The current search task. */ SearchTask search_task; /*! * \brief Verbose level to control the screen output during schedule search. * 0 for silent, 1 to output state & measure information during search process. */ int verbose; void VisitAttrs(AttrVisitor* v) { v->Visit("search_task", &search_task); v->Visit("verbose", &verbose); } /*! * \brief Do schedule search for a task. Takes the SearchTask as input and returns the best state * found during the search. * \param num_measure_trials The number of total measurement trials. * \param early_stopping Stops the tuning early if no improvement after n measurements. * \param num_measures_per_round The number of programs to be measured at each search round. * \param measurer A ProgramMeasurer to build and measure programs * \return The best state found. */ virtual State Search(int num_measure_trials, int early_stopping, int num_measures_per_round, ProgramMeasurer measurer) = 0; /*! * \brief Continue the search by doing an additional search round. * \param num_measure The number of measurements * \param measurer The measurer to measure programs * \return The measurement records for measurements in this search round */ virtual std::pair<Array<MeasureInput>, Array<MeasureResult>> ContinueSearchOneRound( int num_measure, ProgramMeasurer measurer) = 0; /*! * \brief Preload measured states from a log file to resume the state of the search policy. * \param log_file The name of the record log file. */ void PreloadMeasuredStates(const String& log_file); /*! * \brief Call SearchCallback with the current SearchPolicyNode * \param callbacks SearchCallback to be called. */ void RunCallbacks(const Array<SearchCallback>& callbacks); static constexpr const char* _type_key = "auto_scheduler.SearchPolicy"; TVM_DECLARE_BASE_OBJECT_INFO(SearchPolicyNode, Object); protected: /*! * \brief The set of already measured states. * We store the string format of a state for redundancy check. This is used to make sure a * measured state will never be measured again. */ std::unordered_set<std::string> measured_states_set_; /*! \brief The array of already measured states. 
* The good states can be used as the initial population in evolutionary search. */ std::vector<State> measured_states_vector_; /*! \brief The throughputs of already measured states */ std::vector<float> measured_states_throughputs_; }; /*! * \brief Managed reference to SearchPolicyNode. * \sa SearchPolicyNode */ class SearchPolicy : public ObjectRef { public: TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(SearchPolicy, ObjectRef, SearchPolicyNode); }; } // namespace auto_scheduler } // namespace tvm #endif // TVM_AUTO_SCHEDULER_SEARCH_POLICY_H_
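A minimal sketch (not part of the header): resuming a search policy from an existing record file and running one search through the SearchPolicyNode interface declared above. The concrete policy (e.g. SketchPolicy, declared in a separate header) and the ProgramMeasurer are assumed to be constructed by the caller; the trial counts are illustrative.

#include <tvm/auto_scheduler/search_policy.h>

tvm::auto_scheduler::State TuneTask(tvm::auto_scheduler::SearchPolicy policy,
                                    tvm::auto_scheduler::ProgramMeasurer measurer) {
  using namespace tvm::auto_scheduler;

  // Skip states that were already measured in a previous tuning session.
  policy->PreloadMeasuredStates("measure_records.json");

  // Run the search: 512 measurement trials in rounds of 64, stopping early if
  // 128 consecutive measurements bring no improvement.
  return policy->Search(/*num_measure_trials=*/512, /*early_stopping=*/128,
                        /*num_measures_per_round=*/64, measurer);
}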
https://github.com/zk-ml/tachikoma
include/tvm/auto_scheduler/search_task.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file auto_scheduler/search_task.h * \brief Meta information and hardware parameters for a search task. */ #ifndef TVM_AUTO_SCHEDULER_SEARCH_TASK_H_ #define TVM_AUTO_SCHEDULER_SEARCH_TASK_H_ #include <tvm/auto_scheduler/compute_dag.h> #include <tvm/runtime/ndarray.h> #include <tvm/target/target.h> namespace tvm { namespace auto_scheduler { class HardwareParams; /*! \brief The parameters of target hardware used to guide the SearchPolicy. */ class HardwareParamsNode : public Object { public: /*! \brief The number of cores. */ int num_cores; /*! \brief The width of vector units in bytes. */ int vector_unit_bytes; /*! \brief The size of cache line in bytes. */ int cache_line_bytes; // GPU related parameters got from device query API /*! \brief The max shared memory per block in bytes. */ int max_shared_memory_per_block; /*! \brief The max local memory per block in bytes. */ int max_local_memory_per_block; /*! \brief The max number of threads per block. */ int max_threads_per_block; /*! \brief The max vthread extent. */ int max_vthread_extent; /*! \brief The thread numbers of a warp. */ int warp_size; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("num_cores", &num_cores); v->Visit("vector_unit_bytes", &vector_unit_bytes); v->Visit("cache_line_bytes", &cache_line_bytes); v->Visit("max_shared_memory_per_block", &max_shared_memory_per_block); v->Visit("max_local_memory_per_block", &max_local_memory_per_block); v->Visit("max_threads_per_block", &max_threads_per_block); v->Visit("max_vthread_extent", &max_vthread_extent); v->Visit("warp_size", &warp_size); } /*! * \brief Get the default hardware params. * \param target A `tvm.target`. * \param target_host A `tvm.target` for host device. * \return A HardwareParams object. */ static HardwareParams GetDefaultHardwareParams(const Target& target, const Target& target_host); static constexpr const char* _type_key = "auto_scheduler.HardwareParams"; TVM_DECLARE_FINAL_OBJECT_INFO(HardwareParamsNode, Object); }; /*! * \brief Managed reference to HardwareParamsNode. * \sa HardwareParamsNode */ class HardwareParams : public ObjectRef { public: /*! * \brief The constructor. * \param num_cores The number of cores. * \param vector_unit_bytes The width of vector units in bytes. * \param cache_line_bytes The size of cache line in bytes. * \param max_shared_memory_per_block The max amount of shared memory per block for GPU. * \param max_local_memory_per_block The max amount of local memory per block for GPU. * \param max_threads_per_block The max number of threads per block for GPU. * \param max_vthread_extent The max extent of vthread for GPU. 
* \param warp_size The warp size for GPU */ HardwareParams(int num_cores, int vector_unit_bytes, int cache_line_bytes, int max_shared_memory_per_block, int max_local_memory_per_block, int max_threads_per_block, int max_vthread_extent, int warp_size); TVM_DEFINE_OBJECT_REF_METHODS(HardwareParams, ObjectRef, HardwareParamsNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(HardwareParamsNode); }; /*! * \brief The computation information and hardware parameters for a specific schedule search task. */ class SearchTaskNode : public Object { public: /*! \brief The ComputeDAG for the compute declaration. */ ComputeDAG compute_dag; /*! \brief The workload key for the compute declaration. */ String workload_key; /*! \brief The description string of this task. */ String desc; /*! \brief The target device of this search task. */ Target target; /*! \brief The target host device of this search task. */ Target target_host; /*! \brief Hardware parameters used in this search task. */ HardwareParams hardware_params; /*! \brief The layout rewrite option used for measuring programs. */ LayoutRewriteOption layout_rewrite_option; /*! \brief Names of some user defined input data used in program measuring. */ Array<String> task_input_names; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("compute_dag", &compute_dag); v->Visit("workload_key", &workload_key); v->Visit("desc", &desc); v->Visit("target", &target); v->Visit("target_host", &target_host); v->Visit("hardware_params", &hardware_params); v->Visit("layout_rewrite_option", &layout_rewrite_option); v->Visit("task_input_names", &task_input_names); } static constexpr const char* _type_key = "auto_scheduler.SearchTask"; TVM_DECLARE_FINAL_OBJECT_INFO(SearchTaskNode, Object); }; /*! * \brief Managed reference to SearchTaskNode. * \sa SearchTaskNode */ class SearchTask : public ObjectRef { public: /*! * \brief The constructor. * \param compute_dag The ComputeDAG for the compute declaration. * \param workload_key The workload key for the compute declaration. * \param target The target device of this search task. * \param target_host The target host device of this search task. * \param hardware_params Hardware parameters used in this search task. * \param layout_rewrite_option The layout rewrite option used for measuring programs. * \param task_input_names Names of some user defined input data used in program measuring. * \param desc The description string of this task. */ SearchTask(ComputeDAG compute_dag, String workload_key, Target target, Target target_host, Optional<HardwareParams> hardware_params, LayoutRewriteOption layout_rewrite_option, Array<String> task_input_names, String desc = ""); TVM_DEFINE_OBJECT_REF_METHODS(SearchTask, ObjectRef, SearchTaskNode); }; } // namespace auto_scheduler } // namespace tvm #endif // TVM_AUTO_SCHEDULER_SEARCH_TASK_H_
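A minimal sketch (not part of the header): describing a CPU by hand with HardwareParams and building a SearchTask for an existing ComputeDAG. The core/vector/cache numbers and the description string are illustrative (GetDefaultHardwareParams can derive them from the target instead), and the GPU-only fields are simply zeroed.

#include <tvm/auto_scheduler/search_task.h>
#include <tvm/target/target.h>

tvm::auto_scheduler::SearchTask MakeCpuTask(const tvm::auto_scheduler::ComputeDAG& dag,
                                            const tvm::String& workload_key) {
  using namespace tvm;
  using namespace tvm::auto_scheduler;

  Target target("llvm");
  // 8 cores, 32-byte (AVX2) vector units, 64-byte cache lines; GPU fields unused on CPU.
  HardwareParams hw_params(/*num_cores=*/8, /*vector_unit_bytes=*/32, /*cache_line_bytes=*/64,
                           /*max_shared_memory_per_block=*/0, /*max_local_memory_per_block=*/0,
                           /*max_threads_per_block=*/0, /*max_vthread_extent=*/0,
                           /*warp_size=*/0);

  return SearchTask(dag, workload_key, target, /*target_host=*/target, hw_params,
                    LayoutRewriteOption::NoRewrite, /*task_input_names=*/{},
                    /*desc=*/"cpu task");
}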
https://github.com/zk-ml/tachikoma
include/tvm/auto_scheduler/transform_step.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file auto_scheduler/transform_step.h * \brief Transformation steps. These steps are used to manipulate `LoopState`. * They are similar to the schedule primitives in te::Stage. * * \note How to add a new transform step: * Take fuse step for example: * 1. Define class `FuseStepNode`, `FuseStep` in `transform_steps.h`, and implement its first * construction function `FuseStep::FuseStep()` in `transform_steps.cc`. * 2. Implement `FuseStepNode::ApplyToSchedule()` and `FuseStepNode::PrintAsPythonAPI()`. * - In these two functions you need to lower this step with tvm's te schedule API * 3. Implement `FuseStepNode::ApplyToState` and the state API `State::fuse`. * - In these two functions you need to incrementally update all data structures in State with * CopyOnWrite style. * 4. Add your step to `StepApplyToState`, `StepApplyToSchedule`, and `StepPrintAsPythonAPI`. * 5. Log record serialization support: * - Add `FuseStepNode::WriteToRecord` which takes a mutable JSONWriter pointer as input and * output the record to it. * - Add another construction function that takes a mutable JSONReader as input, this will get a * step record from the reader and create the step. * - Add the step implementation to `StepReadFromRecord`. * 6. Add its corresponding Python API to `loop_state.py` with necessary unit tests. The test should * at lease cover two parts: the functional test and the record serialization test. */ #ifndef TVM_AUTO_SCHEDULER_TRANSFORM_STEP_H_ #define TVM_AUTO_SCHEDULER_TRANSFORM_STEP_H_ #include <dmlc/common.h> #include <dmlc/json.h> #include <tvm/node/node.h> #include <tvm/te/schedule.h> #include <vector> namespace tvm { namespace auto_scheduler { typedef Map<tvm::te::Stage, Array<tir::IterVar>, ObjectHash, ObjectEqual> StageToAxesMap; /*! * \brief Update the current stage IterVar information to StageToAxesMap. * \param stage The stage to be updated. * \param stage_to_axes The map to be updated. */ void UpdateStageToAxesMap(const te::Stage& stage, StageToAxesMap* stage_to_axes); /*! \brief The type of an iterator. */ enum class IteratorKind : int { /*! \brief Spatial iterator. */ kSpatial = 0, /*! \brief Reduction iterator. */ kReduction = 1, /*! \brief Fused spatial and reduction iterator. */ kMixed = 2, /*! \brief Special iterator. (e.g. virtual root iterator) */ kSpecial = 3 }; /*! \brief The type of an iterator's annotation. */ enum class IteratorAnnotation : int { /*! \brief This iterator has no annotation. */ kNone = 0, /*! \brief This iterator has been unrolled. */ kUnroll = 1, /*! \brief This iterator has been vectorized. */ kVectorize = 2, /*! \brief This iterator has been paralleld. */ kParallel = 3, /*! \brief This iterator has been bind to vthread. */ kVThread = 4, /*! 
\brief This iterator has been bind to blockIdx.x. */ kBlockX = 5, /*! \brief This iterator has been bind to threadIdx.x. */ kThreadX = 6, /*! \brief This iterator has been bind to blockIdx.y. */ kBlockY = 7, /*! \brief This iterator has been bind to threadIdx.y. */ kThreadY = 8, /*! \brief This iterator has been bind to blockIdx.y. */ kBlockZ = 9, /*! \brief This iterator has been bind to threadIdx.y. */ kThreadZ = 10, /*! \brief This iterator has been mapped with a tensorize intrinsic. */ kTensorize = 11 }; extern const char* IteratorAnnotationString[]; // forward declaration class Iterator; /*! * \brief An iterator of a for-loop * Similar to tvm::IterVar in `include/tvm/tir/expr.h` */ class IteratorNode : public Object { public: /*! \brief The name of this iterator. */ String name; /*! \brief The range of this iterator. */ Range range; /*! \brief The iterator type of this iterator. */ IteratorKind iter_kind; /*! \brief The annotation type of this iterator. */ IteratorAnnotation annotation; /*! The original iterators before fusion. */ std::vector<Iterator> orig_iters; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("name", &name); v->Visit("range", &range); v->Visit("iter_kind", &iter_kind); v->Visit("annotation", &annotation); } static constexpr const char* _type_key = "auto_scheduler.Iterator"; TVM_DECLARE_FINAL_OBJECT_INFO(IteratorNode, Object); }; /*! * \brief Managed reference to IteratorNode. * \sa IteratorNode */ class Iterator : public ObjectRef { public: /*! * \brief The constructor. * \param name The name of this iterator. * \param range The range of this iterator. * \param iter_kind The iterator type of this iterator. * \param annotation The annotation type of this iterator. * \param orig_iters The original iterators before fusion */ Iterator(String name, Range range, IteratorKind iter_kind, IteratorAnnotation annotation, const std::vector<Iterator>* orig_iters = nullptr); TVM_DEFINE_OBJECT_REF_METHODS(Iterator, ObjectRef, IteratorNode); }; /*! * \brief The base class of transformation steps. Each step has its corresponding tvm.te * schedule primitives. */ class StepNode : public Object { public: /*! \brief The index of the stage. */ int stage_id; /*! * \brief Serialize the current step record to JSONWriter. * \param writer The output JSONWriter. */ virtual void WriteToRecord(dmlc::JSONWriter* writer) const = 0; static constexpr const char* _type_key = "auto_scheduler.Step"; TVM_DECLARE_BASE_OBJECT_INFO(StepNode, Object); }; /*! * \brief Managed reference to StepNode. * \sa StepNode */ class Step : public ObjectRef { public: /*! * \brief CopyOnWrite function for Step. * This works almost the same as a normal ObjectRef.CopyOnWrite(), but can dispatch to different * steps. * \return A base StepNode pointer, need to cast to its real StepNode type before doing any * modifications. * \code * * SplitStep ref; * StepNode* mutable_ref = ref.CopyOnWrite(); * dynamic_cast<SplitStepNode*>(mutable_ref)->... = ...; * * \endcode */ StepNode* CopyOnWrite(); TVM_DEFINE_OBJECT_REF_METHODS(Step, ObjectRef, StepNode); }; // Forward declaration class State; class ComputeDAG; /*! * \brief Read a step record from JSONReader and create the corresponding step. * \param reader The input JSONReader. */ Step StepReadFromRecord(dmlc::JSONReader* reader); /*! * \brief Apply a general step to a State with runtime dynamic dispatching. * \param step The step to be applied to State. * \param state A mutable pointer to state, which will be updated. * \param dag The original ComputeDAG of this state. 
*/ void StepApplyToState(const Step& step, State* state, const ComputeDAG& dag); /*! * \brief Apply a general step to tvm.schedule with runtime dynamic dispatching. * \param step The step to be applied to tvm.schedule. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \param schedule A mutable point to the current schedule * \param transform_steps An array of all history transform steps. */ void StepApplyToSchedule(const Step& step, Array<te::Stage>* stages, StageToAxesMap* stage_to_axes, te::Schedule* schedule, const Array<Step>& transform_steps); /*! * \brief Print a general step as equivalent python schedule API with runtime dynamic dispatching. * \param step The step to be printed as python API. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \param schedule A mutable point to the current schedule * \param transform_steps An array of all history transform steps. * \return Python schedule code. */ String StepPrintAsPythonAPI(const Step& step, Array<te::Stage>* stages, StageToAxesMap* stage_to_axes, te::Schedule* schedule, const Array<Step>& transform_steps); /********** Steps working on single stage **********/ /*! * \brief Annotation step that corresponds to vectorize, parallel, unroll and thread binding. * (i.e. te::Stage::vectorize, te::Stage::parallel, te::Stage::vectorize, te::Stage::bind) */ class AnnotationStepNode : public StepNode { public: /*! \brief The index of the iterator to add annotation. */ int iter_id; /*! \brief The annotation type of this step. */ IteratorAnnotation annotation; void WriteToRecord(dmlc::JSONWriter* writer) const final; /*! * \brief Apply the current step to State. * \param state A mutable pointer to state, which will be updated. * \return The iterator result after annotate. */ Iterator ApplyToState(State* state) const; /*! * \brief Apply the current step to tvm.schedule. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. */ void ApplyToSchedule(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes) const; /*! * \brief Print the current step as equivalent python schedule API. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \return Python schedule code. */ String PrintAsPythonAPI(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes) const; static constexpr const char* record_prefix_str = "AN"; static constexpr const char* _type_key = "auto_scheduler.AnnotationStep"; TVM_DECLARE_FINAL_OBJECT_INFO(AnnotationStepNode, StepNode); }; /*! * \brief Managed reference to AnnotationStepNode. * \sa AnnotationStepNode */ class AnnotationStep : public Step { public: /*! * \brief The constructor. * \param stage_id The index of the stage to add annotation. * \param iter_id The index of the iterator to add annotation. * \param ann The annotation type of this step. */ AnnotationStep(int stage_id, int iter_id, IteratorAnnotation ann); /*! * \brief The constructor used to read a step record from JSONReader and create the * corresponding step. * \param reader The input JSONReader. */ explicit AnnotationStep(dmlc::JSONReader* reader); TVM_DEFINE_OBJECT_REF_METHODS(AnnotationStep, Step, AnnotationStepNode); }; /*! \brief Fuse step that corresponds to te::Stage::fuse */ class FuseStepNode : public StepNode { public: /*! \brief The ids of iterators to fuse. 
*/ Array<Integer> fused_ids; void WriteToRecord(dmlc::JSONWriter* writer) const final; /*! * \brief Apply the current step to State. * \param state A mutable pointer to state, which will be updated. * \return The iterator result after fuse. * \note If the iterators to be fused have stages attached at them(by compute_at), the fused * result will become the new attach point. */ Iterator ApplyToState(State* state) const; /*! * \brief Apply the current step to tvm.schedule. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \return The iterator result after fuse. */ tir::IterVar ApplyToSchedule(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes) const; /*! * \brief Print the current step as equivalent python schedule API. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \return Python schedule code. */ String PrintAsPythonAPI(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes) const; static constexpr const char* record_prefix_str = "FU"; static constexpr const char* _type_key = "auto_scheduler.FuseStep"; TVM_DECLARE_FINAL_OBJECT_INFO(FuseStepNode, StepNode); }; /*! * \brief Managed reference to FuseStepNode. * \sa FuseStepNode */ class FuseStep : public Step { public: /*! * \brief The constructor. * \param stage_id The index of the stage to be fused. * \param fused_ids The index of the iterators to be fused. */ FuseStep(int stage_id, const Array<Integer>& fused_ids); /*! * \brief The constructor used to read a step record from JSONReader and create the * corresponding step. * \param reader The input JSONReader. */ explicit FuseStep(dmlc::JSONReader* reader); TVM_DEFINE_OBJECT_REF_METHODS(FuseStep, Step, FuseStepNode); }; /*! \brief Pragma step that corresponds to te::Stage::pragma */ class PragmaStepNode : public StepNode { public: /*! \brief The index of the iterator to add pragma. */ int iter_id; /*! \brief The pragma string. */ String pragma_type; void WriteToRecord(dmlc::JSONWriter* writer) const final; /*! * \brief Apply the current step to State. * \param state A mutable pointer to state, which will be updated. */ void ApplyToState(State* state) const; /*! * \brief Apply the current step to tvm.schedule. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. */ void ApplyToSchedule(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes) const; /*! * \brief Print the current step as equivalent python schedule API. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \return Python schedule code. */ String PrintAsPythonAPI(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes) const; static constexpr const char* record_prefix_str = "PR"; static constexpr const char* _type_key = "auto_scheduler.PragmaStep"; TVM_DECLARE_FINAL_OBJECT_INFO(PragmaStepNode, StepNode); }; /*! * \brief Managed reference to PragmaStepNode. * \sa PragmaStepNode */ class PragmaStep : public Step { public: /*! * \brief The constructor. * \param stage_id The index of the stage to be fused. * \param iter_id The index of the iterator to add pragma. * \param pragma_type The pragma string. */ PragmaStep(int stage_id, int iter_id, String pragma_type); /*! * \brief The constructor used to read a step record from JSONReader and create the * corresponding step. * \param reader The input JSONReader. 
*/ explicit PragmaStep(dmlc::JSONReader* reader); TVM_DEFINE_OBJECT_REF_METHODS(PragmaStep, Step, PragmaStepNode); }; /*! \brief Reorder step that corresponds to te::Stage::reorder */ class ReorderStepNode : public StepNode { public: /*! * \brief The iterator ids after reorder. * This array should specify the order of all iterators. */ Array<Integer> after_ids; void WriteToRecord(dmlc::JSONWriter* writer) const final; /*! * \brief Apply the current step to State. * \param state A mutable pointer to state, which will be updated. */ void ApplyToState(State* state) const; /*! * \brief Apply the current step to tvm.schedule. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. */ void ApplyToSchedule(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes) const; /*! * \brief Print the current step as equivalent python schedule API. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \return Python schedule code. */ String PrintAsPythonAPI(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes) const; static constexpr const char* record_prefix_str = "RE"; static constexpr const char* _type_key = "auto_scheduler.ReorderStep"; TVM_DECLARE_FINAL_OBJECT_INFO(ReorderStepNode, StepNode); }; /*! * \brief Managed reference to ReorderStepNode. * \sa ReorderStepNode */ class ReorderStep : public Step { public: /*! * \brief The constructor. * \param stage_id The index of the stage to be reordered. * \param after_ids The expected indexes of the iterators after reorder. */ ReorderStep(int stage_id, const Array<Integer>& after_ids); /*! * \brief The constructor used to read a step record from JSONReader and create the * corresponding step. * \param reader The input JSONReader. */ explicit ReorderStep(dmlc::JSONReader* reader); TVM_DEFINE_OBJECT_REF_METHODS(ReorderStep, Step, ReorderStepNode); }; /*! * \brief Split step that corresponds to te::Stage::split with additional * support of multiple-level of factors */ class SplitStepNode : public StepNode { public: /*! \brief The id of the iter to split. */ int iter_id; /*! \brief The extent length of the axis to split. */ Optional<PrimExpr> extent; /*! \brief The split factors. */ Array<Optional<Integer>> lengths; /*! * \brief If true, the `lengths` denote the lengths of iterators * from inner level to outer level */ bool inner_to_outer; void WriteToRecord(dmlc::JSONWriter* writer) const final; /*! * \brief Apply the current step to State. * \param state A mutable pointer to state, which will be updated. * \return The iterator results after split. * \note If we do split on an iterator which has stages attached at it(by compute_at), the inner * most iterator of split results will become the new attach point. */ Array<Iterator> ApplyToState(State* state) const; /*! * \brief Apply the current step to tvm.schedule. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \return The iterator results after split. */ Array<tir::IterVar> ApplyToSchedule(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes) const; /*! * \brief Print the current step as equivalent python schedule API. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \return Python schedule code. 
*/ String PrintAsPythonAPI(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes) const; static constexpr const char* record_prefix_str = "SP"; static constexpr const char* _type_key = "auto_scheduler.SplitStep"; TVM_DECLARE_FINAL_OBJECT_INFO(SplitStepNode, StepNode); }; /*! * \brief Managed reference to SplitStepNode. * \sa SplitStepNode */ class SplitStep : public Step { public: /*! * \brief The constructor. * \param stage_id The index of the stage to be split. * \param iter_id The index of the iterator to be split. * \param extent The extent length of the axis to split. * \param lengths The multiple split factors. Can be None to be filled by search policy. * \param inner_to_outer The split direction. */ SplitStep(int stage_id, int iter_id, Optional<PrimExpr> extent, const Array<Optional<Integer>>& lengths, bool inner_to_outer); /*! * \brief The constructor used to read a step record from JSONReader and create the * corresponding step. * \param reader The input JSONReader. */ explicit SplitStep(dmlc::JSONReader* reader); TVM_DEFINE_OBJECT_REF_METHODS(SplitStep, Step, SplitStepNode); }; /*! \brief Similar to SplitStepNode, but uses split factors from another step * (i.e. Follow another split step) */ class FollowSplitStepNode : public StepNode { public: /*! \brief The id of the iter to be split. */ int iter_id; /*! \brief The index of the split step to be followed in the history. */ int src_step_id; /*! \brief The number of split level. */ int n_split; void WriteToRecord(dmlc::JSONWriter* writer) const final; /*! * \brief Extract split lengths. * \param transform_steps An array of history transform steps. * \return The multiple split factors. */ Array<Optional<Integer>> ExtractSplitLengths(const Array<Step>& transform_steps) const; /*! * \brief Apply the current step to State. * \param state A mutable pointer to state, which will be updated. * \return The iterator results after split. */ Array<Iterator> ApplyToState(State* state) const; /*! * \brief Apply the current step to tvm.schedule. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \param transform_steps An array of history transform steps. * \return The iterator results after split. */ Array<tir::IterVar> ApplyToSchedule(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes, const Array<Step>& transform_steps) const; /*! * \brief Print the current step as equivalent python schedule API. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \param transform_steps An array of history transform steps. * \return Python schedule code. */ String PrintAsPythonAPI(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes, const Array<Step>& transform_steps) const; static constexpr const char* record_prefix_str = "FSP"; static constexpr const char* _type_key = "auto_scheduler.FollowSplitStep"; TVM_DECLARE_FINAL_OBJECT_INFO(FollowSplitStepNode, StepNode); }; /*! * \brief Managed reference to FollowSplitStepNode. * \sa FollowSplitStepNode */ class FollowSplitStep : public Step { public: /*! * \brief The constructor. * \param stage_id The index of the stage to be split. * \param iter_id The index of the iterator to be split. * \param src_step_id The index of the split step to be followed in the history. * \param n_split The number of split level. */ FollowSplitStep(int stage_id, int iter_id, int src_step_id, int n_split); /*! 
* \brief The constructor used to read a step record from JSONReader and create the * corresponding step. * \param reader The input JSONReader. */ explicit FollowSplitStep(dmlc::JSONReader* reader); TVM_DEFINE_OBJECT_REF_METHODS(FollowSplitStep, Step, FollowSplitStepNode); }; /*! \brief Similar to FollowSplitStep, but uses split factors from multiple steps. * \note This can be used for the split in cooperative fetching. */ class FollowFusedSplitStepNode : public StepNode { public: /*! \brief The id of the iter to split. */ int iter_id; /*! \brief The indices of the split steps to be followed in the history. */ Array<Integer> src_step_ids; /*! \brief Use the length in this split level. */ int level; /*! \brief If this is true, use factor. Otherwise, use nparts. */ bool factor_or_nparts; void WriteToRecord(dmlc::JSONWriter* writer) const final; /*! * \brief Extract split length. * \param transform_steps An array of history transform steps. * \return Split factor. */ Optional<Integer> ExtractSplitLength(const Array<Step>& transform_steps) const; /*! * \brief Apply the current step to State. * \param state A mutable pointer to state, which will be updated. * \return The iterator results after split. */ Array<Iterator> ApplyToState(State* state) const; /*! * \brief Apply the current step to tvm.schedule. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \param transform_steps An array of history transform steps. * \return The iterator results after split. */ Array<tir::IterVar> ApplyToSchedule(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes, const Array<Step>& transform_steps) const; /*! * \brief Print the current step as equivalent python schedule API. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \param transform_steps An array of history transform steps. * \return Python schedule code. */ String PrintAsPythonAPI(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes, const Array<Step>& transform_steps) const; static constexpr const char* record_prefix_str = "FFSP"; static constexpr const char* _type_key = "auto_scheduler.FollowFusedSplitStep"; TVM_DECLARE_FINAL_OBJECT_INFO(FollowFusedSplitStepNode, StepNode); }; /*! * \brief Managed reference to FollowFusedSplitStepNode. * \sa FollowFusedSplitStepNode */ class FollowFusedSplitStep : public Step { public: /*! * \brief The constructor. * \param stage_id The index of the stage to be split. * \param iter_id The index of the iterator to be split. * \param src_step_ids An array of index for split step to be followed in the history. * \param level Use the length in this split level. * \param factor_or_nparts If this is true, use factor. Otherwise, use nparts. */ FollowFusedSplitStep(int stage_id, int iter_id, const Array<Integer>& src_step_ids, int level, bool factor_or_nparts); /*! * \brief The constructor used to read a step record from JSONReader and create the * corresponding step. * \param reader The input JSONReader. */ explicit FollowFusedSplitStep(dmlc::JSONReader* reader); TVM_DEFINE_OBJECT_REF_METHODS(FollowFusedSplitStep, Step, FollowFusedSplitStepNode); }; /*! \brief Storage align step that corresponds to te::Stage::storage_align */ class StorageAlignStepNode : public StepNode { public: /*! \brief The iterator to be aligned. */ int iter_id; /*! \brief The factor in alignment specification. */ int factor; /*! \brief The offset in the alignment specification. 
*/ int offset; void WriteToRecord(dmlc::JSONWriter* writer) const final; /*! * \brief Apply the current step to State. * \param state A mutable pointer to State, which will be updated. */ void ApplyToState(State* state) const; /*! * \brief Apply the current step to tvm.schedule. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. */ void ApplyToSchedule(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes) const; /*! * \brief Print the current step as equivalent python schedule API. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \return Python schedule code. */ String PrintAsPythonAPI(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes) const; static constexpr const char* record_prefix_str = "SA"; static constexpr const char* _type_key = "auto_scheduler.StorageAlignStep"; TVM_DECLARE_FINAL_OBJECT_INFO(StorageAlignStepNode, StepNode); }; /*! * \brief Managed reference to StorageAlignStepNode. * \sa StorageAlignStepNode */ class StorageAlignStep : public Step { public: /*! * \brief The constructor. * \param stage_id The index of the stage to be aligned. * \param iter_id The index of the iterator to be aligned. * \param factor The factor in alignment specification. * \param offset The offset in the alignment specification. */ StorageAlignStep(int stage_id, int iter_id, int factor, int offset); /*! * \brief The constructor used to read a step record from JSONReader and create the * corresponding step. * \param reader The input JSONReader. */ explicit StorageAlignStep(dmlc::JSONReader* reader); TVM_DEFINE_OBJECT_REF_METHODS(StorageAlignStep, Step, StorageAlignStepNode); }; /********** Steps working on multiple stages **********/ /*! \brief Compute at step that corresponds to te::Stage::compute_at */ class ComputeAtStepNode : public StepNode { public: /*! \brief The index of stage that this step will compute at to. */ int target_stage_id; /*! \brief The index of iterator in target stage that this step will compute at to. */ int target_iter_id; void WriteToRecord(dmlc::JSONWriter* writer) const final; /*! * \brief Apply the current step to State. * \param state A mutable pointer to state, which will be updated. * \note After compute_at, we need careful dependency analysis to compute the accurate bound * information. However, it is relatively expensive and complicated, so we just fill "None" as * bound for the newly created iterators. * Call ComputeDAG::InferBound on the updated state if you need the complete bound information. */ void ApplyToState(State* state) const; /*! * \brief Apply the current step to tvm.schedule. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. */ void ApplyToSchedule(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes) const; /*! * \brief Print the current step as equivalent python schedule API. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \return Python schedule code. */ String PrintAsPythonAPI(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes) const; static constexpr const char* record_prefix_str = "CA"; static constexpr const char* _type_key = "auto_scheduler.ComputeAtStep"; TVM_DECLARE_FINAL_OBJECT_INFO(ComputeAtStepNode, StepNode); }; /*! * \brief Managed reference to ComputeAtStepNode. * \sa ComputeAtStepNode */ class ComputeAtStep : public Step { public: /*! * \brief The constructor. 
* \param stage_id The index of the source stage. * \param target_stage_id The index of stage that this step will compute at to. * \param target_iter_id The index of iterator in target stage that this step will compute at to. */ ComputeAtStep(int stage_id, int target_stage_id, int target_iter_id); /*! * \brief The constructor used to read a step record from JSONReader and create the * corresponding step. * \param reader The input JSONReader. */ explicit ComputeAtStep(dmlc::JSONReader* reader); TVM_DEFINE_OBJECT_REF_METHODS(ComputeAtStep, Step, ComputeAtStepNode); }; /*! \brief Compute inline step that corresponds to te::Stage::compute_inline */ class ComputeInlineStepNode : public StepNode { public: void WriteToRecord(dmlc::JSONWriter* writer) const final; /*! * \brief Apply the current step to State. * \param state A mutable pointer to state, which will be updated. */ void ApplyToState(State* state) const; /*! * \brief Apply the current step to tvm.schedule. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \return The iterator result after fuse. */ void ApplyToSchedule(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes) const; /*! * \brief Print the current step as equivalent python schedule API. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \return Python schedule code. */ String PrintAsPythonAPI(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes) const; static constexpr const char* record_prefix_str = "CI"; static constexpr const char* _type_key = "auto_scheduler.ComputeInlineStep"; TVM_DECLARE_FINAL_OBJECT_INFO(ComputeInlineStepNode, StepNode); }; /*! * \brief Managed reference to ComputeInlineStepNode. * \sa ComputeInlineStepNode */ class ComputeInlineStep : public Step { public: /*! * \brief The constructor. * \param stage_id The index of the stage to be marked compute inlined. */ explicit ComputeInlineStep(int stage_id); /*! * \brief The constructor used to read a step record from JSONReader and create the * corresponding step. * \param reader The input JSONReader. */ explicit ComputeInlineStep(dmlc::JSONReader* reader); TVM_DEFINE_OBJECT_REF_METHODS(ComputeInlineStep, Step, ComputeInlineStepNode); }; /*! \brief Compute root step that corresponds to te::Stage::compute_root */ class ComputeRootStepNode : public StepNode { public: void WriteToRecord(dmlc::JSONWriter* writer) const final; /*! * \brief Apply the current step to State. * \param state A mutable pointer to state, which will be updated. * \note After compute_root, we need careful dependency analysis to compute the accurate bound * information. However, it is relatively expensive and complicated, so we just fill "None" as * bound for the newly created iterators. * Call ComputeDAG::InferBound on the updated state if you need the complete bound information. */ void ApplyToState(State* state) const; /*! * \brief Apply the current step to tvm.schedule. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \return The iterator result after fuse. */ void ApplyToSchedule(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes) const; /*! * \brief Print the current step as equivalent python schedule API. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \return Python schedule code. 
*/ String PrintAsPythonAPI(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes) const; static constexpr const char* record_prefix_str = "CR"; static constexpr const char* _type_key = "auto_scheduler.ComputeRootStep"; TVM_DECLARE_FINAL_OBJECT_INFO(ComputeRootStepNode, StepNode); }; /*! * \brief Managed reference to ComputeRootStepNode. * \sa ComputeRootStepNode */ class ComputeRootStep : public Step { public: /*! * \brief The constructor. * \param stage_id The index of the stage to be marked compute at root. */ explicit ComputeRootStep(int stage_id); /*! * \brief The constructor used to read a step record from JSONReader and create the * corresponding step. * \param reader The input JSONReader. */ explicit ComputeRootStep(dmlc::JSONReader* reader); TVM_DEFINE_OBJECT_REF_METHODS(ComputeRootStep, Step, ComputeRootStepNode); }; /********** Steps adding new stages **********/ /*! * \brief Cache read step that corresponds to te::Schedule::cache_read. * \note Cache read step adds an extra stage to the original ComputeDAG, * an up-to-date ComputeDAG will be stored in State's `current_compute_dag`. */ class CacheReadStepNode : public StepNode { public: /*! \brief The scope name of the newly added read stage. (e.g., local, shared, global) */ String scope_name; /*! \brief The indices of read stages. */ Array<Integer> reader_stage_ids; void WriteToRecord(dmlc::JSONWriter* writer) const final; /*! * \brief Apply the current step to State. * \param state A mutable pointer to state, which will be updated. * \param dag The original ComputeDAG of this state. * \return The index of the new added stage. */ int ApplyToState(State* state, const ComputeDAG& dag) const; /*! * \brief Apply the current step to tvm.schedule. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \param schedule A mutable pointer to a te::Schedule. * \return The output Tensor of the new added stage. */ te::Tensor ApplyToSchedule(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes, te::Schedule* schedule) const; /*! * \brief Print the current step as equivalent python schedule API. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \param schedule A mutable pointer to a te::Schedule. * \return Python schedule code. */ String PrintAsPythonAPI(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes, te::Schedule* schedule) const; static constexpr const char* record_prefix_str = "CHR"; static constexpr const char* _type_key = "auto_scheduler.CacheReadStep"; TVM_DECLARE_FINAL_OBJECT_INFO(CacheReadStepNode, StepNode); }; /*! * \brief Managed reference to CacheReadStepNode. * \sa CacheReadStepNode */ class CacheReadStep : public Step { public: /*! * \brief The constructor. * \param stage_id The index of the stage to be cache_read. * \param scope_name The scope name of the newly added stage. * \param reader_stage_ids The indices of reader stages. */ CacheReadStep(int stage_id, String scope_name, const Array<Integer>& reader_stage_ids); /*! * \brief The constructor used to read a step record from JSONReader and create the * corresponding step. * \param reader The input JSONReader. */ explicit CacheReadStep(dmlc::JSONReader* reader); TVM_DEFINE_OBJECT_REF_METHODS(CacheReadStep, Step, CacheReadStepNode); }; /*! * \brief Cache write step that corresponds to te::Schedule::cache_write. 
* \note Cache write step will add an extra stage to the original ComputeDAG, a up-to-date * ComputeDAG is stored in State's `current_compute_dag`. * This step will cache write all output tensors of the target stage. */ class CacheWriteStepNode : public StepNode { public: /*! \brief The scope name of the newly added compute stage. (e.g. local, shared, global) */ String scope_name; void WriteToRecord(dmlc::JSONWriter* writer) const final; /*! * \brief Apply the current step to State. * \param state A mutable pointer to state, which will be updated. * \param dag The original ComputeDAG of this state. * \return The index of the new added stage. */ int ApplyToState(State* state, const ComputeDAG& dag) const; /*! * \brief Apply the current step to tvm.schedule. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \param schedule A mutable pointer to a te::Schedule. * \return The output Tensors of the new added stage. */ Array<te::Tensor> ApplyToSchedule(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes, te::Schedule* schedule) const; /*! * \brief Print the current step as equivalent python schedule API. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \param schedule A mutable pointer to a te::Schedule. * \return Python schedule code. */ String PrintAsPythonAPI(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes, te::Schedule* schedule) const; static constexpr const char* record_prefix_str = "CHW"; static constexpr const char* _type_key = "auto_scheduler.CacheWriteStep"; TVM_DECLARE_FINAL_OBJECT_INFO(CacheWriteStepNode, StepNode); }; /*! * \brief Managed reference to CacheWriteStepNode. * \sa CacheWriteStepNode */ class CacheWriteStep : public Step { public: /*! * \brief The constructor. * \param stage_id The index of the stage to be cache_write. * \param scope_name The scope name of the newly added stage. */ CacheWriteStep(int stage_id, String scope_name); /*! * \brief The constructor used to read a step record from JSONReader and create the * corresponding step. * \param reader The input JSONReader. */ explicit CacheWriteStep(dmlc::JSONReader* reader); TVM_DEFINE_OBJECT_REF_METHODS(CacheWriteStep, Step, CacheWriteStepNode); }; /*! \brief Reduction factor step that corresponds to te::Schedule::rfactor */ class RfactorStepNode : public StepNode { public: /*! \brief The index of the iterator to be factored. */ int iter_id; /*! \brief The position where the new iterator is placed. */ int factor_iter_id; void WriteToRecord(dmlc::JSONWriter* writer) const final; /*! * \brief Apply the current step to State. * \param state A mutable pointer to State, which will be updated. * \param dag The original ComputeDAG of this state. * \return The index of the new added stage. */ int ApplyToState(State* state, const ComputeDAG& dag) const; /*! * \brief Apply the current step to tvm.schedule. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \param schedule A mutable pointer to a te::Schedule. * \return The output Tensors of the new added stage. */ Array<te::Tensor> ApplyToSchedule(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes, te::Schedule* schedule) const; /*! * \brief Print the current step as equivalent python schedule API. * \param stages The list of current stages * \param stage_to_axes A map that maps stage ot all its iterators. * \param schedule A mutable pointer to a te::Schedule. 
* \return Python schedule code. */ String PrintAsPythonAPI(Array<te::Stage>* stages, StageToAxesMap* stage_to_axes, te::Schedule* schedule) const; static constexpr const char* record_prefix_str = "RF"; static constexpr const char* _type_key = "auto_scheduler.RfactorStep"; TVM_DECLARE_FINAL_OBJECT_INFO(RfactorStepNode, StepNode); }; /*! * \brief Managed reference to RfactorStepNode. * \sa RfactorStepNode */ class RfactorStep : public Step { public: /*! * \brief The constructor. * \param stage_id The index of the stage to be factored. * \param iter_id The index of the iterator to be factored. * \param factor_iter_id The position where the new iterator is placed. */ RfactorStep(int stage_id, int iter_id, int factor_iter_id); /*! * \brief The constructor used to read a step record from JSONReader and create the * corresponding step. * \param reader The input JSONReader. */ explicit RfactorStep(dmlc::JSONReader* reader); TVM_DEFINE_OBJECT_REF_METHODS(RfactorStep, Step, RfactorStepNode); }; } // namespace auto_scheduler } // namespace tvm #endif // TVM_AUTO_SCHEDULER_TRANSFORM_STEP_H_
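A small sketch of how the step constructors declared in this header can be instantiated and serialized, assuming dmlc-core's JSONWriter behaves as in mainline TVM; the stage and iterator indices are arbitrary illustrative values.

#include <dmlc/json.h>
#include <tvm/auto_scheduler/transform_step.h>

#include <sstream>
#include <string>

// Build a fuse step and an annotation step, then serialize the fuse step in the
// JSON array format used by the auto-scheduler log records.
std::string ExampleStepRecord() {
  using namespace tvm::auto_scheduler;
  FuseStep fuse(/*stage_id=*/2, {tvm::Integer(0), tvm::Integer(1)});  // fuse iters 0 and 1 of stage 2
  AnnotationStep parallel(/*stage_id=*/2, /*iter_id=*/0, IteratorAnnotation::kParallel);
  (void)parallel;  // constructed only to show the signature

  std::ostringstream os;
  dmlc::JSONWriter writer(&os);
  writer.BeginArray(false);
  fuse->WriteToRecord(&writer);
  writer.EndArray();
  return os.str();
}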
https://github.com/zk-ml/tachikoma
include/tvm/driver/driver_api.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/driver/driver_api.h * \brief Compiler driver APIs to drive the compilation. * * This module provides end-to-end utils to drive the compilation process. * We adopt the term "compiler driver" in common compiler infrastructures. * Note that a compiler driver is different from "runtime drivers". * Most of runtime related code are defined in the runtime folder instead. */ #ifndef TVM_DRIVER_DRIVER_API_H_ #define TVM_DRIVER_DRIVER_API_H_ #include <tvm/ir/global_var_supply.h> #include <tvm/ir/module.h> #include <tvm/ir/transform.h> #include <tvm/runtime/packed_func.h> #include <tvm/support/with.h> #include <tvm/target/target.h> #include <tvm/te/schedule_pass.h> #include <tvm/tir/function.h> #include <string> #include <unordered_map> #include <unordered_set> #include <utility> #include <vector> namespace tvm { using tvm::transform::Pass; /*! * \brief Configures and returns the composite Pass for the fused module (pre split) that contains * device and host code. * \param mixed_mod The original mixed module. * \param target The device Target. * \return The composite Pass for the fused module. // */ TVM_DLL transform::Sequential MixedModulePassManager(IRModule mixed_mod, Target target); /*! * \brief Configures and returns the composite Pass for the device Target after device/host from * mixed module. * \param mixed_mod The optimized mixed module. * \param target The device Target. * \return The composite Pass for the device module. */ TVM_DLL transform::Sequential DeviceModulePassManager(IRModule mixed_mod, Target target); /*! * \brief Configures and returns the composite Pass for the host Target after device/host from mixed * module. * \param mixed_mod The optimized mixed module. * \param target_host The host Target. * \return The composite Pass for the host module. */ TVM_DLL transform::Sequential HostModulePassManager(IRModule mixed_mod, Target target_host); /*! * \brief Lower an IRModule (optimize with it with the pass list defined in CreatePassList) * \param mod The IRmodule to lower * \param simple_mode Disables the loop partition pass. Defaults to false. * \return The result module. */ TVM_DLL IRModule LowerModule(IRModule mod, bool simple_mode = false); /*! * \brief Lower a primfunc and name (convert to IRModule, and optimize it with the pass list * defined in CreatePassList) * \param func The PrimFunc to lower * \param name The name of the lowered function. * \param simple_mode Disables the loop partition pass. Defaults to false. * \return The result module. */ TVM_DLL IRModule LowerPrimFunc(tvm::tir::PrimFunc func, const std::string& name, bool simple_mode = false); /*! * \brief Build an IRModule given a TE schedule, args and binds. 
This function also applies * the lowering passes defined in CreatePassList. * \param sch The TE schedule to lower. * \param args The arguments to the function. * \param name The name of the lowered function. * \param binds Buffer assignments. * \param global_var_supply The GlobalVarSupply to be used in the module. * \param simple_mode Disables the loop partition pass. Defaults to false. * \return The result module. */ TVM_DLL IRModule LowerSchedule(te::Schedule sch, const Array<te::Tensor>& args, const std::string& name, const std::unordered_map<te::Tensor, tir::Buffer>& binds, GlobalVarSupply global_var_supply, bool simple_mode = false); /*! * \brief Build an IRModule given a TE schedule, args and binds. This function also applies * the lowering passes defined in CreatePassList. * \param sch The TE schedule to lower. * \param args The arguments to the function (Array of Tensor, Buffer and Vars) * \param name The name of the lowered function. * \param binds Buffer assignments. * \param global_var_supply The GlobalVarSupply to be used in the module. * \param simple_mode Disables the loop partition pass. Defaults to false. * \return The result module. */ TVM_DLL IRModule LowerSchedule(te::Schedule sch, const Array<ObjectRef>& args, const std::string& name, const std::unordered_map<te::Tensor, tir::Buffer>& binds, GlobalVarSupply global_var_supply, bool simple_mode = false); /*! * \brief Create an IRModule out of a TE Schedule. It does not apply lowering passes. If you want * to apply lowering passes as well, use LowerSchedule. * \param sch The schedule * \param args The arguments to the function. * \param name The name of the lowered function. * \param binds Buffer assignments. * \param global_var_supply The GlobalVarSupply to be used in the module and when creating * GlobalVars. * \return The result module. */ IRModule ScheduleToModule(te::Schedule sch, const Array<ObjectRef>& args, const std::string& name, const std::unordered_map<te::Tensor, tir::Buffer>& binds, GlobalVarSupply global_var_supply); /*! * \brief Build a device and host module for a specific target from an IRModule. * \param funcs The functions to be built. * \param target The target device to build for. * \param target_host The target for building host code. To use the default, pass Target() * \return The built module. */ TVM_DLL runtime::Module build(const IRModule& funcs, const Target& target, const Target& target_host); /*! * \brief Build a device and host module for a specific target from a map * contains target to IRModule. This function is used * for heterogeneous build. * \param input The map contains target to an IRModule. * \param target_host The target for building host code. To use the default, * pass Target(). * \return The built module that contains code for different processors. */ TVM_DLL runtime::Module build(const Map<Target, IRModule>& input, const Target& target_host); /*! * \brief Build a device and host module for a specific target from a map * contains target to IRModule. This function is used * for heterogeneous build. * \param input The map contains target string to an IRModule. * \param target_host The target for building host code. To use the default, * pass Target(). * \return The built module that contains code for different processors. */ TVM_DLL runtime::Module build(const Map<String, IRModule>& input, const Target& target_host); } // namespace tvm #endif // TVM_DRIVER_DRIVER_API_H_
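A brief sketch, assuming `mod` is an IRModule of unscheduled TIR PrimFuncs (for example produced by ScheduleToModule above), of how the lowering and build entry points declared in this header fit together for a plain llvm target.

#include <tvm/driver/driver_api.h>
#include <tvm/ir/module.h>
#include <tvm/runtime/module.h>
#include <tvm/target/target.h>

// Lower with the default pass list, then build code for the CPU.
tvm::runtime::Module LowerAndBuild(tvm::IRModule mod) {
  tvm::IRModule lowered = tvm::LowerModule(mod, /*simple_mode=*/false);
  tvm::Target target("llvm");
  // For a pure CPU build the device target and the host target coincide.
  return tvm::build(lowered, target, /*target_host=*/target);
}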
https://github.com/zk-ml/tachikoma
include/tvm/ir/adt.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/adt.h * \brief Algebraic data type definitions. * * We adopt relay's ADT definition as a unified class * for decripting structured data. */ #ifndef TVM_IR_ADT_H_ #define TVM_IR_ADT_H_ #include <tvm/ir/expr.h> #include <tvm/ir/type.h> #include <tvm/node/node.h> #include <tvm/runtime/container/adt.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/string.h> #include <tvm/runtime/object.h> #include <string> namespace tvm { /*! * \brief ADT constructor. * Constructors compare by pointer equality. * \sa Constructor */ class ConstructorNode : public RelayExprNode { public: /*! \brief The name (only a hint) */ String name_hint; /*! \brief Input to the constructor. */ Array<Type> inputs; /*! \brief The datatype the constructor will construct. */ GlobalTypeVar belong_to; /*! \brief Index in the table of constructors (set when the type is registered). */ mutable int32_t tag = -1; ConstructorNode() {} void VisitAttrs(AttrVisitor* v) { v->Visit("name_hint", &name_hint); v->Visit("inputs", &inputs); v->Visit("belong_to", &belong_to); v->Visit("tag", &tag); v->Visit("span", &span); v->Visit("_checked_type_", &checked_type_); } bool SEqualReduce(const ConstructorNode* other, SEqualReducer equal) const { // Use namehint for now to be consistent with the legacy relay impl // TODO(tvm-team) revisit, need to check the type var. return equal(name_hint, other->name_hint) && equal(inputs, other->inputs); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(name_hint); hash_reduce(inputs); } static constexpr const char* _type_key = "relay.Constructor"; TVM_DECLARE_FINAL_OBJECT_INFO(ConstructorNode, RelayExprNode); }; /*! * \brief Managed reference to ConstructorNode * \sa ConstructorNode */ class Constructor : public RelayExpr { public: /*! * \brief Constructor * \param name_hint the name of the constructor. * \param inputs The input types. * \param belong_to The data type var the constructor will construct. */ TVM_DLL Constructor(String name_hint, Array<Type> inputs, GlobalTypeVar belong_to); TVM_DEFINE_OBJECT_REF_METHODS(Constructor, RelayExpr, ConstructorNode); }; /*! \brief TypeData container node */ class TypeDataNode : public TypeNode { public: /*! * \brief The header is simply the name of the ADT. * We adopt nominal typing for ADT definitions; * that is, differently-named ADT definitions with same constructors * have different types. */ GlobalTypeVar header; /*! \brief The type variables (to allow for polymorphism). */ Array<TypeVar> type_vars; /*! \brief The constructors. 
*/ Array<Constructor> constructors; void VisitAttrs(AttrVisitor* v) { v->Visit("header", &header); v->Visit("type_vars", &type_vars); v->Visit("constructors", &constructors); v->Visit("span", &span); } bool SEqualReduce(const TypeDataNode* other, SEqualReducer equal) const { return equal.DefEqual(header, other->header) && equal.DefEqual(type_vars, other->type_vars) && equal(constructors, other->constructors); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce.DefHash(header); hash_reduce.DefHash(type_vars); hash_reduce(constructors); } static constexpr const char* _type_key = "relay.TypeData"; TVM_DECLARE_FINAL_OBJECT_INFO(TypeDataNode, TypeNode); }; /*! * \brief Stores all data for an Algebraic Data Type (ADT). * * In particular, it stores the handle (global type var) for an ADT * and the constructors used to build it and is kept in the module. Note * that type parameters are also indicated in the type data: this means that * for any instance of an ADT, the type parameters must be indicated. That is, * an ADT definition is treated as a type-level function, so an ADT handle * must be wrapped in a TypeCall node that instantiates the type-level arguments. * The kind checker enforces this. */ class TypeData : public Type { public: /*! * \brief Constructor * \param header the name of ADT. * \param type_vars type variables. * \param constructors constructors field. */ TVM_DLL TypeData(GlobalTypeVar header, Array<TypeVar> type_vars, Array<Constructor> constructors); TVM_DEFINE_OBJECT_REF_METHODS(TypeData, Type, TypeDataNode); }; } // namespace tvm #endif // TVM_IR_ADT_H_
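A short sketch, assuming the GlobalTypeVar and TypeVar constructors from tvm/ir/type.h behave as in mainline TVM, of how the ADT classes above could declare a Relay-style `Option` datatype; registering the resulting TypeData in an IRModule is not shown.

#include <tvm/ir/adt.h>
#include <tvm/ir/type.h>

// Declare `Option[a]` with two constructors: None and Some(a).
tvm::TypeData MakeOptionAdt() {
  using namespace tvm;
  GlobalTypeVar option("Option", TypeKind::kAdtHandle);  // the ADT handle (header)
  TypeVar a("a", TypeKind::kType);                       // one type parameter
  Constructor none("None", {}, option);                  // no payload
  Constructor some("Some", {a}, option);                 // wraps a value of type `a`
  return TypeData(option, {a}, {none, some});
}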
https://github.com/zk-ml/tachikoma
include/tvm/ir/affine_type.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/affine_type.h * \brief Quantized Tensor Types. */ #ifndef TVM_IR_AFFINE_TYPE_H_ #define TVM_IR_AFFINE_TYPE_H_ #include <tvm/ir/expr.h> #include <tvm/ir/type.h> namespace tvm { /*! * \brief AffineType representation * \sa AffineType */ class AffineTypeNode : public Object { public: /*! * \brief Span that points to the original source code. * Reserved debug information. */ mutable Span span; static constexpr const char* _type_key = "AffineType"; static constexpr const bool _type_has_method_sequal_reduce = true; static constexpr const bool _type_has_method_shash_reduce = true; TVM_DECLARE_BASE_OBJECT_INFO(AffineTypeNode, Object); }; /*! * \brief Managed reference to AffineTypeNode. * \sa AffineTypeNode */ class AffineType : public ObjectRef { public: TVM_DEFINE_OBJECT_REF_METHODS(AffineType, ObjectRef, AffineTypeNode); }; /*! * \brief TensorAffineType representation * \sa TensorAffineType * * This Type represents a quantized integer tensor that can be converted * back to real space via the x_real = scale * (x_quant - zero_point) */ class TensorAffineTypeNode : public AffineTypeNode { public: /*! \brief The scale of this type */ RelayExpr scale; /*! \brief The zero point of this type */ RelayExpr zero_point; /*! \brief The data type of this type */ DataType dtype; /*! \brief The axis for per-channel quantization */ int axis; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("scale", &scale); v->Visit("zero_point", &zero_point); v->Visit("dtype", &dtype); v->Visit("axis", &axis); } bool SEqualReduce(const TensorAffineTypeNode* other, SEqualReducer equal) const { equal->MarkGraphNode(); return equal(scale, other->scale) && equal(zero_point, other->zero_point) && equal(dtype, other->dtype) && equal(axis, other->axis); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce->MarkGraphNode(); hash_reduce(scale); hash_reduce(zero_point); hash_reduce(dtype); hash_reduce(axis); } static constexpr const char* _type_key = "TensorAffineType"; TVM_DECLARE_BASE_OBJECT_INFO(TensorAffineTypeNode, AffineTypeNode); }; /*! * \brief Managed reference to AffineTypes. * \sa AffineTypeNode */ class TensorAffineType : public AffineType { public: TVM_DLL TensorAffineType(RelayExpr scale, RelayExpr zero_point, DataType dtype, int axis); TVM_DEFINE_OBJECT_REF_METHODS(TensorAffineType, AffineType, TensorAffineTypeNode); }; /*! * \brief TupleAffineType representation * \sa TupleAffineType */ class TupleAffineTypeNode : public AffineTypeNode { public: /*! 
\brief The types of this tuple*/ Array<TensorAffineType> types; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("types", &types); } bool SEqualReduce(const TupleAffineTypeNode* other, SEqualReducer equal) const { equal->MarkGraphNode(); return equal(types, other->types); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce->MarkGraphNode(); hash_reduce(types); } static constexpr const char* _type_key = "TupleAffineType"; TVM_DECLARE_BASE_OBJECT_INFO(TupleAffineTypeNode, AffineTypeNode); }; /*! * \brief Managed reference to TupleAffineTypes. * \sa TupleAffineType */ class TupleAffineType : public AffineType { public: TVM_DLL TupleAffineType(Array<TensorAffineType> types); TVM_DEFINE_OBJECT_REF_METHODS(TupleAffineType, AffineType, TupleAffineTypeNode); }; } // namespace tvm #endif // TVM_IR_AFFINE_TYPE_H_
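A rough sketch with more assumptions than the ones above: it presumes relay::Constant (tvm/relay/expr.h) and runtime::NDArray::Empty behave as in mainline TVM, and uses empty 0-d placeholder constants where a real quantization pass would supply concrete scale and zero-point values.

#include <tvm/ir/affine_type.h>
#include <tvm/relay/expr.h>
#include <tvm/runtime/ndarray.h>

// Describe a quantized int8 tensor: x_real = scale * (x_quant - zero_point).
tvm::TensorAffineType MakeInt8AffineType() {
  using tvm::runtime::NDArray;
  tvm::Device cpu{kDLCPU, 0};
  // Placeholder scalar constants; their contents are irrelevant for this sketch.
  tvm::relay::Constant scale(NDArray::Empty({}, tvm::DataType::Float(32), cpu));
  tvm::relay::Constant zero_point(NDArray::Empty({}, tvm::DataType::Int(32), cpu));
  // axis = -1 is used here as a placeholder for per-tensor quantization (no channel axis).
  return tvm::TensorAffineType(scale, zero_point, tvm::DataType::Int(8), /*axis=*/-1);
}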
https://github.com/zk-ml/tachikoma
include/tvm/ir/attrs.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/attrs.h * \brief Helpers for attribute objects. * * This module enables declaration of named attributes * which support default value setup and bound checking. * * \code * struct MyAttrs : public tvm::AttrsNode<MyAttrs> { * float learning_rate; * int num_hidden; * String name; * // declare attribute fields in header file * TVM_DECLARE_ATTRS(MyAttrs, "attrs.MyAttrs") { * TVM_ATTR_FIELD(num_hidden).set_lower_bound(1); * TVM_ATTR_FIELD(learning_rate).set_default(0.01f); * TVM_ATTR_FIELD(name).set_default("hello"); * } * }; * // register it in cc file * TVM_REGISTER_NODE_TYPE(MyAttrs); * \endcode * * \sa AttrsNode, TVM_DECLARE_ATTRS, TVM_ATTR_FIELD */ #ifndef TVM_IR_ATTRS_H_ #define TVM_IR_ATTRS_H_ #include <dmlc/common.h> #include <tvm/ir/expr.h> #include <tvm/node/structural_equal.h> #include <tvm/node/structural_hash.h> #include <tvm/runtime/packed_func.h> #include <functional> #include <string> #include <type_traits> #include <unordered_map> #include <utility> #include <vector> namespace tvm { /*! * \brief Declare an attribute function. * \param ClassName The name of the class. * \param TypeKey The type key to be used by the TVM node system. */ #define TVM_DECLARE_ATTRS(ClassName, TypeKey) \ static constexpr const char* _type_key = TypeKey; \ TVM_DECLARE_FINAL_OBJECT_INFO(ClassName, ::tvm::BaseAttrsNode) \ template <typename FVisit> \ void _tvm_VisitAttrs(FVisit& _tvm_fvisit) // NOLINT(*) /*! * \brief Declare an attribute field. * \param FieldName The field name. */ #define TVM_ATTR_FIELD(FieldName) _tvm_fvisit(#FieldName, &FieldName) /*! * \brief Create a NodeRef type that represents null. * \tparam TNodeRef the type to be created. * \return A instance that will represent None. */ template <typename TObjectRef> inline TObjectRef NullValue() { static_assert(TObjectRef::_type_is_nullable, "Can only get NullValue for nullable types"); return TObjectRef(ObjectPtr<Object>(nullptr)); } template <> inline DataType NullValue<DataType>() { return DataType(DataType::kHandle, 0, 0); } /*! \brief Error thrown during attribute checking. */ struct AttrError : public Error { /*! * \brief constructor * \param msg error message */ explicit AttrError(std::string msg) : Error("AttributeError:" + msg) {} }; /*! * \brief Information about attribute fields in string representations. */ class AttrFieldInfoNode : public Object { public: /*! \brief name of the field */ String name; /*! \brief type docstring information in str. */ String type_info; /*! 
\brief detailed description of the type */ String description; void VisitAttrs(AttrVisitor* v) { v->Visit("name", &name); v->Visit("type_info", &type_info); v->Visit("description", &description); } static constexpr const char* _type_key = "AttrFieldInfo"; static constexpr bool _type_has_method_sequal_reduce = false; static constexpr bool _type_has_method_shash_reduce = false; TVM_DECLARE_FINAL_OBJECT_INFO(AttrFieldInfoNode, Object); }; /*! \brief AttrFieldInfo */ class AttrFieldInfo : public ObjectRef { public: TVM_DEFINE_OBJECT_REF_METHODS(AttrFieldInfo, ObjectRef, AttrFieldInfoNode); }; /*! * \brief Base class of all attribute class * \note Do not subclass AttrBaseNode directly, * subclass AttrsNode instead. * \sa AttrsNode */ class BaseAttrsNode : public Object { public: using TVMArgs = runtime::TVMArgs; using TVMRetValue = runtime::TVMRetValue; /*! \brief virtual destructor */ virtual ~BaseAttrsNode() {} // visit function virtual void VisitAttrs(AttrVisitor* v) {} /*! * \brief Initialize the attributes by sequence of arguments * \param args The positional arguments in the form * [key0, value0, key1, value1, ..., key_n, value_n] */ template <typename... Args> inline void InitBySeq(Args&&... args); /*! * \brief Print readible docstring to ostream, add newline. * \param os the stream to print the docstring to. */ inline void PrintDocString(std::ostream& os) const; // NOLINT(*) /*! * \brief Visit attributes that do not equal the default value. * * \note This is useful to extract fields for concise printing. * \param v The visitor */ TVM_DLL virtual void VisitNonDefaultAttrs(AttrVisitor* v) = 0; /*! * \brief Get the field information * \return The fields in the Attrs. */ TVM_DLL virtual Array<AttrFieldInfo> ListFieldInfo() const = 0; /*! * \brief Initialize the attributes by arguments. * \param kwargs The key value pairs for initialization. * [key0, value0, key1, value1, ..., key_n, value_n] * \param allow_unknown Whether allow additional unknown fields. * \note This function throws when the required field is not present. */ TVM_DLL virtual void InitByPackedArgs(const TVMArgs& kwargs, bool allow_unknown = false) = 0; static constexpr const bool _type_has_method_sequal_reduce = true; static constexpr const bool _type_has_method_shash_reduce = true; static constexpr const char* _type_key = "Attrs"; TVM_DECLARE_BASE_OBJECT_INFO(BaseAttrsNode, Object); }; /*! * \brief Managed reference to BaseAttrsNode. * \sa AttrsNode, BaseAttrsNode */ class Attrs : public ObjectRef { public: TVM_DEFINE_OBJECT_REF_METHODS(Attrs, ObjectRef, BaseAttrsNode); }; /*! * \brief Specialized attribute type that is backed by a map. * The DictAttrsNode implements the Attrs behavior, * its fields are directly accessible via object.field_name * like other normal nodes. */ class DictAttrsNode : public BaseAttrsNode { public: /*! \brief internal attrs map */ Map<String, ObjectRef> dict; bool SEqualReduce(const DictAttrsNode* other, SEqualReducer equal) const { return equal(dict, other->dict); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dict); } // implementations void VisitAttrs(AttrVisitor* v) final; void VisitNonDefaultAttrs(AttrVisitor* v) final; void InitByPackedArgs(const runtime::TVMArgs& args, bool allow_unknown) final; Array<AttrFieldInfo> ListFieldInfo() const final; // type info static constexpr const char* _type_key = "DictAttrs"; TVM_DECLARE_FINAL_OBJECT_INFO(DictAttrsNode, BaseAttrsNode); }; /*! * \brief Managed reference to DictAttrsNode * \sa DictAttrsNode. 
*/ class DictAttrs : public Attrs { public: /*! * \brief Construct an Attrs backed by DictAttrsNode. * \param dict The attributes. * \return The dict attributes. */ TVM_DLL explicit DictAttrs(Map<String, ObjectRef> dict); // Utils for accessing attributes // This needs to be on DictAttrs, not DictAttrsNode because we return the default // value if DictAttrsNode is not defined. /*! * \brief Get a function attribute. * * \param attr_key The attribute key. * \param default_value The default value if the key does not exist, defaults to nullptr. * * \return The result * * \tparam TObjectRef The expected object type. * \throw Error if the key exists but the value does not match TObjectRef * * \code * * void GetAttrExample(const BaseFunc& f) { * auto value = f->attrs.GetAttr<Integer>("AttrKey", 0); * } * * \endcode */ template <typename TObjectRef> Optional<TObjectRef> GetAttr( const std::string& attr_key, Optional<TObjectRef> default_value = Optional<TObjectRef>(nullptr)) const { static_assert(std::is_base_of<ObjectRef, TObjectRef>::value, "Can only call GetAttr with ObjectRef types."); if (!defined()) return default_value; const DictAttrsNode* node = this->as<DictAttrsNode>(); auto it = node->dict.find(attr_key); if (it != node->dict.end()) { return Downcast<Optional<TObjectRef>>((*it).second); } else { return default_value; } } // variant that uses TObjectRef to enable implicit conversion to default value. template <typename TObjectRef> Optional<TObjectRef> GetAttr(const std::string& attr_key, TObjectRef default_value) const { return GetAttr<TObjectRef>(attr_key, Optional<TObjectRef>(default_value)); } /*! * \brief Check whether the function has a non-zero integer attr. * * This function can be used to check whether an optional * attribute mark (e.g. inline) exists. * * \param attr_key The key to the attribute. * \return The check result. * * \code * * void HasNonzeroAttrExample(const BaseFunc& f) { * if (f->HasNonzeroAttr(attr::kInline)) { * // inline the function. * } * } * * \endcode */ bool HasNonzeroAttr(const std::string& attr_key) const { return GetAttr<Integer>(attr_key, 0).value_or(0).IntValue() != 0; } TVM_DEFINE_OBJECT_REF_METHODS(DictAttrs, Attrs, DictAttrsNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(DictAttrsNode); }; /*! * \brief Create an Attrs object with all fields set to their default values. * \tparam TAttrs the type to be created. * \return An instance initialized with the default values. */ template <typename TAttrs> inline TAttrs AttrsWithDefaultValues() { static_assert(std::is_base_of<Attrs, TAttrs>::value, "Can only take attr nodes"); auto n = make_object<typename TAttrs::ContainerType>(); n->InitByPackedArgs(runtime::TVMArgs(nullptr, nullptr, 0), false); return TAttrs(n); } /*! * \brief Copy the function or module, but override * the given attribute key with the new value. * * \param input The thing to annotate (BaseFunc or IRModule) * \param attr_key The attribute key. * \param attr_value The new attribute value. * * \tparam TFunc The corresponding function or module type. * * \returns The new function or module with updated attributes. * * \note This function performs copy on write optimization for func and module. * If we move a uniquely referenced func or module into WithAttr, * then no additional copy will be performed. * * This is also why we make it a function instead of a member function * and why we pass by value in the first argument.
* * \code * * // Recommended way to trigger copy on write * func = WithAttr(std::move(func), "key1", value1); * func = WithAttr(std::move(func), "key2", value2); * * \endcode */ template <typename TFunc> inline TFunc WithAttr(TFunc input, const std::string& attr_key, ObjectRef attr_value) { using TNode = typename TFunc::ContainerType; static_assert(TNode::_type_final, "Can only operate on the leaf nodes"); TNode* node = input.CopyOnWrite(); if (node->attrs.defined()) { node->attrs.CopyOnWrite()->dict.Set(attr_key, attr_value); } else { Map<String, ObjectRef> dict = {{attr_key, attr_value}}; node->attrs = DictAttrs(dict); } return input; } /*! * \brief Copy the function or module, but overrides the attributes with the entries from \p attrs. * * \param input The thing to annotate (BaseFunc or IRModule) * \param attrs Key/values attributes to add to \p input. * * \tparam TFunc The corresponding function or module type. * * \returns The new function or module with updated attributes. */ template <typename TFunc> inline TFunc WithAttrs(TFunc input, Map<String, ObjectRef> attrs) { using TNode = typename TFunc::ContainerType; static_assert(TNode::_type_final, "Can only operate on the leaf nodes"); TNode* node = input.CopyOnWrite(); if (node->attrs.defined()) { for (const auto& pair : attrs) { node->attrs.CopyOnWrite()->dict.Set(pair.first, pair.second); } } else { node->attrs = DictAttrs(std::move(attrs)); } return input; } /*! * \brief Copy the function or module, but removes the specified * attribute. * * \param input The thing to annotate (BaseFunc or IRModule) * \param attr_key The attribute key. * * \tparam TFunc The corresponding function or module type. * * \returns The new function or module with removed attribute. * * \note This function performs copy on write optimization for func and module. * If we move a uniquely referenced func or module into WithoutAttr, * then no additional copy will be performed. * * This is also why we make it as a function instead of a member function * and why we pass by value in the first argument. * * \code * * // Recommended way to trigger copy on write * func = WithoutAttr(std::move(func), "key1"); * func = WithoutAttr(std::move(func), "key2"); * * \endcode */ template <typename TFunc> inline TFunc WithoutAttr(TFunc input, const std::string& attr_key) { using TNode = typename TFunc::ContainerType; static_assert(TNode::_type_final, "Can only operate on the leaf nodes"); if (input->attrs.defined()) { TNode* node = input.CopyOnWrite(); node->attrs.CopyOnWrite()->dict.erase(attr_key); if (node->attrs->dict.size() == 0) { node->attrs = NullValue<DictAttrs>(); } } return input; } // Namespace containing detail implementations namespace detail { using runtime::TVMArgValue; // helper entry that does nothing in set_default/bound/describe calls. struct AttrNopEntry { using TSelf = AttrNopEntry; TSelf& describe(DMLC_ATTRIBUTE_UNUSED const char* str) { return *this; } template <typename T> TSelf& set_default(DMLC_ATTRIBUTE_UNUSED const T& value) { return *this; } template <typename T> TSelf& set_lower_bound(DMLC_ATTRIBUTE_UNUSED const T& begin) { return *this; } template <typename T> TSelf& set_upper_bound(DMLC_ATTRIBUTE_UNUSED const T& end) { return *this; } }; // Wrapper for normal visitor. 
class AttrNormalVisitor { public: explicit AttrNormalVisitor(AttrVisitor* visitor) : visitor_(visitor) {} template <typename T> AttrNopEntry operator()(const char* key, T* value) { visitor_->Visit(key, value); return AttrNopEntry(); } private: AttrVisitor* visitor_; }; class AttrsSEqualVisitor { public: bool result_{true}; // constructor AttrsSEqualVisitor(const Object* lhs, const Object* rhs, const SEqualReducer& equal) : lhs_(lhs), rhs_(rhs), equal_(equal) {} template <typename T> AttrNopEntry operator()(const char* key, T* lhs_value) { if (!result_) return AttrNopEntry(); const T* rhs_value = reinterpret_cast<const T*>( reinterpret_cast<const char*>(rhs_) + (reinterpret_cast<const char*>(lhs_value) - reinterpret_cast<const char*>(lhs_))); if (!equal_(*lhs_value, *rhs_value)) { result_ = false; } return AttrNopEntry(); } private: const Object* lhs_; const Object* rhs_; const SEqualReducer& equal_; }; class AttrsSHashVisitor { public: explicit AttrsSHashVisitor(const SHashReducer& hash_reducer) : hash_reducer_(hash_reducer) {} template <typename T> AttrNopEntry operator()(const char* key, T* value) { hash_reducer_(*value); return AttrNopEntry(); } private: const SHashReducer& hash_reducer_; }; // helper entry that does initialization, set default. template <typename T> struct AttrInitEntry { // The attributes using TSelf = AttrInitEntry<T>; // The type key const char* type_key_; // field name const char* key_; // internal value. T* value_; // whether the value is missing. // NOTE: initialize to false so that the destructor does not throw unless // AttrInitVisitor::operator() is committed to returning an instance of this class. // It is expected not to set this to true until that is true. bool value_missing_{false}; AttrInitEntry() = default; AttrInitEntry(AttrInitEntry&& other) { type_key_ = other.type_key_; key_ = other.key_; value_ = other.value_; value_missing_ = other.value_missing_; // avoid unexpected throw other.value_missing_ = false; } // If the value is still missing in destruction time throw an error. ~AttrInitEntry() DMLC_THROW_EXCEPTION { if (value_missing_) { std::ostringstream os; os << type_key_ << ": Cannot find required field \'" << key_ << "\' during initialization. " << "If the key is defined check that its type matches the declared type."; throw AttrError(os.str()); } } // override fields. // This function sets the lower bound of the attribute TSelf& set_lower_bound(const T& begin) { if (this->value_missing_) return *this; const T& val = *value_; if (begin > val) { std::ostringstream os; os << type_key_ << "." << key_ << ": " << "value " << val << " is smaller than the lower bound " << begin; throw AttrError(os.str()); } return *this; } // This function sets the upper bound of the attribute TSelf& set_upper_bound(const T& end) { if (this->value_missing_) return *this; const T& val = *value_; if (val > end) { std::ostringstream os; os << type_key_ << "." << key_ << ": " << "value " << val << " is bigger than the upper bound " << end; throw AttrError(os.str()); } return *this; } // set default when TSelf& set_default(const T& value) { if (!value_missing_) return *this; *value_ = value; value_missing_ = false; return *this; } TSelf& describe(DMLC_ATTRIBUTE_UNUSED const char* str) { return *this; } }; // Template function to allow smart conversion // from Expr types into the constants. 
template <typename T> inline void SetValue(T* ptr, const TVMArgValue& val) { *ptr = val.operator T(); } template <typename T> inline void SetIntValue(T* ptr, const TVMArgValue& val) { if (val.type_code() == kDLInt) { *ptr = static_cast<T>(val.value().v_int64); } else { IntImm expr = val; *ptr = static_cast<T>(expr->value); } } // Workaround for GCC8.1 / GCC8.2 template <> inline void SetValue<DataType>(DataType* ptr, const TVMArgValue& val) { *ptr = val.operator DataType(); } template <> inline void SetValue<std::string>(std::string* ptr, const TVMArgValue& val) { if (String::CanConvertFrom(val)) { *ptr = val.operator std::string(); } else { LOG(FATAL) << "Expect str"; } } template <> inline void SetValue<double>(double* ptr, const TVMArgValue& val) { if (val.type_code() == kDLFloat || val.type_code() == kDLInt) { *ptr = val.operator double(); } else { ObjectRef expr = val; ICHECK(expr.defined()); if (const IntImmNode* op = expr.as<IntImmNode>()) { *ptr = static_cast<double>(op->value); } else if (const FloatImmNode* op = expr.as<FloatImmNode>()) { *ptr = static_cast<double>(op->value); } else { LOG(FATAL) << "Expect float value, but get " << expr->GetTypeKey(); } } } template <> inline void SetValue<int>(int* ptr, const TVMArgValue& val) { SetIntValue(ptr, val); } template <> inline void SetValue<int64_t>(int64_t* ptr, const TVMArgValue& val) { SetIntValue(ptr, val); } template <> inline void SetValue<uint64_t>(uint64_t* ptr, const TVMArgValue& val) { SetIntValue(ptr, val); } template <> inline void SetValue<bool>(bool* ptr, const TVMArgValue& val) { SetIntValue(ptr, val); } // Visitor for value initialization template <typename FFind> class AttrInitVisitor { public: // Counter of number of matched attributes during visit. // This is used to decide if there is additional unmatched attributes. size_t hit_count_{0}; // constructor AttrInitVisitor(const char* type_key, FFind ffind) : type_key_(type_key), ffind_(ffind) {} template <typename T> AttrInitEntry<T> operator()(const char* key, T* value) { TVMArgValue val; AttrInitEntry<T> opt; opt.type_key_ = type_key_; opt.key_ = key; opt.value_ = value; if (ffind_(key, &val)) { SetValue(value, val); opt.value_missing_ = false; ++hit_count_; } else { opt.value_missing_ = true; } #if defined(__GNUC__) #pragma GCC diagnostic ignored "-Wpragmas" #pragma GCC diagnostic ignored "-Wpessimizing-move" #endif return std::move(opt); } private: // the type key const char* type_key_; FFind ffind_; }; template <typename FFind> inline AttrInitVisitor<FFind> CreateInitVisitor(const char* type_key, FFind ffind) { return AttrInitVisitor<FFind>(type_key, ffind); } /*! * \brief Helper struct to get the type name known to tvm. * \tparam T the type we are interested in. 
*/ template <typename T> struct TypeName { static constexpr const char* value = T::ContainerType::_type_key; }; template <> struct TypeName<int> { static constexpr const char* value = "int"; }; template <> struct TypeName<int64_t> { static constexpr const char* value = "int64"; }; template <> struct TypeName<uint64_t> { static constexpr const char* value = "uint64_t"; }; template <> struct TypeName<DataType> { static constexpr const char* value = "DataType"; }; template <> struct TypeName<std::string> { static constexpr const char* value = "str"; }; template <> struct TypeName<bool> { static constexpr const char* value = "bool"; }; template <> struct TypeName<void*> { static constexpr const char* value = "handle"; }; template <> struct TypeName<double> { static constexpr const char* value = "double"; }; class AttrDocEntry { public: using TSelf = AttrDocEntry; explicit AttrDocEntry(ObjectPtr<AttrFieldInfoNode> info) : info_(info) {} TSelf& describe(const char* str) { info_->description = str; return *this; } template <typename T> TSelf& set_default(const T& value) { std::ostringstream os; os << info_->type_info << ", default=" << value; info_->type_info = os.str(); return *this; } template <typename T> TSelf& set_lower_bound(DMLC_ATTRIBUTE_UNUSED T begin) { return *this; } template <typename T> TSelf& set_upper_bound(DMLC_ATTRIBUTE_UNUSED T end) { return *this; } private: ObjectPtr<AttrFieldInfoNode> info_; }; class AttrDocVisitor { public: template <typename T> AttrDocEntry operator()(const char* key, T* v) { ObjectPtr<AttrFieldInfoNode> info = make_object<AttrFieldInfoNode>(); info->name = key; info->type_info = TypeName<T>::value; fields_.push_back(AttrFieldInfo(info)); return AttrDocEntry(info); } Array<AttrFieldInfo> fields_; }; class AttrExistVisitor { public: std::string key_; bool exist_{false}; template <typename T> AttrNopEntry operator()(const char* key, T* v) { if (exist_) return AttrNopEntry(); if (key == key_) exist_ = true; return AttrNopEntry(); } }; template <typename T> struct AttrTriggerNonDefaultEntry { using TSelf = AttrTriggerNonDefaultEntry<T>; // constructor AttrTriggerNonDefaultEntry(AttrVisitor* visitor, const char* key, T* data) : visitor_(visitor), key_(key), data_(data) {} ~AttrTriggerNonDefaultEntry() DMLC_THROW_EXCEPTION { if (trigger_) { visitor_->Visit(key_, data_); } } TSelf& describe(DMLC_ATTRIBUTE_UNUSED const char* str) { return *this; } TSelf& set_default(const T& value) { if (tvm::StructuralEqual()(value, *data_)) { trigger_ = false; } return *this; } TSelf& set_lower_bound(DMLC_ATTRIBUTE_UNUSED const T& begin) { return *this; } TSelf& set_upper_bound(DMLC_ATTRIBUTE_UNUSED const T& end) { return *this; } private: AttrVisitor* visitor_; const char* key_; T* data_; bool trigger_{true}; }; class AttrNonDefaultVisitor { public: explicit AttrNonDefaultVisitor(AttrVisitor* visitor) : visitor_(visitor) {} template <typename T> AttrTriggerNonDefaultEntry<T> operator()(const char* key, T* value) { return AttrTriggerNonDefaultEntry<T>(visitor_, key, value); } private: AttrVisitor* visitor_; }; } // namespace detail /*! * \brief The base class of all the attribute nodes, implemented using the * "curiously recurring template pattern". * * \tparam DerivedType The final attribute type.
*/ template <typename DerivedType> class AttrsNode : public BaseAttrsNode { public: void VisitAttrs(AttrVisitor* v) { ::tvm::detail::AttrNormalVisitor vis(v); self()->_tvm_VisitAttrs(vis); } void VisitNonDefaultAttrs(AttrVisitor* v) { ::tvm::detail::AttrNonDefaultVisitor vis(v); self()->_tvm_VisitAttrs(vis); } void InitByPackedArgs(const runtime::TVMArgs& args, bool allow_unknown) final { ICHECK_EQ(args.size() % 2, 0); const int kLinearSearchBound = 16; int hit_count = 0; // applies two strategies to lookup if (args.size() < kLinearSearchBound) { // linear search. auto ffind = [&args](const char* key, runtime::TVMArgValue* val) { for (int i = 0; i < args.size(); i += 2) { ICHECK_EQ(args.type_codes[i], kTVMStr); if (!std::strcmp(key, args.values[i].v_str)) { *val = args[i + 1]; return true; } } return false; }; auto vis = ::tvm::detail::CreateInitVisitor(DerivedType::_type_key, ffind); self()->_tvm_VisitAttrs(vis); hit_count = vis.hit_count_; } else { // construct a map then do lookup. std::unordered_map<std::string, runtime::TVMArgValue> kwargs; for (int i = 0; i < args.size(); i += 2) { ICHECK_EQ(args.type_codes[i], kTVMStr); kwargs[args[i].operator std::string()] = args[i + 1]; } auto ffind = [&kwargs](const char* key, runtime::TVMArgValue* val) { auto it = kwargs.find(key); if (it != kwargs.end()) { *val = it->second; return true; } return false; }; auto vis = ::tvm::detail::CreateInitVisitor(DerivedType::_type_key, ffind); self()->_tvm_VisitAttrs(vis); hit_count = vis.hit_count_; } // error handling, slow path if (hit_count * 2 != args.size() && !allow_unknown) { for (int i = 0; i < args.size(); i += 2) { ::tvm::detail::AttrExistVisitor visitor; visitor.key_ = args[i].operator std::string(); self()->_tvm_VisitAttrs(visitor); if (!visitor.exist_) { std::ostringstream os; os << DerivedType::_type_key << ": does not have field \'" << visitor.key_ << "\', Possible fields:\n"; os << "----------------\n"; this->PrintDocString(os); throw AttrError(os.str()); } } } } bool SEqualReduce(const DerivedType* other, SEqualReducer equal) const { DerivedType* pself = self(); ::tvm::detail::AttrsSEqualVisitor visitor(pself, other, equal); self()->_tvm_VisitAttrs(visitor); return visitor.result_; } void SHashReduce(SHashReducer hash_reducer) const { ::tvm::detail::AttrsSHashVisitor visitor(hash_reducer); self()->_tvm_VisitAttrs(visitor); } Array<AttrFieldInfo> ListFieldInfo() const final { ::tvm::detail::AttrDocVisitor visitor; self()->_tvm_VisitAttrs(visitor); return visitor.fields_; } private: DerivedType* self() const { return const_cast<DerivedType*>(static_cast<const DerivedType*>(this)); } }; template <typename... Args> inline void BaseAttrsNode::InitBySeq(Args&&... args) { runtime::PackedFunc pf( [this](const TVMArgs& args, TVMRetValue* rv) { this->InitByPackedArgs(args); }); pf(std::forward<Args>(args)...); } inline void BaseAttrsNode::PrintDocString(std::ostream& os) const { // NOLINT(*) Array<AttrFieldInfo> entry = this->ListFieldInfo(); for (AttrFieldInfo info : entry) { os << info->name << " : " << info->type_info << '\n'; if (info->description.length() != 0) { os << " " << info->description << '\n'; } } } } // namespace tvm #endif // TVM_IR_ATTRS_H_
https://github.com/zk-ml/tachikoma
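The header above shows the declaration macros and the initialization machinery only in fragments, so here is a minimal usage sketch (not part of the header) of how an attrs node is typically declared and then filled from key/value arguments. The struct ExampleDenseAttrs, its fields, and the call site are hypothetical names chosen for illustration; as the header's own example notes, TVM_REGISTER_NODE_TYPE would normally appear once in a .cc file.

#include <tvm/ir/attrs.h>

namespace tvm {

// Hypothetical attrs node, modeled on the MyAttrs example in the header.
struct ExampleDenseAttrs : public AttrsNode<ExampleDenseAttrs> {
  int units;
  double dropout;
  String layout;

  TVM_DECLARE_ATTRS(ExampleDenseAttrs, "attrs.ExampleDenseAttrs") {
    TVM_ATTR_FIELD(units).set_lower_bound(1).describe("Number of output units.");
    TVM_ATTR_FIELD(dropout).set_default(0.0).describe("Dropout probability.");
    TVM_ATTR_FIELD(layout).set_default("NC").describe("Data layout.");
  }
};

// Normally registered once in a .cc file:
// TVM_REGISTER_NODE_TYPE(ExampleDenseAttrs);

void ExampleDenseAttrsUsage() {
  // Build the node and initialize it from key/value pairs. `units` has no
  // default, so omitting it would raise an AttrError; `dropout` and `layout`
  // fall back to their declared defaults.
  auto n = make_object<ExampleDenseAttrs>();
  n->InitBySeq("units", 128, "dropout", 0.1);
  ICHECK_EQ(n->units, 128);

  // Dict-backed attributes carry free-form key/value metadata and can be
  // queried back with a default value.
  DictAttrs dattrs(Map<String, ObjectRef>{{"kTarget", String("llvm")}});
  Optional<String> target = dattrs.GetAttr<String>("kTarget");
  ICHECK(target.defined());
}

}  // namespace tvm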
include/tvm/ir/diagnostic.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file diagnostic.h * \brief A new diagnostic interface for TVM error reporting. * */ #ifndef TVM_IR_DIAGNOSTIC_H_ #define TVM_IR_DIAGNOSTIC_H_ #include <tvm/ir/module.h> #include <tvm/parser/source_map.h> #include <sstream> #include <string> namespace tvm { using tvm::parser::SourceMap; using tvm::runtime::TypedPackedFunc; /*! \brief The diagnostic level, controls the printing of the message. */ enum class DiagnosticLevel : int { kBug = 10, kError = 20, kWarning = 30, kNote = 40, kHelp = 50, }; class DiagnosticBuilder; /*! \brief A compiler diagnostic. */ class Diagnostic; /*! \brief A compiler diagnostic message. */ class DiagnosticNode : public Object { public: /*! \brief The level. */ DiagnosticLevel level; /*! \brief The span at which to report an error. */ Span span; /*! \brief The diagnostic message. */ String message; // override attr visitor void VisitAttrs(AttrVisitor* v) { v->Visit("level", &level); v->Visit("span", &span); v->Visit("message", &message); } bool SEqualReduce(const DiagnosticNode* other, SEqualReducer equal) const { return equal(this->level, other->level) && equal(this->span, other->span) && equal(this->message, other->message); } static constexpr const char* _type_key = "Diagnostic"; TVM_DECLARE_FINAL_OBJECT_INFO(DiagnosticNode, Object); }; class Diagnostic : public ObjectRef { public: TVM_DLL Diagnostic(DiagnosticLevel level, Span span, const std::string& message); static DiagnosticBuilder Bug(Span span); static DiagnosticBuilder Error(Span span); static DiagnosticBuilder Warning(Span span); static DiagnosticBuilder Note(Span span); static DiagnosticBuilder Help(Span span); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(Diagnostic, ObjectRef, DiagnosticNode); }; /*! * \brief A wrapper around std::stringstream to build a diagnostic. */ class DiagnosticBuilder { public: /*! \brief The level. */ DiagnosticLevel level; /*! \brief The source name. */ SourceName source_name; /*! \brief The span of the diagnostic. */ Span span; template <typename T> DiagnosticBuilder& operator<<(const T& val) { // NOLINT(*) stream_ << val; return *this; } DiagnosticBuilder() : level(DiagnosticLevel::kError), source_name(), span(Span()) {} DiagnosticBuilder(const DiagnosticBuilder& builder) : level(builder.level), source_name(builder.source_name), span(builder.span) {} DiagnosticBuilder(DiagnosticLevel level, Span span) : level(level), span(span) {} operator Diagnostic() { return Diagnostic(this->level, this->span, this->stream_.str()); } private: std::stringstream stream_; friend class Diagnostic; }; /*! * \brief A diagnostic context for recording errors against a source file. */ class DiagnosticContext; /*! \brief Display diagnostics in a given display format. 
* * A diagnostic renderer is responsible for converting the * raw diagnostics into consumable output. * * For example the terminal renderer will render a sequence * of compiler diagnostics to std::out and std::err in * a human readable form. */ class DiagnosticRendererNode : public Object { public: TypedPackedFunc<void(DiagnosticContext ctx)> renderer; // override attr visitor void VisitAttrs(AttrVisitor* v) {} static constexpr const char* _type_key = "DiagnosticRenderer"; TVM_DECLARE_FINAL_OBJECT_INFO(DiagnosticRendererNode, Object); }; class DiagnosticRenderer : public ObjectRef { public: TVM_DLL DiagnosticRenderer(TypedPackedFunc<void(DiagnosticContext ctx)> render); TVM_DLL DiagnosticRenderer() : DiagnosticRenderer(TypedPackedFunc<void(DiagnosticContext ctx)>()) {} void Render(const DiagnosticContext& ctx); DiagnosticRendererNode* operator->() { ICHECK(get() != nullptr); return static_cast<DiagnosticRendererNode*>(get_mutable()); } TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(DiagnosticRenderer, ObjectRef, DiagnosticRendererNode); }; class DiagnosticContextNode : public Object { public: /*! \brief The Module to report against. */ IRModule module; /*! \brief The set of diagnostics to report. */ Array<Diagnostic> diagnostics; /*! \brief The renderer set for the context. */ DiagnosticRenderer renderer; void VisitAttrs(AttrVisitor* v) { v->Visit("module", &module); v->Visit("diagnostics", &diagnostics); } bool SEqualReduce(const DiagnosticContextNode* other, SEqualReducer equal) const { return equal(module, other->module) && equal(diagnostics, other->diagnostics); } static constexpr const char* _type_key = "DiagnosticContext"; TVM_DECLARE_FINAL_OBJECT_INFO(DiagnosticContextNode, Object); }; class DiagnosticContext : public ObjectRef { public: TVM_DLL DiagnosticContext(const IRModule& module, const DiagnosticRenderer& renderer); TVM_DLL static DiagnosticContext Default(const IRModule& source_map); /*! \brief Emit a diagnostic. * \param diagnostic The diagnostic to emit. */ void Emit(const Diagnostic& diagnostic); /*! \brief Emit a diagnostic and then immediately attempt to render all errors. * * \param diagnostic The diagnostic to emit. * * Note: this will raise an exception if you would like to instead continue execution * use the Emit method instead. */ void EmitFatal(const Diagnostic& diagnostic); /*! \brief Render the errors and raise a DiagnosticError exception. */ void Render(); DiagnosticContextNode* operator->() { ICHECK(get() != nullptr); return static_cast<DiagnosticContextNode*>(get_mutable()); } TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(DiagnosticContext, ObjectRef, DiagnosticContextNode); }; DiagnosticRenderer TerminalRenderer(std::ostream& ostream); } // namespace tvm #endif // TVM_IR_DIAGNOSTIC_H_
https://github.com/zk-ml/tachikoma
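A short, hedged sketch of the flow the diagnostics API above describes: create a context for a module, emit a spanned error through the builder, then render. The helper name and the error message are illustrative only.

#include <tvm/ir/diagnostic.h>

namespace tvm {

// Hypothetical helper: report a single error at `span` inside `mod`.
void ReportExampleError(const IRModule& mod, const Span& span) {
  // The default context uses the terminal renderer.
  DiagnosticContext ctx = DiagnosticContext::Default(mod);
  // DiagnosticBuilder streams message fragments and converts to a Diagnostic.
  ctx.Emit(Diagnostic::Error(span) << "example diagnostic: unsupported construct");
  // Render all collected diagnostics; raises if any errors were emitted.
  ctx.Render();
}

}  // namespace tvm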
include/tvm/ir/env_func.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/env_func.h * \brief Serializable global function used in IR. */ #ifndef TVM_IR_ENV_FUNC_H_ #define TVM_IR_ENV_FUNC_H_ #include <tvm/node/reflection.h> #include <string> #include <utility> namespace tvm { /*! * \brief A serializable function backed by TVM's global environment. * * This is a wrapper to enable serializable global PackedFunc. * An EnvFunc is saved by its name in the global registry * under the assumption that the same function is registered during load. * \sa EnvFunc */ class EnvFuncNode : public Object { public: /*! \brief Unique name of the global function */ String name; /*! \brief The internal packed function */ runtime::PackedFunc func; /*! \brief constructor */ EnvFuncNode() {} void VisitAttrs(AttrVisitor* v) { v->Visit("name", &name); } bool SEqualReduce(const EnvFuncNode* other, SEqualReducer equal) const { // name uniquely identifies the env function. return name == other->name; } void SHashReduce(SHashReducer hash_reduce) const { // Name uniquely identifies the env function. hash_reduce(name); } static constexpr const char* _type_key = "EnvFunc"; static constexpr bool _type_has_method_sequal_reduce = true; static constexpr bool _type_has_method_shash_reduce = true; TVM_DECLARE_FINAL_OBJECT_INFO(EnvFuncNode, Object); }; /*! * \brief Managed reference to EnvFuncNode. * \sa EnvFuncNode */ class EnvFunc : public ObjectRef { public: EnvFunc() {} explicit EnvFunc(ObjectPtr<Object> n) : ObjectRef(n) {} /*! \return The internal global function pointer */ const EnvFuncNode* operator->() const { return static_cast<const EnvFuncNode*>(get()); } /*! * \brief Invoke the function. * \param args The arguments * \returns The return value. */ template <typename... Args> runtime::TVMRetValue operator()(Args&&... args) const { const EnvFuncNode* n = operator->(); ICHECK(n != nullptr); return n->func(std::forward<Args>(args)...); } /*! * \brief Get a global function based on the name. * \param name The name of the global function. * \return The created global function. * \note The function can be unique */ TVM_DLL static EnvFunc Get(const String& name); /*! \brief specify container node */ using ContainerType = EnvFuncNode; }; /*! * \brief Please refer to \ref TypedEnvFuncAnchor "TypedEnvFunc<R(Args..)>" */ template <typename FType> class TypedEnvFunc; /*! * \anchor TypedEnvFuncAnchor * \brief A typed version of EnvFunc. * It is backed by a GlobalFuncNode internally. * * \tparam R The return value of the function. * \tparam Args The argument signature of the function. * \sa EnvFunc */ template <typename R, typename... Args> class TypedEnvFunc<R(Args...)> : public ObjectRef { public: /*! 
\brief short hand for this function type */ using TSelf = TypedEnvFunc<R(Args...)>; TypedEnvFunc() {} explicit TypedEnvFunc(ObjectPtr<Object> n) : ObjectRef(n) {} /*! * \brief Assign global function to a TypedEnvFunc * \param other Another global function. * \return reference to self. */ TSelf& operator=(const EnvFunc& other) { ObjectRef::operator=(other); return *this; } /*! \return The internal global function pointer */ const EnvFuncNode* operator->() const { return static_cast<const EnvFuncNode*>(get()); } /*! * \brief Invoke the function. * \param args The arguments * \returns The return value. */ R operator()(Args... args) const { const EnvFuncNode* n = operator->(); ICHECK(n != nullptr); return runtime::detail::typed_packed_call_dispatcher<R>::run(n->func, std::forward<Args>(args)...); } /*! \brief specify container node */ using ContainerType = EnvFuncNode; }; } // namespace tvm #endif // TVM_IR_ENV_FUNC_H_
https://github.com/zk-ml/tachikoma
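A minimal sketch of how EnvFunc and TypedEnvFunc above are meant to be used, assuming a global PackedFunc has already been registered elsewhere under the hypothetical name "example.add_one" (e.g. via TVM_REGISTER_GLOBAL).

#include <tvm/ir/env_func.h>

namespace tvm {

int CallAddOneExample(int x) {
  // Look up the serializable global function by name.
  EnvFunc f = EnvFunc::Get("example.add_one");

  // Untyped call: the result comes back as a TVMRetValue.
  int untyped_result = f(x);

  // Typed wrapper: the signature is fixed at the call site.
  TypedEnvFunc<int(int)> add_one;
  add_one = f;
  int typed_result = add_one(x);

  ICHECK_EQ(untyped_result, typed_result);
  return typed_result;
}

}  // namespace tvm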
include/tvm/ir/error.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/error.h * \brief Utilities for error tracking and reporting. */ #ifndef TVM_IR_ERROR_H_ #define TVM_IR_ERROR_H_ #include <tvm/ir/module.h> #include <tvm/ir/span.h> #include <sstream> #include <string> #include <unordered_map> #include <vector> namespace tvm { /*! * \brief A wrapper around std::stringstream to build error. * * Can be consumed by CompileError to construct an error. * * \code * * void ReportError(const CompileError& err); * * void Test(int number) { * // Use error reporter to construct an error. * ReportError(ErrorBuilder() << "This is an error number=" << number); * } * * \endcode */ struct ErrorBuilder { public: template <typename T> ErrorBuilder& operator<<(const T& val) { // NOLINT(*) stream_ << val; return *this; } private: std::stringstream stream_; friend class CompileError; }; /*! * \brief Custom Error class to be thrown during compilation. */ class CompileError : public Error { public: /*! \brief Location of the error */ Span span; /*! * \brief construct error from message. * \param msg The message */ explicit CompileError(const std::string& msg) : Error(msg), span(nullptr) {} /*! * \brief construct error from error builder. * \param err The error builder */ CompileError(const ErrorBuilder& err) : Error(err.stream_.str()), span(nullptr) {} // NOLINT(*) /*! * \brief copy constructor. * \param other The other ereor. */ CompileError(const CompileError& other) : Error(other.what()), span(other.span) {} // NOLINT(*) /*! * \brief default constructor. */ CompileError() : Error(""), span(nullptr) {} }; /*! * \brief An abstraction around how errors are stored and reported. * Designed to be opaque to users, so we can support a robust and simpler * error reporting mode, as well as a more complex mode. * * The first mode is the most accurate: we report a Relay error at a specific * Span, and then render the error message directly against a textual representation * of the program, highlighting the exact lines in which it occurs. This mode is not * implemented in this PR and will not work. * * The second mode is a general-purpose mode, which attempts to annotate the program's * textual format with errors. * * The final mode represents the old mode, if we report an error that has no span or * expression, we will default to throwing an exception with a textual representation * of the error and no indication of where it occurred in the original program. * * The latter mode is not ideal, and the goal of the new error reporting machinery is * to avoid ever reporting errors in this style. */ class ErrorReporter { public: /*! \brief default constructor. */ ErrorReporter() : errors_(), node_to_error_() {} /*! * \brief Report a CompileError. * * This API is useful for reporting spanned errors. 
* * \param err The error to report. */ void Report(const CompileError& err) { if (!err.span.defined()) { throw err; } this->errors_.push_back(err); } /*! * \brief Report an error against a program, using the full program * error reporting strategy. * * This error reporting method requires the global function in which * to report an error, the expression to report the error on, * and the error object. * * \param global The global function in which the expression is contained. * \param node The expression or type to report the error at. * \param err The error message to report. */ void ReportAt(const GlobalVar& global, const ObjectRef& node, std::stringstream& err) { std::string err_msg = err.str(); this->ReportAt(global, node, CompileError(err_msg)); } /*! * \brief Report an error against a program, using the full program * error reporting strategy. * * This error reporting method requires the global function in which * to report an error, the expression to report the error on, * and the error object. * * \param global The global function in which the expression is contained. * \param node The expression or type to report the error at. * \param err The error to report. */ void ReportAt(const GlobalVar& global, const ObjectRef& node, const CompileError& err); /*! * \brief Render all reported errors and exit the program. * * This function should be used after executing a pass to render reported errors. * * It will build an error message from the set of errors, depending on the error * reporting strategy. * * \param module The module to report errors on. * \param use_color Controls whether to colorize the output. */ void RenderErrors(const IRModule& module, bool use_color = true); inline bool AnyErrors() { return errors_.size() != 0; } private: std::vector<CompileError> errors_; std::unordered_map<ObjectRef, std::vector<size_t>, ObjectPtrHash, ObjectPtrEqual> node_to_error_; std::unordered_map<ObjectRef, GlobalVar, ObjectPtrHash, ObjectPtrEqual> node_to_gv_; }; } // namespace tvm #endif // TVM_IR_ERROR_H_
https://github.com/zk-ml/tachikoma
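A hedged sketch of the reporter workflow the comments above describe: collect spanned errors against nodes of a module with ReportAt, then render them in one pass. The pass name, global var, and node are placeholders.

#include <tvm/ir/error.h>

namespace tvm {

// Hypothetical checking pass: record one error against `node` in `global`,
// then render everything that was collected.
void ExampleCheckPass(const IRModule& mod, const GlobalVar& global, const ObjectRef& node) {
  ErrorReporter reporter;
  reporter.ReportAt(global, node, ErrorBuilder() << "example error: operator is not supported");
  if (reporter.AnyErrors()) {
    // Builds the combined message (annotated program or plain text) per the
    // reporting strategy and terminates compilation.
    reporter.RenderErrors(mod, /*use_color=*/false);
  }
}

}  // namespace tvm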
include/tvm/ir/expr.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/expr.h * \brief Base expr nodes in TVM. */ #ifndef TVM_IR_EXPR_H_ #define TVM_IR_EXPR_H_ #include <tvm/ir/span.h> #include <tvm/ir/type.h> #include <tvm/node/node.h> #include <tvm/runtime/container/string.h> #include <tvm/runtime/object.h> #include <algorithm> #include <limits> #include <string> #include <type_traits> namespace tvm { using tvm::runtime::String; // Forward-declare VirtualDevice to avoid circular imports. class VirtualDevice; /*! * \brief Base type of all the expressions. * \sa Expr */ class BaseExprNode : public Object { public: /*! * \brief Span that points to the original source code. * Reserved debug information. */ mutable Span span; static constexpr const char* _type_key = "BaseExpr"; static constexpr const bool _type_has_method_sequal_reduce = true; static constexpr const bool _type_has_method_shash_reduce = true; static constexpr const uint32_t _type_child_slots = 62; TVM_DECLARE_BASE_OBJECT_INFO(BaseExprNode, Object); }; /*! * \brief Managed reference to BaseExprNode. * \sa BaseExprNode */ class BaseExpr : public ObjectRef { public: TVM_DEFINE_OBJECT_REF_METHODS(BaseExpr, ObjectRef, BaseExprNode); }; /*! * \brief Base node of all primitive expressions. * * A primitive expression deals with low-level * POD data types and handles without * doing life-cycle management for objects. * * PrimExpr is used in the low-level code * optimizations and integer analysis. * * \sa PrimExpr */ class PrimExprNode : public BaseExprNode { public: /*! * \brief The runtime data type of the primitive expression. * * runtime::DataType(dtype) provides coarse grained type information * during compile time and runtime. It is eagerly built in * PrimExpr expression construction and can be used for * quick type checking. * * dtype is sufficient to decide the Type of the PrimExpr * when it corresponds to POD value types such as i32. * * When dtype is DataType::Handle(), the expression could corresponds to * a more fine-grained Type, and we can get the type by running lazy type inference. */ DataType dtype; static constexpr const char* _type_key = "PrimExpr"; static constexpr const uint32_t _type_child_slots = 38; TVM_DECLARE_BASE_OBJECT_INFO(PrimExprNode, BaseExprNode); }; /*! * \brief Reference to PrimExprNode. * \sa PrimExprNode */ class PrimExpr : public BaseExpr { public: /*! * \brief construct from integer. * \param value The value to be constructed. */ TVM_DLL PrimExpr(int32_t value); // NOLINT(*) /*! * \brief construct from float. * \param value The value to be constructed. */ TVM_DLL PrimExpr(float value); // NOLINT(*) /*! \return the data type of this expression. 
*/ DataType dtype() const { return static_cast<const PrimExprNode*>(get())->dtype; } TVM_DEFINE_OBJECT_REF_METHODS(PrimExpr, BaseExpr, PrimExprNode); private: // Internal function for conversion. friend struct runtime::PackedFuncValueConverter<PrimExpr>; TVM_DLL static PrimExpr FromObject_(ObjectRef ref); }; /*! * \brief add operator * * \param a left operand * \param b right operand * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr operator+(PrimExpr a, PrimExpr b); /*! * \brief subtraction operator * * \param a left operand * \param b right operand * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr operator-(PrimExpr a, PrimExpr b); /*! * \brief negation. * * \param a input. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr operator-(PrimExpr a); /*! * \brief multiplication operator * * \param a left operand * \param b right operand * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr operator*(PrimExpr a, PrimExpr b); /*! * \brief division operator * * \param a left operand * \param b right operand * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr operator/(PrimExpr a, PrimExpr b); /*! * \brief left shift operator * * \param a left operand * \param b right operand * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr operator<<(PrimExpr a, PrimExpr b); /*! * \brief right shift operator * * \param a left operand * \param b right operand * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr operator>>(PrimExpr a, PrimExpr b); /*! * \brief greater * * \param a left operand * \param b right operand * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr operator>(PrimExpr a, PrimExpr b); /*! * \brief greater_equal * * \param a left operand * \param b right operand * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr operator>=(PrimExpr a, PrimExpr b); /*! * \brief less * * \param a left operand * \param b right operand * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr operator<(PrimExpr a, PrimExpr b); /*! * \brief less_equal * * \param a left operand * \param b right operand * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr operator<=(PrimExpr a, PrimExpr b); /*! * \brief equal * * \param a left operand * \param b right operand * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr operator==(PrimExpr a, PrimExpr b); /*! * \brief not_equal * * \param a left operand * \param b right operand * \return The result expression. 
* \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr operator!=(PrimExpr a, PrimExpr b); /*! * \brief and * * \param a left operand * \param b right operand * \return The result expression. * \note This operator does eager constant folding. */ TVM_DLL PrimExpr operator&&(PrimExpr a, PrimExpr b); /*! * \brief or * * \param a left operand * \param b right operand * \return The result expression. * \note This operator does eager constant folding. */ TVM_DLL PrimExpr operator||(PrimExpr a, PrimExpr b); /*! * \brief not * * \param a left operand * \return The result expression. * \note This operator does eager constant folding. */ TVM_DLL PrimExpr operator!(PrimExpr a); /*! * \brief take bitwise and of two values * * \param a left operand * \param b right operand * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr operator&(PrimExpr a, PrimExpr b); /*! * \brief take bitwise or of two values * * \param a left operand * \param b right operand * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr operator|(PrimExpr a, PrimExpr b); /*! * \brief take bitwise xor of two values * * \param a left operand * \param b right operand * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr operator^(PrimExpr a, PrimExpr b); /*! * \brief take bitwise negation of two values * * \param a the input expression. * \return The result expression. * \note this function does eager constant folding for * index types(int32, int64) when possible. */ TVM_DLL PrimExpr operator~(PrimExpr a); /*! * \brief Base node of all non-primitive expressions. * * RelayExpr supports tensor types, functions and ADT as * first class citizens. The life-cycle of the corresponding * objects are implicitly managed by the language. * * \sa RelayExpr */ class RelayExprNode : public BaseExprNode { public: /*! * \brief Stores the result of type inference(type checking). * * \note This can be undefined before type inference. * This value is discarded during serialization. */ mutable Type checked_type_ = Type(nullptr); /*! * \return The checked_type */ inline const Type& checked_type() const; /*! * \brief Check if the inferred(checked) type of the Expr * is backed by a TTypeNode and return it. * * \note This function will thrown an error if the node type * of this Expr is not TTypeNode. * * \return The corresponding TTypeNode pointer. * \tparam The specific TypeNode we look for. */ template <typename TTypeNode> inline const TTypeNode* type_as() const; /*! * \brief The virtual device (VirtualDevice) for this node (the result of device planning). * For first-order expressions (non functions), this describes where the result of evaluating the * expression should be stored. Note that currently, all composite first-order values (tuples, * references, ADTs) must be stored on the same virtual device. This means that it is not possible * to store two tuple fields on different devices, so we only need one virtual device for these * types. * * For expressions that have the function type, the virtual device describes where the result of * the call to the function or closure is stored (instead of where the function itself is stored). 
* For example, the virtual device of f = fn(x) { body } is the virtual device of f(y), not where * the function itself is stored. Note that f(y)'s virtual device will be the same as the virtual * device of body. For more details, see the documentation in * src/relay/transforms/device_planner.cc. * * The VirtualDevice's Target field describes how the body of the function should be compiled. * * Set to VirtualDevice::FullyUnconstrained by default. * * \note Unfortunately, the type of virtual_device_ needs to be ObjectRef to avoid a circular * import. */ mutable ObjectRef virtual_device_; /*! * \return The virtual device (VirtualDevice). * If the virtual device is not defined, returns VirtualDevice::FullyUnconstrained(). * Note that for function types, the virtual device is the device where the result of a * call to the function is stored, not where the function itself lives. * For example, the virtual device of f = fn(x) { body } is the virtual device of f(y), not where * the function itself is stored. Note that f(y)'s virtual device will be the same as the virtual * device of body. * * See the documentation of the virtual_device_ field (above) for more details. */ VirtualDevice virtual_device() const; static constexpr const char* _type_key = "RelayExpr"; static constexpr const uint32_t _type_child_slots = 22; TVM_DECLARE_BASE_OBJECT_INFO(RelayExprNode, BaseExprNode); }; /*! * \brief Managed reference to RelayExprNode. * \sa RelayExprNode */ class RelayExpr : public BaseExpr { public: TVM_DEFINE_OBJECT_REF_METHODS(RelayExpr, BaseExpr, RelayExprNode); }; class GlobalVar; /*! * \brief Global variable that lives in the top-level module. * * A GlobalVar only refers to function definitions. * This is used to enable recursive calls between function. * * \sa GlobalVarNode */ class GlobalVarNode : public RelayExprNode { public: /*! \brief The name of the variable, this only acts as a hint. */ String name_hint; void VisitAttrs(AttrVisitor* v) { v->Visit("name_hint", &name_hint); v->Visit("virtual_device_", &virtual_device_); v->Visit("span", &span); v->Visit("_checked_type_", &checked_type_); } bool SEqualReduce(const GlobalVarNode* other, SEqualReducer equal) const { // name matters for global var. return equal(name_hint, other->name_hint) && equal.FreeVarEqualImpl(this, other); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(name_hint); hash_reduce.FreeVarHashImpl(this); } static constexpr const char* _type_key = "GlobalVar"; TVM_DECLARE_FINAL_OBJECT_INFO(GlobalVarNode, RelayExprNode); }; /*! * \brief Managed reference to GlobalVarNode. * \sa GlobalVarNode */ class GlobalVar : public RelayExpr { public: TVM_DLL explicit GlobalVar(String name_hint, Type type = {}, Span span = {}); TVM_DEFINE_OBJECT_REF_METHODS(GlobalVar, RelayExpr, GlobalVarNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(GlobalVarNode); }; // PrimExprs that are useful as runtime containers. // /*! * \brief Constant integer literals in the program. * \sa IntImm */ class IntImmNode : public PrimExprNode { public: /*! \brief the Internal value. */ int64_t value; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("value", &value); v->Visit("span", &span); } bool SEqualReduce(const IntImmNode* other, SEqualReducer equal) const { return equal(dtype, other->dtype) && equal(value, other->value); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce(value); } static constexpr const char* _type_key = "IntImm"; TVM_DECLARE_FINAL_OBJECT_INFO(IntImmNode, PrimExprNode); }; /*! 
* \brief Managed reference class to IntImmNode. * * \sa IntImmNode */ class IntImm : public PrimExpr { public: /*! * \brief Constructor. * \param dtype The data type of the value. * \param value The internal value. * \param span The location of this object in the source code. */ TVM_DLL IntImm(DataType dtype, int64_t value, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(IntImm, PrimExpr, IntImmNode); }; /*! * \brief Constant floating point literals in the program. * \sa FloatImm */ class FloatImmNode : public PrimExprNode { public: /*! \brief The constant value content. */ double value; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("value", &value); v->Visit("span", &span); } bool SEqualReduce(const FloatImmNode* other, SEqualReducer equal) const { return equal(dtype, other->dtype) && equal(value, other->value); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); hash_reduce(value); } static constexpr const char* _type_key = "FloatImm"; TVM_DECLARE_FINAL_OBJECT_INFO(FloatImmNode, PrimExprNode); }; /*! * \brief Managed reference class to FloatImmNode. * * \sa FloatImmNode */ class FloatImm : public PrimExpr { public: /*! * \brief Constructor. * \param dtype The data type of the value. * \param value The internal value. * \param span The location in the source code. */ TVM_DLL FloatImm(DataType dtype, double value, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(FloatImm, PrimExpr, FloatImmNode); }; /*! * \brief Boolean constant. * * This reference type is useful to add additional compile-time * type checks and helper functions for Integer equal comparisons. */ class Bool : public IntImm { public: explicit Bool(bool value, Span span = Span()) : IntImm(DataType::Bool(), value, span) {} Bool operator!() const { return Bool((*this)->value == 0); } operator bool() const { return (*this)->value != 0; } TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(Bool, IntImm, IntImmNode); }; // Overload operators to make sure we have the most fine grained types. inline Bool operator||(const Bool& a, bool b) { return Bool(a.operator bool() || b); } inline Bool operator||(bool a, const Bool& b) { return Bool(a || b.operator bool()); } inline Bool operator||(const Bool& a, const Bool& b) { return Bool(a.operator bool() || b.operator bool()); } inline Bool operator&&(const Bool& a, bool b) { return Bool(a.operator bool() && b); } inline Bool operator&&(bool a, const Bool& b) { return Bool(a && b.operator bool()); } inline Bool operator&&(const Bool& a, const Bool& b) { return Bool(a.operator bool() && b.operator bool()); } inline bool operator==(const Bool& a, bool b) { return a.operator bool() == b; } inline bool operator==(bool a, const Bool& b) { return a == b.operator bool(); } inline bool operator==(const Bool& a, const Bool& b) { return a.operator bool() == b.operator bool(); } /*! * \brief Container of constant int that adds more constructors. * * This is used to store and automate type check * attributes that must be constant integer. * * \sa IntImm */ class Integer : public IntImm { public: Integer() {} /*! * \brief constructor from node. */ explicit Integer(ObjectPtr<Object> node) : IntImm(node) {} /*! * \brief Construct integer from int value. */ Integer(int value, Span span = Span()) : IntImm(DataType::Int(32), value, span) {} // NOLINT(*) /*! * \brief Construct integer from int imm. * \param other The other value. */ Integer(IntImm other) : IntImm(std::move(other)) {} // NOLINT(*) /*! * \brief Constructor from enum * \tparam Enum The enum type. 
* \param value The enum value. */ template <typename Enum, typename = typename std::enable_if<std::is_enum<Enum>::value>::type> explicit Integer(Enum value) : Integer(static_cast<int>(value)) { static_assert(std::is_same<int, typename std::underlying_type<Enum>::type>::value, "declare enum to be enum int to use visitor"); } /*! * \brief Assign an expression to integer. * \param other another expression. */ Integer& operator=(const IntImm& other) { data_ = ObjectRef::GetDataPtr<Object>(other); return *this; } /*! * \brief convert to int64_t */ int64_t IntValue() const { ICHECK(data_ != nullptr) << " Trying to reference a null Integer"; return (*this)->value; } // comparators Bool operator==(int other) const { if (data_ == nullptr) return Bool(false); return Bool((*this)->value == other); } Bool operator!=(int other) const { return !(*this == other); } template <typename Enum, typename = typename std::enable_if<std::is_enum<Enum>::value>::type> Bool operator==(Enum other) const { return *this == static_cast<int>(other); } template <typename Enum, typename = typename std::enable_if<std::is_enum<Enum>::value>::type> Bool operator!=(Enum other) const { return *this != static_cast<int>(other); } }; /*! \brief range over one dimension */ class RangeNode : public Object { public: /*! \brief beginning of the range */ PrimExpr min; /*! \brief the extent of the range */ PrimExpr extent; /*! \brief the location of this range in the source */ mutable Span span; /*! \brief constructor */ RangeNode() {} RangeNode(PrimExpr min, PrimExpr extent, Span span = Span()) : min(min), extent(extent), span(span) {} void VisitAttrs(AttrVisitor* v) { v->Visit("min", &min); v->Visit("extent", &extent); v->Visit("span", &span); } bool SEqualReduce(const RangeNode* other, SEqualReducer equal) const { return equal(min, other->min) && equal(extent, other->extent); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(min); hash_reduce(extent); } static constexpr const char* _type_key = "Range"; static constexpr const bool _type_has_method_sequal_reduce = true; static constexpr const bool _type_has_method_shash_reduce = true; TVM_DECLARE_FINAL_OBJECT_INFO(RangeNode, Object); }; /*! \brief Range container */ class Range : public ObjectRef { public: /*! * \brief constructor by begin and end * \param begin The begin of the range. * \param end The end of the range. * \param span The location of the Range in the source. */ TVM_DLL Range(PrimExpr begin, PrimExpr end, Span span = Span()); /*! * \brief construct a new range with min and extent * The corresponding constructor is removed, * because it runs counter to the traditional meaning * of range(begin, end) * * \param min The minimum of the range. * \param extent The extent of the range. * \param span The location of the Range in the source. */ static Range FromMinExtent(PrimExpr min, PrimExpr extent, Span span = Span()); // declare range. TVM_DEFINE_OBJECT_REF_METHODS(Range, ObjectRef, RangeNode); }; // implementations inline const Type& RelayExprNode::checked_type() const { ICHECK(checked_type_.defined()) << "internal error: the type checker has " << "not populated the checked_type " << "field for " << GetRef<RelayExpr>(this); return this->checked_type_; } template <typename TTypeNode> inline const TTypeNode* RelayExprNode::type_as() const { static_assert(std::is_base_of<TypeNode, TTypeNode>::value, "TType must be a special case of type"); ICHECK(checked_type_.defined()) << "Type inference for this Expr has not completed.
Try to call infer_type pass."; const TTypeNode* node = checked_type_.as<TTypeNode>(); ICHECK(node != nullptr) << "Expected type to be " << TTypeNode::_type_key << ", but get " << checked_type_->GetTypeKey(); return node; } } // namespace tvm namespace tvm { namespace runtime { // common rule for RetValue and ArgValue template <> struct PackedFuncValueConverter<PrimExpr> { static PrimExpr From(const TVMPODValue_& val) { if (val.type_code() == kTVMNullptr) { return PrimExpr(ObjectPtr<Object>(nullptr)); } if (val.type_code() == kDLInt) { int64_t value = val.operator int64_t(); if (value > std::numeric_limits<int>::max() || value < std::numeric_limits<int>::min()) { return IntImm(runtime::DataType::Int(64), value); } return IntImm(runtime::DataType::Int(32), val.operator int()); } if (val.type_code() == kDLFloat) { return FloatImm(runtime::DataType::Float(32), val.operator double()); } return PrimExpr::FromObject_(val.AsObjectRef<ObjectRef>()); } }; template <> struct PackedFuncValueConverter<tvm::Integer> { static tvm::Integer From(const TVMPODValue_& val) { if (val.type_code() == kTVMNullptr) { return Integer(ObjectPtr<Object>(nullptr)); } if (val.type_code() == kTVMArgInt) { return Integer(val.operator int()); } return val.AsObjectRef<tvm::Integer>(); } }; template <> struct PackedFuncValueConverter<tvm::Bool> { static tvm::Bool From(const TVMPODValue_& val) { if (val.type_code() == kTVMNullptr) { return Bool(ObjectPtr<Object>(nullptr)); } if (val.type_code() == kTVMArgInt) { int v = val.operator int(); ICHECK(v == 0 || v == 1) << "ValueError: boolean value can only be 0 or 1, but get " << v; return Bool(static_cast<bool>(v)); } return val.AsObjectRef<tvm::Bool>(); } }; } // namespace runtime } // namespace tvm #endif // TVM_IR_EXPR_H_
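The literal and range classes declared in this header are thin ObjectRef handles, so they can be constructed directly from a DataType and a value. Below is a minimal usage sketch, assuming a TVM build environment; the function name is illustrative only.

#include <tvm/ir/expr.h>

// Construct the basic literal and range objects declared in tvm/ir/expr.h.
void LiteralExamples() {
  tvm::IntImm i(tvm::DataType::Int(32), 42);       // 32-bit integer literal
  tvm::FloatImm f(tvm::DataType::Float(32), 1.5);  // 32-bit float literal
  tvm::Bool flag(true);                            // boolean constant, usable in a bool context
  tvm::Integer n(7);                               // convenience wrapper over IntImm
  // Ranges are built from (min, extent) rather than (begin, end);
  // integer literals convert implicitly to PrimExpr.
  tvm::Range r = tvm::Range::FromMinExtent(0, 16);
  (void)i; (void)f; (void)flag; (void)n; (void)r;
}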
https://github.com/zk-ml/tachikoma
include/tvm/ir/function.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/function.h * \brief Function nodes. */ #ifndef TVM_IR_FUNCTION_H_ #define TVM_IR_FUNCTION_H_ #include <tvm/ir/attrs.h> #include <tvm/ir/expr.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/map.h> #include <tvm/runtime/container/string.h> #include <string> #include <type_traits> namespace tvm { /*! * \brief Possible Calling conventions. * * NOTE: The calling convention also implies * the way we implement the function during lowering. */ enum class CallingConv : int { /*! * \brief Default calling convention. * * - Uses the native calling convention of the target. * - Implementation: specified by the native target. */ kDefault = 0, /*! * \brief PackedFunc that exposes a CPackedFunc signature. * * - Calling by PackedFunc calling convention. * - Implementation: Expose a function with the CPackedFunc signature. */ kCPackedFunc = 1, /*! * \brief Device kernel launch * * - Call by PackedFunc calling convention. * - Implementation: defined by device runtime(e.g. runtime/cuda) */ kDeviceKernelLaunch = 2, }; /*! * \brief Base node of all functions. * * We support several variants of functions throughout the stack. * All of the functions share the same type system(via checked_type) * to support cross variant calls. * * \sa BaseFunc */ class BaseFuncNode : public RelayExprNode { public: /*! \brief Additional attributes storing the meta-data */ DictAttrs attrs; /*! * \brief Get a function attribute. * * \param attr_key The attribute key. * \param default_value The default value if the key does not exist, defaults to nullptr. * * \return The result * * \tparam TOBjectRef the expected object type. * \throw Error if the key exists but the value does not match TObjectRef * * \code * * void GetAttrExample(const BaseFunc& f) { * auto value = f->GetAttr<Integer>("AttrKey", 0); * } * * \endcode */ template <typename TObjectRef> Optional<TObjectRef> GetAttr( const std::string& attr_key, Optional<TObjectRef> default_value = Optional<TObjectRef>(nullptr)) const { return attrs.GetAttr(attr_key, default_value); } // variant that uses TObjectRef to enable implicit conversion to default value. template <typename TObjectRef> Optional<TObjectRef> GetAttr(const std::string& attr_key, TObjectRef default_value) const { return GetAttr<TObjectRef>(attr_key, Optional<TObjectRef>(default_value)); } /*! * \brief Check whether the function has an non-zero integer attr. * * This function can be used to check whether an optional * attribute mark(e.g. inline) exists. * * \param attr_key The key to the attribute. * \return The check result. * * \code * * void HasNonzeroAttrExample(const BaseFunc& f) { * if (f->HasNonzeroAttr(attr::kInline)) { * // inline the function. 
* } * } * * \endcode */ bool HasNonzeroAttr(const std::string& attr_key) const { return attrs.HasNonzeroAttr(attr_key); } static constexpr const char* _type_key = "BaseFunc"; static constexpr const uint32_t _type_child_slots = 2; TVM_DECLARE_BASE_OBJECT_INFO(BaseFuncNode, RelayExprNode); }; /*! * \brief Managed reference to BaseFuncNode. * \sa BaseFuncNode */ class BaseFunc : public RelayExpr { public: TVM_DEFINE_OBJECT_REF_METHODS(BaseFunc, RelayExpr, BaseFuncNode); }; /*! * \brief Generic attribute names that can be attached to any function. * * \sa tvm::tir::attr, tvm::relay::attr */ namespace attr { /*! * \brief Indicates the special calling convention. * * Type: Integer * * \sa tvm::CallingConv */ constexpr const char* kCallingConv = "calling_conv"; /*! * \brief Compilation target of the function. * * Type: Target * * \sa tvm::Target */ constexpr const char* kTarget = "target"; /*! * \brief Global linker symbol of the function in generated code. * * This option forces the code generator to name the * function with the given. * * For example, we could set a global_symbol of a function * early to make sure that we can always refer to it by * the symbol name in the generated DLL. * * We should not set the attribute for local functions, * so that the compiler can freely rename them. * * A unique global symbol will be automatically assigned * to each function in the module before the target code * generation phase. * * Type: String */ constexpr const char* kGlobalSymbol = "global_symbol"; } // namespace attr } // namespace tvm #endif // TVM_IR_FUNCTION_H_
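A sketch of how the attribute helpers above are typically used on a function, assuming a TVM build environment; the function name is illustrative, and the keys are the ones declared in this header.

#include <tvm/ir/function.h>
#include <tvm/runtime/logging.h>

// Query attributes on an arbitrary function via GetAttr / HasNonzeroAttr.
void InspectFunc(const tvm::BaseFunc& f) {
  // Lookup with a default: returns Integer(0) when the key is absent.
  tvm::Integer conv =
      f->GetAttr<tvm::Integer>(tvm::attr::kCallingConv, tvm::Integer(0)).value();
  // String attributes such as the linker symbol come back as Optional<String>.
  tvm::Optional<tvm::String> symbol = f->GetAttr<tvm::String>(tvm::attr::kGlobalSymbol);
  if (symbol.defined()) {
    LOG(INFO) << "symbol " << symbol.value() << ", calling_conv " << conv->value;
  }
}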
https://github.com/zk-ml/tachikoma
include/tvm/ir/global_var_supply.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/global_var_supply.h * \brief GlobalVarSupply that can be used to generate unique \class GlobalVar. */ #ifndef TVM_IR_GLOBAL_VAR_SUPPLY_H_ #define TVM_IR_GLOBAL_VAR_SUPPLY_H_ #include <string> #include <unordered_map> #include "tvm/ir/expr.h" #include "tvm/ir/module.h" #include "tvm/ir/name_supply.h" namespace tvm { /*! * \brief GlobalVarSupply can be used to generate unique GlobalVars. */ class GlobalVarSupplyNode : public Object { public: /*! * \brief Empty constructor. Will use an empty NameSupply. */ GlobalVarSupplyNode() : GlobalVarSupplyNode(NameSupply("")) {} /*! * \brief Constructor. * \param name_supply The NameSupply to use for generating the names of fresh GlobalVars. * \param name_to_var_map An optional map. */ explicit GlobalVarSupplyNode(NameSupply name_supply, std::unordered_map<std::string, GlobalVar> name_to_var_map = {}); /*! * \brief Generates a unique GlobalVar from this supply. * \param name The name from which the name of the GlobalVar is derived. * \param add_prefix If set to true, then the prefix of the contained NameSupply will be prepended * to the name. \return A unique GlobalVar. */ GlobalVar FreshGlobal(String name, bool add_prefix = true); /*! * \brief Looks up for a GlobalVar with the given name in this supply. * If no entry is found, creates one, places it in the cache and returns it. * \param name The name of the GlobalVar to search for. * \param add_prefix If set to true, the prefix of the contained NameSupply will be prepended to * the name before performing the search. \return A cached GlobalVar. */ GlobalVar UniqueGlobalFor(const String& name, bool add_prefix = true); /*! * \brief Reserves an existing GlobalVar with this supply. * \param var The GlobalVar to be registered. * \param allow_conflict Allow conflict with other GlobalVars that have the same name. */ void ReserveGlobalVar(const GlobalVar& var, bool allow_conflict = false); void VisitAttrs(AttrVisitor* v) {} /*! \brief The NameSupply used to generate unique name hints to GlobalVars. */ NameSupply name_supply_; static constexpr const char* _type_key = "GlobalVarSupply"; static constexpr const bool _type_has_method_sequal_reduce = false; static constexpr const bool _type_has_method_shash_reduce = false; TVM_DECLARE_FINAL_OBJECT_INFO(GlobalVarSupplyNode, Object); private: std::unordered_map<std::string, GlobalVar> name_to_var_map_; }; /*! * \brief Managed reference class to GlobalVarSupplyNode. * \sa GlobalVarSupplyNode */ class GlobalVarSupply : public ObjectRef { public: /*! * \brief Constructor. * \param name_supply The NameSupply to be used when generating new GlobalVars. * \param name_to_var_map An optional map. 
*/ TVM_DLL explicit GlobalVarSupply(const NameSupply& name_supply, std::unordered_map<std::string, GlobalVar> name_to_var_map = {}); /*! * \brief Constructs a supply from an array of IRModules. GlobalVars generated by this supply are * guaranteed not to conflict with any GlobalVars that belong to the modules. \param modules Array * of IRModules. */ TVM_DLL explicit GlobalVarSupply(const Array<IRModule>& modules); /*! * \brief Constructs a GlobalVarSupply from an IRModule. GlobalVars generated by this supply are * guaranteed not to conflict with GlobalVars that belong to the module. \param module The * IRModule. */ TVM_DLL explicit GlobalVarSupply(const IRModule module); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(GlobalVarSupply, ObjectRef, GlobalVarSupplyNode); }; } // namespace tvm #endif // TVM_IR_GLOBAL_VAR_SUPPLY_H_
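A sketch of generating fresh, non-conflicting GlobalVars for an existing module with the supply above, assuming a TVM build environment; the names used are illustrative.

#include <tvm/ir/global_var_supply.h>
#include <tvm/ir/module.h>

// Produce GlobalVars that are guaranteed not to clash with names already in `mod`.
void GenerateGlobals(const tvm::IRModule& mod) {
  tvm::GlobalVarSupply supply(mod);                           // reserves all existing GlobalVars
  tvm::GlobalVar fresh = supply->FreshGlobal("main");         // renamed if "main" is taken
  tvm::GlobalVar cached = supply->UniqueGlobalFor("helper");  // same var on repeated calls
  (void)fresh; (void)cached;
}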
https://github.com/zk-ml/tachikoma
include/tvm/ir/instrument.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/instrument.h * * This file introduces a pass instrument infrastructure, inspired by LLVM and MLIR. * It inserts instrumentation points around passes. */ #ifndef TVM_IR_INSTRUMENT_H_ #define TVM_IR_INSTRUMENT_H_ #include <tvm/node/reflection.h> #include <tvm/runtime/container/string.h> #include <utility> #include <vector> namespace tvm { class IRModule; // Forward class for PassInstrumentNode methods namespace transform { class PassInfo; } // namespace transform namespace instrument { /*! * \brief PassInstrumentNode forms an instrument implementation. * It provides API for users to register callbacks at different instrumentation points. * * Within a PassContext, call sequence of a PassInstrument implementation is like: * * with PassContext(instruments=[pi]): # pi = a PassInstrument implementation * pi.EnterPassContext() * * if pi.ShouldRun(Pass1): * pi.RunBeforePass() * Pass1() * pi.RunAfterPass() * * if pi.ShouldRun(Pass2): * pi.RunBeforePass() * Pass2() * pi.RunAfterPass() * * pi.ExitPassContext() * * `EnterPassContext` and `ExitPassContext` are only called once when entering/exiting a * PassContext. `ShouldRun`, `RunBeforePass` and `RunAfterPass` are called multiple times depending * on how many passes. * * If there are multiple pass instrumentations provided, the instrument points are the same. * PassInstrument implementations' callbacks are called in order: * * with PassContext(instruments=[pi1, pi2]): # pi1, pi2 = two distinct PassInstrument impls * pi.EnterPassContext() for pi in instruments * * should_run = all([pi.ShoudRun(Pass1) for pi in instruments)]) * if (should_run) * pi.RunBeforePass() for pi in instruments * Pass1() * pi.RunAfterPass() for pi in instruments * * should_run = all([pi.ShouldRun(Pass2) for pi in instruments)]) * if (should_run) * pi.RunBeforePass() for pi in instruments * Pass2() * pi.RunAfterPass() for pi in instruments * * pi.ExitPassContext() for pi in instruments * * Note: * 1. Assume there is no dependency between PassInstrument implementations in `instruments` . * 2. `EnterPassContext` and `ExitPassContext` have `with` behavior (see PassContext and its FFI): * If there is any exception raised in `ShouldRun()`, `RunBeforePass()`, `RunAfterPass()` and * `Pass()`, `ExitPassContext()` is still called. * 3. In mutiple PassInstrument instances scenario, callbacks are called in order: * If one throws exceptions, remainings will not be called. * * \sa PassInstrument * \sa src/ir/transform.cc */ class PassInstrumentNode : public Object { public: /*! \brief Name of this pass instrument object. */ String name; virtual ~PassInstrumentNode() {} /*! \brief Instrument when entering PassContext. Called once within a PassContext. */ virtual void EnterPassContext() const = 0; /*! 
\brief Instrument when exiting PassContext. Called once within a PassContext. */ virtual void ExitPassContext() const = 0; /*! * \brief Determine whether to run the pass or not. Called multiple times depending on the number of * passes. * \param mod The module that an optimization pass runs on. * \param info The pass information. * * \return true to run the pass; false to skip the pass. */ virtual bool ShouldRun(const IRModule& mod, const transform::PassInfo& info) const = 0; /*! * \brief Instrument before pass run. Called multiple times depending on the number of passes. * \param mod The module that an optimization pass runs on. * \param info The pass information. */ virtual void RunBeforePass(const IRModule& mod, const transform::PassInfo& info) const = 0; /*! * \brief Instrument after pass run. Called multiple times depending on the number of passes. * \param mod The module that an optimization pass runs on. * \param info The pass information. */ virtual void RunAfterPass(const IRModule& mod, const transform::PassInfo& info) const = 0; void VisitAttrs(AttrVisitor* v) { v->Visit("name", &name); } static constexpr const char* _type_key = "instrument.PassInstrument"; TVM_DECLARE_BASE_OBJECT_INFO(PassInstrumentNode, Object); }; /*! * \brief Managed reference class for PassInstrumentNode * \sa PassInstrumentNode */ class PassInstrument : public ObjectRef { public: TVM_DEFINE_OBJECT_REF_METHODS(PassInstrument, ObjectRef, PassInstrumentNode); }; } // namespace instrument } // namespace tvm #endif // TVM_IR_INSTRUMENT_H_
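A minimal pass instrument can be written by subclassing PassInstrumentNode and overriding the five hooks above. A sketch follows, assuming a TVM build environment; the class name is illustrative and the object registration plus the Python-facing wrapper are omitted.

#include <tvm/ir/instrument.h>
#include <tvm/ir/module.h>
#include <tvm/ir/transform.h>
#include <tvm/runtime/logging.h>

// Logs every pass that runs inside the enclosing PassContext.
class LoggingInstrumentNode : public tvm::instrument::PassInstrumentNode {
 public:
  void EnterPassContext() const final {}
  void ExitPassContext() const final {}
  bool ShouldRun(const tvm::IRModule& mod, const tvm::transform::PassInfo& info) const final {
    return true;  // never skip a pass
  }
  void RunBeforePass(const tvm::IRModule& mod, const tvm::transform::PassInfo& info) const final {
    LOG(INFO) << "running pass " << info->name;
  }
  void RunAfterPass(const tvm::IRModule& mod, const tvm::transform::PassInfo& info) const final {
    LOG(INFO) << "finished pass " << info->name;
  }
  static constexpr const char* _type_key = "instrument.LoggingInstrument";
  TVM_DECLARE_FINAL_OBJECT_INFO(LoggingInstrumentNode, PassInstrumentNode);
};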
https://github.com/zk-ml/tachikoma
include/tvm/ir/memory_pools.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/memory_pools.h * \brief The object definition for relay.build argument type of memory pools */ #ifndef TVM_IR_MEMORY_POOLS_H_ #define TVM_IR_MEMORY_POOLS_H_ #include <tvm/runtime/registry.h> #include <tvm/target/target.h> struct TVMConstantInfo; namespace tvm { /*! * \brief Describes a pool of memory accessible by one or more targets. */ struct PoolInfoNode : public Object { public: /*! \brief The name of the memory pool */ String pool_name; /*! \brief The expected size hint to be used by the allocator. * The size_hint_bytes is set to kUnrestrictedPoolSizeHint * to indicate the pool is not size restricted. */ Integer size_hint_bytes; /*! \brief The clock frequency of the memory in Hz */ Integer clock_frequency_hz; /*! \brief The read bandwidth in bytes/cycle */ Integer read_bandwidth_bytes_per_cycle; /*! \brief The write bandwidth in bytes/cycle */ Integer write_bandwidth_bytes_per_cycle; /*! \brief The read latency in cycles */ Integer read_latency_cycles; /*! \brief The write latency in cycles */ Integer write_latency_cycles; /*! \brief The burst length in bytes for each Target */ Map<Target, Integer> target_burst_bytes; /*! \brief Whether pool is internally generated. * The internal pools will be generated as part of * the entry point code generation of the executor */ bool is_internal = false; /*! 
\brief The targets linked to the pool */ Array<Target> targets; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("pool_name", &pool_name); v->Visit("targets", &targets); v->Visit("size_hint_bytes", &size_hint_bytes); v->Visit("clock_frequency_hz", &clock_frequency_hz); v->Visit("read_bandwidth_bytes_per_cycle", &read_bandwidth_bytes_per_cycle); v->Visit("write_bandwidth_bytes_per_cycle", &write_bandwidth_bytes_per_cycle); v->Visit("read_latency_cycles", &read_latency_cycles); v->Visit("write_latency_cycles", &write_latency_cycles); v->Visit("target_burst_bytes", &target_burst_bytes); v->Visit("is_internal", &is_internal); } bool SEqualReduce(const PoolInfoNode* other, SEqualReducer equal) const { return equal(pool_name, other->pool_name) && equal(size_hint_bytes, other->size_hint_bytes) && equal(clock_frequency_hz, other->clock_frequency_hz) && equal(read_bandwidth_bytes_per_cycle, other->read_bandwidth_bytes_per_cycle) && equal(write_bandwidth_bytes_per_cycle, other->write_bandwidth_bytes_per_cycle) && equal(read_latency_cycles, other->read_latency_cycles) && equal(write_latency_cycles, other->write_latency_cycles) && equal(target_burst_bytes, other->target_burst_bytes) && equal(is_internal, other->is_internal); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(pool_name); hash_reduce(size_hint_bytes); hash_reduce(clock_frequency_hz); hash_reduce(read_bandwidth_bytes_per_cycle); hash_reduce(write_bandwidth_bytes_per_cycle); hash_reduce(read_latency_cycles); hash_reduce(write_latency_cycles); hash_reduce(target_burst_bytes); hash_reduce(is_internal); } static constexpr const char* _type_key = "ir.PoolInfo"; TVM_DECLARE_BASE_OBJECT_INFO(PoolInfoNode, Object); }; /*! * \brief The string parameter to indicate read and write access to a pool * This needs to be kept in sync with PoolInfo.READ_WRITE_ACCESS in * python/tvm/ir/memory_pools.py */ static constexpr const char* kTargetPoolReadWriteAccess = "rw"; /*! * \brief The string parameter to indicate read only access to a pool * This needs to be kept in sync with PoolInfo.READ_ONLY_ACCESS in * python/tvm/ir/memory_pools.py */ static constexpr const char* kTargetPoolReadOnlyAccess = "ro"; /*! \brief The PoolSize is unrestricted for the memory planner */ static const int kUnrestrictedPoolSizeHint = -1; /*! \brief The clock frequency is not known */ static const int kUnknownClockFrequency = -1; /*! \brief The read bandwidth is not known */ static const int kUnknownReadBandwidth = -1; /*! \brief The write bandwidth is not known */ static const int kUnknownWriteBandwidth = -1; /*! \brief Base class for WorkspacePoolInfo and ConstantPoolInfo */ class PoolInfo : public ObjectRef { protected: TVM_DLL PoolInfo(String pool_name, Integer size_hint_bytes = kUnrestrictedPoolSizeHint, Integer clock_frequency_hz = kUnknownClockFrequency, Integer read_bandwidth_bytes_per_cycle = kUnknownReadBandwidth, Integer write_bandwidth_bytes_per_cycle = kUnknownWriteBandwidth, Integer read_latency_cycles = 0, Integer write_latency_cycles = 0, Map<Target, Integer> target_burst_bytes = {}, Bool is_internal = Bool(false)); public: TVM_DEFINE_OBJECT_REF_METHODS(PoolInfo, ObjectRef, PoolInfoNode); }; /*! * \brief Describes a pool of memory properties */ struct PoolInfoPropertiesNode : public Object { /*! \brief The expected size hint to be used by the allocator. * The size_hint_bytes is set to kUnrestrictedPoolSizeHint * to indicate the pool is not size restricted. */ Integer size_hint_bytes = kUnrestrictedPoolSizeHint; /*! 
\brief The clock frequency of the memory in Hz */ Integer clock_frequency_hz = kUnknownClockFrequency; /*! \brief The read bandwidth in bytes/cycle */ Integer read_bandwidth_bytes_per_cycle = kUnknownReadBandwidth; /*! \brief The write bandwidth in bytes/cycle */ Integer write_bandwidth_bytes_per_cycle = kUnknownWriteBandwidth; /*! \brief The read latency in cycles */ Integer read_latency_cycles = 0; /*! \brief The write latency in cycles */ Integer write_latency_cycles = 0; /*! \brief The burst length in bytes for each Target */ Map<Target, Integer> target_burst_bytes{}; /*! \brief Whether pool is internally generated. * The internal pools will be generated as part of * the entry point code generation of the executor */ bool is_internal = false; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("size_hint_bytes", &size_hint_bytes); v->Visit("clock_frequency_hz", &clock_frequency_hz); v->Visit("read_bandwidth_bytes_per_cycle", &read_bandwidth_bytes_per_cycle); v->Visit("write_bandwidth_bytes_per_cycle", &write_bandwidth_bytes_per_cycle); v->Visit("read_latency_cycles", &read_latency_cycles); v->Visit("write_latency_cycles", &write_latency_cycles); v->Visit("target_burst_bytes", &target_burst_bytes); v->Visit("is_internal", &is_internal); } bool SEqualReduce(const PoolInfoPropertiesNode* other, SEqualReducer equal) const { return equal(size_hint_bytes, other->size_hint_bytes) && equal(clock_frequency_hz, other->clock_frequency_hz) && equal(read_bandwidth_bytes_per_cycle, other->read_bandwidth_bytes_per_cycle) && equal(write_bandwidth_bytes_per_cycle, other->write_bandwidth_bytes_per_cycle) && equal(read_latency_cycles, other->read_latency_cycles) && equal(write_latency_cycles, other->write_latency_cycles) && equal(target_burst_bytes, other->target_burst_bytes) && equal(is_internal, other->is_internal); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(size_hint_bytes); hash_reduce(clock_frequency_hz); hash_reduce(read_bandwidth_bytes_per_cycle); hash_reduce(write_bandwidth_bytes_per_cycle); hash_reduce(read_latency_cycles); hash_reduce(write_latency_cycles); hash_reduce(target_burst_bytes); hash_reduce(is_internal); } static constexpr const char* _type_key = "ir.PoolInfoProperties"; TVM_DECLARE_FINAL_OBJECT_INFO(PoolInfoPropertiesNode, Object); }; class PoolInfoProperties : public ObjectRef { public: TVM_DLL PoolInfoProperties(Integer size_hint_bytes, Integer clock_frequency_hz = kUnknownClockFrequency, Integer read_bandwidth_bytes_per_cycle = kUnknownReadBandwidth, Integer write_bandwidth_bytes_per_cycle = kUnknownWriteBandwidth, Integer read_latency_cycles = 0, Integer write_latency_cycles = 0, Map<Target, Integer> target_burst_bytes = {}, Bool is_internal = Bool(false)); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(PoolInfoProperties, ObjectRef, PoolInfoPropertiesNode); }; /* \brief Represents RW memory area */ struct WorkspacePoolInfoNode : public PoolInfoNode { void VisitAttrs(tvm::AttrVisitor* v) { PoolInfoNode::VisitAttrs(v); } bool SEqualReduce(const WorkspacePoolInfoNode* other, SEqualReducer equal) const { return PoolInfoNode::SEqualReduce(other, equal); } void SHashReduce(SHashReducer hash_reduce) const { PoolInfoNode::SHashReduce(hash_reduce); } static constexpr const char* _type_key = "ir.WorkspacePoolInfo"; TVM_DECLARE_FINAL_OBJECT_INFO(WorkspacePoolInfoNode, PoolInfoNode); }; class WorkspacePoolInfo : public PoolInfo { public: TVM_DLL WorkspacePoolInfo( String pool_name, Array<Target> targets, PoolInfoProperties properties = 
PoolInfoProperties(kUnrestrictedPoolSizeHint)); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(WorkspacePoolInfo, PoolInfo, WorkspacePoolInfoNode); }; /* * \brief The ConstantInfoNode contains numeric literal in RO pool * Used to initialise RO memory in ConstantPoolInfo */ struct ConstantInfoNode : public Object { String name_hint; Integer byte_offset; runtime::NDArray data; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("name_hint", &name_hint); v->Visit("byte_offset", &byte_offset); v->Visit("data", &data); } bool SEqualReduce(const ConstantInfoNode* other, SEqualReducer equal) const { return equal(name_hint, other->name_hint) && equal(byte_offset, other->byte_offset) && equal(data, other->data); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(name_hint); hash_reduce(byte_offset); hash_reduce(data); } static constexpr const char* _type_key = "ir.ConstantInfo"; static constexpr bool _type_has_method_sequal_reduce = true; static constexpr bool _type_has_method_shash_reduce = true; TVM_DECLARE_FINAL_OBJECT_INFO(ConstantInfoNode, Object); }; class ConstantInfo : public ObjectRef { public: TVM_DLL ConstantInfo(const struct ::TVMConstantInfo* data); ConstantInfo(String name, Integer byte_offset, runtime::NDArray data); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(ConstantInfo, ObjectRef, ConstantInfoNode); }; /* \brief ConstantPoolInfoNode represents an RO memory area initialized with * data from constant_info_array */ struct ConstantPoolInfoNode : public PoolInfoNode { Array<ConstantInfo> constant_info_array; void VisitAttrs(tvm::AttrVisitor* v) { PoolInfoNode::VisitAttrs(v); v->Visit("constant_info_array", &constant_info_array); } bool SEqualReduce(const ConstantPoolInfoNode* other, SEqualReducer equal) const { return PoolInfoNode::SEqualReduce(other, equal) && equal(constant_info_array, other->constant_info_array); } void SHashReduce(SHashReducer hash_reduce) const { PoolInfoNode::SHashReduce(hash_reduce); hash_reduce(constant_info_array); } static constexpr const char* _type_key = "ir.ConstantPoolInfo"; TVM_DECLARE_FINAL_OBJECT_INFO(ConstantPoolInfoNode, PoolInfoNode); }; class ConstantPoolInfo : public PoolInfo { public: TVM_DLL ConstantPoolInfo( String pool_name, Array<Target> targets, Array<ConstantInfo> constant_info_array, PoolInfoProperties properties = PoolInfoProperties(kUnrestrictedPoolSizeHint)); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(ConstantPoolInfo, PoolInfo, ConstantPoolInfoNode); }; /* \brief A container for WorkspacePoolInfo objects */ struct WorkspaceMemoryPoolsNode : public Object { Array<PoolInfo> pools; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("pools", &pools); } bool SEqualReduce(const WorkspaceMemoryPoolsNode* other, SEqualReducer equal) const { return equal(pools, other->pools); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(pools); } static constexpr const char* _type_key = "ir.WorkspaceMemoryPools"; TVM_DECLARE_FINAL_OBJECT_INFO(WorkspaceMemoryPoolsNode, Object); }; class WorkspaceMemoryPools : public ObjectRef { public: TVM_DLL WorkspaceMemoryPools(Array<PoolInfo> pools); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(WorkspaceMemoryPools, ObjectRef, WorkspaceMemoryPoolsNode); }; /* \brief A container for ConstantPoolInfo objects */ struct ConstantMemoryPoolsNode : public Object { Array<ConstantPoolInfo> pools; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("pools", &pools); } bool SEqualReduce(const ConstantMemoryPoolsNode* other, SEqualReducer equal) const { return equal(pools, other->pools); } void SHashReduce(SHashReducer hash_reduce) const { 
hash_reduce(pools); } static constexpr const char* _type_key = "ir.ConstantMemoryPools"; TVM_DECLARE_FINAL_OBJECT_INFO(ConstantMemoryPoolsNode, Object); }; class ConstantMemoryPools : public ObjectRef { public: TVM_DLL ConstantMemoryPools(Array<ConstantPoolInfo> pools); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(ConstantMemoryPools, ObjectRef, ConstantMemoryPoolsNode); }; } // namespace tvm #endif // TVM_IR_MEMORY_POOLS_H_
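A sketch of describing a single workspace pool with the classes above, assuming a TVM build environment; the pool name, target string, and size are illustrative.

#include <tvm/ir/memory_pools.h>
#include <tvm/target/target.h>

// Describe one 256 KiB read/write pool visible to a C target.
tvm::WorkspaceMemoryPools MakeWorkspacePools() {
  tvm::Target target("c");
  tvm::WorkspacePoolInfo sram("sram", {target},
                              tvm::PoolInfoProperties(tvm::Integer(256 * 1024)));
  return tvm::WorkspaceMemoryPools({sram});
}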
https://github.com/zk-ml/tachikoma
include/tvm/ir/module.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/module.h * \brief IRModule that holds the functions and type definitions. */ #ifndef TVM_IR_MODULE_H_ #define TVM_IR_MODULE_H_ #include <tvm/ir/adt.h> #include <tvm/ir/expr.h> #include <tvm/ir/function.h> #include <tvm/ir/type.h> #include <tvm/parser/source_map.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/map.h> #include <tvm/runtime/container/string.h> #include <string> #include <unordered_map> #include <unordered_set> #include <utility> #include <vector> namespace tvm { class IRModule; /*! * \brief IRModule that holds functions and type definitions. * * IRModule is the basic unit for all IR transformations across the stack. * * Many operations require access to the global IRModule. * We pass the IRModule by value in a functional style as an explicit argument, * but we mutate the Module while optimizing programs. * \sa IRModule */ class IRModuleNode : public Object { public: /*! \brief A map from ids to all global functions. */ Map<GlobalVar, BaseFunc> functions; /*! \brief A map from global type vars to ADT type data. */ Map<GlobalTypeVar, TypeData> type_definitions; /*! \brief The source map for the module. */ parser::SourceMap source_map; /* \brief Additional attributes storing meta-data about the module. */ DictAttrs attrs; /*! * \brief Get a module attribute. * * \param attr_key The attribute key. * \param default_value The default value if the key does not exist, defaults to nullptr. * * \return The result * * \tparam TOBjectRef the expected object type. * \throw Error if the key exists but the value does not match TObjectRef * * \code * * void GetAttrExample(const IRModule& mod) { * auto value = f->GetAttr<Integer>("AttrKey", 0); * } * * \endcode */ template <typename TObjectRef> Optional<TObjectRef> GetAttr( const std::string& attr_key, Optional<TObjectRef> default_value = Optional<TObjectRef>(nullptr)) const { return attrs.GetAttr(attr_key, default_value); } // variant that uses TObjectRef to enable implicit conversion to default value. template <typename TObjectRef> Optional<TObjectRef> GetAttr(const std::string& attr_key, TObjectRef default_value) const { return GetAttr<TObjectRef>(attr_key, Optional<TObjectRef>(default_value)); } /*! * \brief Check whether the module has an non-zero integer attr. * * This function can be used to check whether an optional * attribute mark(e.g. inline) exists. * * \param attr_key The key to the attribute. * \return The check result. * * \code * * void HasNonzeroAttrExample(const IRModule& mod) { * if (mod->HasNonzeroAttr(attr::kInline)) { * // inline the function. 
* } * } * * \endcode */ bool HasNonzeroAttr(const std::string& attr_key) const { return attrs.HasNonzeroAttr(attr_key); } IRModuleNode() : source_map() {} void VisitAttrs(AttrVisitor* v) { v->Visit("functions", &functions); v->Visit("type_definitions", &type_definitions); v->Visit("global_var_map_", &global_var_map_); v->Visit("global_type_var_map_", &global_type_var_map_); v->Visit("source_map", &source_map); v->Visit("attrs", &attrs); } TVM_DLL bool SEqualReduce(const IRModuleNode* other, SEqualReducer equal) const; TVM_DLL void SHashReduce(SHashReducer hash_reduce) const; /*! * \brief Add a function to the global environment. * \param var The var of the global function. * \param func The function. * \param update Controls whether you can replace a definition in the * environment. */ TVM_DLL void Add(const GlobalVar& var, const BaseFunc& func, bool update = false); /*! * \brief Add a function to the global environment. * \param var The name of the global function. * \param func The function. * * It does not do type inference as Add does. */ TVM_DLL void AddUnchecked(const GlobalVar& var, const BaseFunc& func); /*! * \brief Add a type-level definition to the global environment. * \param var The var of the global type definition. * \param type The ADT. * \param update Controls whether you can replace a definition in the * environment. */ TVM_DLL void AddTypeDef(const GlobalTypeVar& var, const TypeData& type, bool update = false); /*! * \brief Add a type-level definition to the global environment. * \param var The var of the global type definition. * \param type The ADT. * \param update Controls whether you can replace a definition in the * environment. * * It does not do type checking as AddTypeDef does. */ TVM_DLL void AddTypeDefUnchecked(const GlobalTypeVar& var, const TypeData& type, bool update = false); /*! * \brief Update a function in the global environment. * \param var The name of the global function to update. * \param func The new function. */ TVM_DLL void Update(const GlobalVar& var, const BaseFunc& func); /*! * \brief Update a type definition in the global environment. * \param var The name of the global type definition to update. * \param type The new ADT. */ TVM_DLL void UpdateTypeDef(const GlobalTypeVar& var, const TypeData& type); /*! * \brief Remove a function from the global environment. * \param var The name of the global function to update. */ TVM_DLL void Remove(const GlobalVar& var); /*! * \brief Check if the global_var_map_ contains a global variable. * \param name The variable name. * \returns true if contains, otherise false. */ TVM_DLL bool ContainGlobalVar(const String& name) const; /*! * \brief Check if the global_type_var_map_ contains a global type variable. * \param name The variable name. * \returns true if contains, otherise false. */ TVM_DLL bool ContainGlobalTypeVar(const String& name) const; /*! * \brief Lookup a global function by its variable. * \param str The unique string specifying the global variable. * \returns The global variable. */ TVM_DLL GlobalVar GetGlobalVar(const String& str) const; /*! * \brief Collect all global vars defined in this module. * \returns An array of global vars */ TVM_DLL Array<GlobalVar> GetGlobalVars() const; /*! * \brief Look up a global function by its name. * \param str The unique string specifying the global variable. * \returns The global variable. */ TVM_DLL GlobalTypeVar GetGlobalTypeVar(const String& str) const; /*! * \brief Collect all global type vars defined in this module. 
* \returns An array of global type vars */ TVM_DLL Array<GlobalTypeVar> GetGlobalTypeVars() const; /*! * \brief Find constructor of ADT using name * \param adt name of the ADT the constructor belongs to * \param cons name of the constructor * \returns Constructor of ADT, error if not found */ TVM_DLL Constructor GetConstructor(const String& adt, const String& cons) const; /*! * \brief Look up a global function by its variable. * \param var The global var to lookup. * \returns The function named by the variable argument. */ TVM_DLL BaseFunc Lookup(const GlobalVar& var) const; /*! * \brief Look up a global function by its string name * \param name The name of the function. * \returns The function named by the argument. */ TVM_DLL BaseFunc Lookup(const String& name) const; /*! * \brief Look up a global type definition by its variable. * \param var The var of the global type definition. * \return The type definition. */ TVM_DLL TypeData LookupTypeDef(const GlobalTypeVar& var) const; /*! * \brief Look up a global type definition by its name. * \param var The name of the global type definition. * \return The type definition. */ TVM_DLL TypeData LookupTypeDef(const String& var) const; /*! * \brief Look up a constructor by its tag. * \param tag The tag for the constructor. * \return The constructor object. */ TVM_DLL Constructor LookupTag(const int32_t tag); /*! * \brief Update the functions inside this environment by * functions in another environment. * \param other The other environment. */ TVM_DLL void Update(const IRModule& other); /*! * \brief Create a shallow copy of this IRModule. * \returns The shallow copy of the IRModule. */ TVM_DLL IRModule ShallowCopy(); /*! * \brief Import Relay code from the file at path. * \param path The path of the Relay code to import. * * \note The path resolution behavior is standard, * if abosolute will be the absolute file, if * relative it will be resovled against the current * working directory. */ TVM_DLL void Import(const String& path); /*! * \brief Import Relay code from the file at path, relative to the standard library. * \param path The path of the Relay code to import. */ TVM_DLL void ImportFromStd(const String& path); /*! * \brief Should Link Parameters into the module * \return Whether the Executor is configured to execute with linked parameters (Default: false) */ TVM_DLL Bool ShouldLinkParameters() const; /*! * \brief The set of imported files. */ TVM_DLL std::unordered_set<String> Imports() const; static constexpr const char* _type_key = "IRModule"; static constexpr const bool _type_has_method_sequal_reduce = true; static constexpr const bool _type_has_method_shash_reduce = true; TVM_DECLARE_FINAL_OBJECT_INFO(IRModuleNode, Object); private: /*! \brief Helper function for registering a typedef's constructors */ void RegisterConstructors(const GlobalTypeVar& var, const TypeData& type); /*! \brief A map from string names to global variables that * ensures global uniqueness. */ Map<String, GlobalVar> global_var_map_; /*! \brief A map from string names to global type variables (ADT names) * that ensures global uniqueness. */ Map<String, GlobalTypeVar> global_type_var_map_; /*! \brief A map from constructor tags to constructor objects * for convenient access */ std::unordered_map<int32_t, Constructor> constructor_tag_map_; /*! \brief The files previously imported, required to ensure importing is idempotent for each module. */ std::unordered_set<String> import_set_; friend class IRModule; }; /*! * \brief Managed reference class to IRModuleNode. 
* \sa IRModuleNode */ class IRModule : public ObjectRef { public: /*! * \brief constructor * \param functions Functions in the module. * \param type_definitions Type definitions in the module. * \param import_set Set of imported files in the module. * \param map The module source map. * \param attrs The module attributes. */ TVM_DLL explicit IRModule(Map<GlobalVar, BaseFunc> functions, Map<GlobalTypeVar, TypeData> type_definitions = {}, std::unordered_set<String> import_set = {}, parser::SourceMap map = {}, DictAttrs attrs = {}); /*! \brief default constructor */ IRModule() : IRModule(Map<GlobalVar, BaseFunc>({})) {} /*! * \brief constructor * \param n The object pointer. */ explicit IRModule(ObjectPtr<Object> n) : ObjectRef(n) {} /*! \return mutable pointers to the node. */ IRModuleNode* operator->() const { auto* ptr = get_mutable(); ICHECK(ptr != nullptr); return static_cast<IRModuleNode*>(ptr); } /*! * \brief Constructs a module from a standalone expression \p expr. * * If \p expr is a function it will be bound directly. Otherwise a function over the free * variables of \p expr (possibly none) with \p expr as body is created and bound. * * The function is bound to, in preference order: * - The "global_symbol" attribute of \p expr, if it is a function with that attribute. * - 'main' * - A unique name derived from 'main' if 'main' is already bound in \p global_funcs. * * Additional global functions and type definitions may be included in the result module. * * See also \p FromExpr. * * \param expr The expression to set as the main function to the module. * \param global_funcs The global function map. Default empty. * \param type_definitions The global type definition map. Default empty. * \param import_set Set of external modules already imported. Default empty. * * \returns A module with \p expr set as the main function, and the global var to which * \p expr was bound (typcially 'main'). * * TODO(mbs): Does import_set and the bound global var need to be exposed via ffi? */ static std::pair<IRModule, GlobalVar> FromExprInContext( const RelayExpr& expr, const Map<GlobalVar, BaseFunc>& global_funcs = {}, const Map<GlobalTypeVar, TypeData>& type_definitions = {}, std::unordered_set<String> import_set = {}); /*! * \brief As for \p FromExprInContext, but assuming \p expr is bound to 'main' and no * imports. */ TVM_DLL static IRModule FromExpr(const RelayExpr& expr, const Map<GlobalVar, BaseFunc>& global_funcs = {}, const Map<GlobalTypeVar, TypeData>& type_definitions = {}); /*! * \brief Parse text format source file into an IRModule. * \param text A string of Relay source code. * \param source_path The path to the source file. * \return A Relay module. */ TVM_DLL static IRModule FromText(const String& text, const String& source_path); /*! * \brief Create a shallow copy of an IRModule. * \param mod The module to copy. * \return The copied module. */ IRModule ShallowCopyIRModule(IRModule mod); /*! \brief Declare the container type. */ using ContainerType = IRModuleNode; /*! \brief Declare whether Ref is nullable. */ static constexpr bool _type_is_nullable = false; // allow copy on write. TVM_DEFINE_OBJECT_REF_COW_METHOD(IRModuleNode); }; /*! * \brief Pretty print a node for debug purposes. * * \param node The node to be printed. * \return The text reperesentation. * \note This function does not show version or meta-data. * Use AsText if you want to store the text. * \sa AsText. */ TVM_DLL String PrettyPrint(const ObjectRef& node); /*! * \brief Render the node as a string in the text format. 
* * \param node The node to be rendered. * \param show_meta_data Whether to print meta data section. * \param annotate An optional callback function for attaching * additional comment block to an expr. * * \note We support a limited set of IR nodes that are part of * relay IR and * * \sa PrettyPrint. * \return The text representation. */ TVM_DLL String AsText(const ObjectRef& node, bool show_meta_data = true, runtime::TypedPackedFunc<String(ObjectRef)> annotate = nullptr); namespace attr { // Following are attributes for IRModule only. /*! * \brief Name of the module * * Type: String * * \sa tvm::runtime::String */ constexpr const char* kModuleName = "mod_name"; /*! * \brief Executor targeted by the module * * Type: Executor * * \sa tvm::relay::Executor */ constexpr const char* kExecutor = "executor"; /*! * \brief Runtime target of the module * * Type: Runtime * * \sa tvm::relay::Runtime */ constexpr const char* kRuntime = "runtime"; /*! * \brief workspace memory pools of the module * * Type: WorkspaceMemoryPools * * \sa tvm::WorkspaceMemoryPools */ constexpr const char* kWorkspaceMemoryPools = "workspace_memory_pools"; /*! * \brief constant memory pools of the module * * Type: ConstantMemoryPools * * \sa tvm::ConstantMemoryPools */ constexpr const char* kConstantMemoryPools = "constant_memory_pools"; /* * \brief All the runtime::NDArrays extracted from PrimFunc tir::AllocateConst nodes. The * node will record the index into this array. See also kConstNameToConstant below, which is * the analog for Realy Functions. * * Type: Array<runtime::NDArray> */ constexpr const char* kConstants = "constants"; /*! * \brief All the runtime::Modules accumulated during compilation by external codegen. These * modules must be either directly linked or captured in the final compilation artifact. * * Type: Array<runtime::Module> */ constexpr const char* kExternalMods = "external_mods"; /*! * \brief All the named runtime::NDArrays accumulated during compilation by external codegen. * Generally the associated runtime::Module will indicate it requires bindings for these names, * and during module initialization these bindings will be recovered from a ConstLoaderModule. * See also kConstantsArray above, which is the analog for PrimFuncs. * * Type: Map<String, runtime::NDArray> */ constexpr const char* kConstNameToConstant = "const_name_to_constant"; } // namespace attr } // namespace tvm #endif // TVM_IR_MODULE_H_
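A sketch of constructing a module from a single function and querying it through the API above, assuming a TVM build environment; the function argument and the name "main" are illustrative.

#include <tvm/ir/module.h>
#include <tvm/runtime/logging.h>

// Build an IRModule holding one function and look it up again by name.
tvm::IRModule BuildModule(const tvm::BaseFunc& f) {
  tvm::GlobalVar gv("main");
  tvm::Map<tvm::GlobalVar, tvm::BaseFunc> funcs({{gv, f}});
  tvm::IRModule mod(funcs);
  ICHECK(mod->ContainGlobalVar("main"));
  tvm::BaseFunc again = mod->Lookup("main");  // lookup by string name
  (void)again;
  return mod;
}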
https://github.com/zk-ml/tachikoma
include/tvm/ir/name_supply.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/name_supply.h * \brief NameSupply that can be used to generate unique variable names. */ #ifndef TVM_IR_NAME_SUPPLY_H_ #define TVM_IR_NAME_SUPPLY_H_ #include <string> #include <unordered_map> #include <utility> #include "tvm/ir/expr.h" namespace tvm { /*! * \brief NameSupply can be used to generate unique names. */ class NameSupplyNode : public Object { public: /*! * \brief Empty constructor. Needed by the TVM_REGISTER_NODE_TYPE macro. */ NameSupplyNode() = default; /*! * \brief Constructor. * \param prefix The prefix to be used with this NameSupply. * \param name_map The map used to guarantee uniqueness. */ NameSupplyNode(const String& prefix, std::unordered_map<std::string, int> name_map) : prefix_(prefix), name_map(std::move(name_map)) {} /*! * \brief Generates a unique name from this NameSupply. * \param name The name from which the generated name is derived. * \param add_prefix If set to true, then the prefix of this NameSupply will be prepended to the * name. \return A unique name. */ String FreshName(const String& name, bool add_prefix = true); /*! * \brief Reserves an existing name with this NameSupply. * \param name The name to be reserved. * \param add_prefix If set to true, then the prefix of this NameSupply will be prepended to the * name before reserving it. \return The name that was reserved with the NameSupply. It can be * different if a prefix is added. */ String ReserveName(const String& name, bool add_prefix = true); /*! * \brief Checks if this NameSupply already generated a name. * \param name The name to check. * \param add_prefix If set to true, then the prefix of this NameSupply will be prepended to the * name before checking for it. \return True if the name has already been generated. False * otherwise. */ bool ContainsName(const String& name, bool add_prefix = true); void VisitAttrs(AttrVisitor* v) {} // Prefix for all GlobalVar names. It can be empty. std::string prefix_; static constexpr const char* _type_key = "NameSupply"; static constexpr const bool _type_has_method_sequal_reduce = false; static constexpr const bool _type_has_method_shash_reduce = false; TVM_DECLARE_FINAL_OBJECT_INFO(NameSupplyNode, Object); private: /*! \brief Helper function to add the NameSupply prefix to the name. */ String add_prefix_to_name(const String& name); /*! * \brief Function that will generate a unique name. * \param name The name to be used as a base. * \return A unique name. */ std::string GetUniqueName(std::string name); /*! \brief A map that is used to generate unique names. */ std::unordered_map<std::string, int> name_map; }; /*! * \brief Managed reference class to NameSupplyNode. * \sa NameSupplyNode */ class NameSupply : public ObjectRef { public: /*! * \brief Constructor. 
* \param prefix The prefix to be used with this NameSupply. * \param name_map An optional map. */ TVM_DLL explicit NameSupply(const String& prefix, std::unordered_map<std::string, int> name_map = {}); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(NameSupply, ObjectRef, NameSupplyNode); }; } // namespace tvm #endif // TVM_IR_NAME_SUPPLY_H_
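A sketch of deduplicating names with a prefixed NameSupply, assuming a TVM build environment; the prefix and names are illustrative, and the exact generated strings depend on the supply's internal prefixing scheme.

#include <tvm/ir/name_supply.h>

// Generate unique, prefixed names and check what has been handed out.
void NameExample() {
  tvm::NameSupply supply("tvmgen_default");
  tvm::String a = supply->FreshName("conv2d");  // prefixed, e.g. "tvmgen_default_conv2d"
  tvm::String b = supply->FreshName("conv2d");  // deduplicated on the second request
  bool seen = supply->ContainsName("conv2d");   // true after the calls above
  (void)a; (void)b; (void)seen;
}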
https://github.com/zk-ml/tachikoma
include/tvm/ir/op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/op.h * \brief Primitive operators(builtin intrinsics) * and registry for them. */ #ifndef TVM_IR_OP_H_ #define TVM_IR_OP_H_ #include <dmlc/registry.h> #include <tvm/ir/attrs.h> #include <tvm/ir/expr.h> #include <tvm/ir/type.h> #include <tvm/ir/type_relation.h> #include <tvm/node/attr_registry_map.h> #include <tvm/runtime/registry.h> #include <string> #include <utility> #include <vector> namespace tvm { // forward declare name. template <typename> class OpAttrMap; // TODO(tvm-team): migrate low-level intrinsics to use Op /*! * \brief Primitive Op(builtin intrinsics) * * This data structure stores the meta-data * about primitive operators that can be invoked via Call. * * Low-level IR intrinsics(such as libc.expf) are also * implemented via Op. * * \sa Op */ class OpNode : public RelayExprNode { public: /*! \brief name of the operator */ String name; /*! \brief the type of the operator */ mutable FuncType op_type; /*! * \brief detailed description of the operator * This can be used to generate docstring automatically for the operator. */ String description; /* \brief Information of input arguments to the operator */ Array<AttrFieldInfo> arguments; /*! * \brief The type key of the attribute field * This can be empty, in which case it defaults to anything. */ String attrs_type_key; /*! * \brief attribute type index, * this field varies in each run and is not exposed to frontend. */ uint32_t attrs_type_index{0}; /*! * \brief number of input arguments to the operator, * -1 means it is variable length */ int32_t num_inputs = -1; /*! * \brief support level of the operator, * The lower the more priority it contains. * This is in analogies to BLAS levels. */ int32_t support_level = 10; void VisitAttrs(AttrVisitor* v) { v->Visit("name", &name); v->Visit("op_type", &op_type); v->Visit("description", &description); v->Visit("arguments", &arguments); v->Visit("attrs_type_key", &attrs_type_key); v->Visit("num_inputs", &num_inputs); v->Visit("support_level", &support_level); } bool SEqualReduce(const OpNode* other, SEqualReducer equal) const { // pointer equality is fine as there is only one op with the same name. return this == other; } void SHashReduce(SHashReducer hash_reduce) const { // Name uniquely identifies an Op. hash_reduce(name); } /*! * \brief Check that if current op is a "primtive operator". * That is the arguments are all type variables, and there is a single * type relation applied to the input and output types. */ bool IsPrimitiveOp() const { if (is_primitive_ != -1) return is_primitive_ != 0; is_primitive_ = this->IsPrimitiveOp_() ? 1 : 0; return is_primitive_ != 0; } static constexpr const char* _type_key = "Op"; TVM_DECLARE_FINAL_OBJECT_INFO(OpNode, RelayExprNode); private: /*! 
\return the internal attr registry index. */ uint32_t AttrRegistryIndex() const { return index_; } /*! \brief repr to be printed in registry*/ std::string AttrRegistryName() const { return name; } // friend class template <typename> friend class AttrRegistryMapContainerMap; template <typename, typename> friend class AttrRegistry; friend class OpRegEntry; friend bool IsPrimitiveOp(const RelayExpr&); // Program internal unique index of operator. // Used to help index the program. uint32_t index_{0}; // whether this is a primitive op. -1 means unknown. mutable int is_primitive_{-1}; // Internal function to compute if it is primitive op bool IsPrimitiveOp_() const { const auto& fn_ty = this->op_type; ICHECK(fn_ty.get() != nullptr) << "op_type of " << this->name << " is not registered"; if (fn_ty->type_constraints.size() != 1) return false; const TypeRelationNode* rel = fn_ty->type_constraints[0].as<TypeRelationNode>(); if (rel == nullptr) return false; // validate if the type parameter matches up for (size_t i = 0; i < fn_ty->type_params.size(); ++i) { if (!fn_ty->type_params[i].same_as(rel->args[i])) return false; } return true; } }; /*! * \brief Managed reference class to OpNode. * \sa OpNode */ class Op : public RelayExpr { public: /*! * \brief Get additional registered attribute about operators. * If nothing has been registered, an empty OpAttrMap will be returned. * \param attr_name The name of the attribute. * \return An OpAttrMap of specified attr_name. * \tparam ValueType The type of the attribute. */ template <typename ValueType> inline static OpAttrMap<ValueType> GetAttrMap(const String& attr_name); /*! * \brief Checks if an attr map is present in the registry. * \param attr_name The name of the attribute. * \return bool True if the attr is present. */ TVM_DLL static bool HasAttrMap(const String& attr_name); /*! * \brief Get an Op for a given operator name. * Will raise an error if the op has not been registered. * \param op_name Name of the operator. * \return Pointer to a Op, valid throughout program lifetime. */ TVM_DLL static const Op& Get(const String& op_name); TVM_DEFINE_OBJECT_REF_METHODS(Op, RelayExpr, OpNode) private: /*! * \brief Get generic attrmap given attr name * \param key The attribute key * \return The attr map. */ TVM_DLL static const AttrRegistryMapContainerMap<Op>& GetAttrMapContainer(const String& key); }; /*! * \brief Helper structure to register operators * \sa TVM_REGISTER_OP */ class OpRegEntry { public: /*! \return the operator */ const Op& op() const { return op_; } /*! * \brief setter function during registration * Set the description of operator * \param descr the description string. * \return reference to self. */ inline OpRegEntry& describe(const std::string& descr); // NOLINT(*) /*! * \brief Add argument information to the function. * \param name Name of the argument. * \param type Type of the argument. * \param description Description of the argument. * \return reference to self. */ inline OpRegEntry& add_argument(const std::string& name, const std::string& type, const std::string& description); /*! * \brief Attach the type function corresponding to the return type. * \param rel_name The type relation name to register. * \param type_rel_func The backing relation function which can solve an arbitrary * relation on variables. * \return reference to self. */ inline OpRegEntry& add_type_rel( const std::string& rel_name, runtime::TypedPackedFunc<bool(const Array<Type>&, int, const Attrs&, const TypeReporter&)> type_rel_func); /*! 
* \brief Set the attrs type key and index to be AttrsType. * \tparam AttrsType the attribute type to b set. * \return reference to self. */ template <typename AttrsType> inline OpRegEntry& set_attrs_type(); /*! * \brief Set the attrs type key and index to be AttrsType. * \param key The attribute type key to be set. * \return reference to self. */ inline OpRegEntry& set_attrs_type_key(const String& key); /*! * \brief Set the num_inputs * \param n The number of inputs to be set. * \return reference to self. */ inline OpRegEntry& set_num_inputs(int32_t n); // NOLINT(*) /*! * \brief Set the support level of op. * \param level The support level. * \return reference to self. */ inline OpRegEntry& set_support_level(int32_t level); // NOLINT(*) /*! * \brief Register additional attributes to operator. * \param attr_name The name of the attribute. * \param value The value to be set. * \param plevel The priority level of this set, * an higher priority level attribute * will replace lower priority level attribute. * Must be bigger than 0. * * Cannot set with same plevel twice in the code. * * \tparam ValueType The type of the value to be set. */ template <typename ValueType> inline OpRegEntry& set_attr(const std::string& attr_name, // NOLINT(*) const ValueType& value, int plevel = 10); /*! * \brief Resets an attr of the registry. * \param attr_name The name of the attribute. */ inline void reset_attr(const std::string& attr_name); // set the name of the op to be the same as registry inline OpRegEntry& set_name() { // NOLINT(*) if (get()->name.length() == 0) { get()->name = name; } return *this; } /*! * \brief Register or get a new entry. * \param name The name of the operator. * \return the corresponding entry. */ TVM_DLL static OpRegEntry& RegisterOrGet(const String& name); private: template <typename, typename> friend class AttrRegistry; // the name std::string name; /*! \brief The operator */ Op op_; // private constructor TVM_DLL OpRegEntry(uint32_t reg_index); // return internal pointer to op. inline OpNode* get(); // update the attribute OpAttrMap TVM_DLL void UpdateAttr(const String& key, runtime::TVMRetValue value, int plevel); }; /*! * \brief Map<Op,ValueType> used to store meta-information about Op. * \tparam ValueType The type of the value stored in map. */ template <typename ValueType> class OpAttrMap : public AttrRegistryMap<Op, ValueType> { public: /*! * \brief get the corresponding value element at op with default value. * \param expr The key to the map * \param def_value The default value when the key does not exist * or if expr is not an Op. * \return the const reference to the content value. */ inline ValueType get(const RelayExpr& expr, ValueType def_value) const; using TParent = AttrRegistryMap<Op, ValueType>; using TParent::count; using TParent::get; using TParent::operator[]; private: friend class Op; // constructor explicit OpAttrMap(const AttrRegistryMapContainerMap<Op>& map) : TParent(map) {} }; // internal macros to make #define TVM_OP_REGISTER_VAR_DEF static DMLC_ATTRIBUTE_UNUSED ::tvm::OpRegEntry& __make_##Op /*! * \def TVM_REGISTER_OP * \brief Register a new operator, or set attribute of the corresponding op. 
* * \param OpName The name of registry * * \code * * TVM_REGISTER_OP("add") * .describe("add two inputs together") * .set_num_inputs(2) * .set_attr<OpKernel>("gpu_kernel", AddKernel); * * \endcode */ #define TVM_REGISTER_OP(OpName) \ TVM_STR_CONCAT(TVM_OP_REGISTER_VAR_DEF, __COUNTER__) = \ ::tvm::OpRegEntry::RegisterOrGet(OpName).set_name() // implementations template <typename ValueType> inline OpAttrMap<ValueType> Op::GetAttrMap(const String& key) { return OpAttrMap<ValueType>(Op::GetAttrMapContainer(key)); } inline OpNode* OpRegEntry::get() { return const_cast<OpNode*>(op_.operator->()); } inline OpRegEntry& OpRegEntry::describe(const std::string& descr) { // NOLINT(*) get()->description = descr; return *this; } inline OpRegEntry& OpRegEntry::add_argument(const std::string& name, const std::string& type, const std::string& description) { auto n = make_object<AttrFieldInfoNode>(); n->name = name; n->type_info = type; n->description = description; get()->arguments.push_back(AttrFieldInfo(n)); return *this; } inline OpRegEntry& OpRegEntry::add_type_rel( const std::string& rel_name, runtime::TypedPackedFunc<bool(const Array<Type>&, int, const Attrs&, const TypeReporter&)> type_rel_func) { auto func_name = std::string("tvm.relay.type_relation.") + rel_name; TypeRelationFn env_type_rel_func; if (runtime::Registry::Get(func_name)) { auto env_func = EnvFunc::Get(func_name); env_type_rel_func = env_func; } else { runtime::Registry::Register(func_name).set_body(type_rel_func.packed()); auto env_func = EnvFunc::Get(func_name); env_type_rel_func = env_func; } Array<TypeVar> type_params; Array<Type> arg_types; // Add inputs. std::string input_name_prefix = "in"; for (int i = 0; i < get()->num_inputs; i++) { auto name = input_name_prefix + std::to_string(i); auto param = TypeVar(name, TypeKind::kType); type_params.push_back(param); arg_types.push_back(param); } Array<Type> ty_call_args = arg_types; // Add output type. auto out_param = TypeVar("out", TypeKind::kType); type_params.push_back(out_param); // this will trigger copy on write. ty_call_args.push_back(out_param); // The attributes of primitive op is nullptr // // The attributes of primitive operator can vary at the call site. // The type of sum is also dependent on Attrs being passed. // So puting nullptr in the Attrs means that the operator is polymorphic on Attrs. // // A common example is sum(x, axis), where the choice of axis // can affect the type of the function. 
TypeConstraint type_rel = TypeRelation(env_type_rel_func, ty_call_args, arg_types.size(), Attrs()); auto func_type = FuncType(arg_types, out_param, type_params, {type_rel}); get()->op_type = func_type; return *this; } inline OpRegEntry& OpRegEntry::set_num_inputs(int32_t n) { // NOLINT(*) get()->num_inputs = n; return *this; } template <typename AttrsType> inline OpRegEntry& OpRegEntry::set_attrs_type() { // NOLINT(*) get()->attrs_type_key = AttrsType::_type_key; get()->attrs_type_index = AttrsType::RuntimeTypeIndex(); return *this; } inline OpRegEntry& OpRegEntry::set_attrs_type_key(const String& key) { // NOLINT(*) get()->attrs_type_key = key; get()->attrs_type_index = Object::TypeKey2Index(key); return *this; } inline OpRegEntry& OpRegEntry::set_support_level(int32_t n) { // NOLINT(*) get()->support_level = n; return *this; } template <typename ValueType> inline OpRegEntry& OpRegEntry::set_attr( // NOLINT(*) const std::string& attr_name, const ValueType& value, int plevel) { ICHECK_GT(plevel, 0) << "plevel in set_attr must be greater than 0"; runtime::TVMRetValue rv; rv = value; UpdateAttr(attr_name, rv, plevel); return *this; } // member functions of OpAttrMap template <typename ValueType> inline ValueType OpAttrMap<ValueType>::get(const RelayExpr& expr, ValueType def_value) const { ICHECK(expr.defined()); if (const OpNode* op = expr.as<OpNode>()) { return this->map_.get(GetRef<Op>(op), def_value); } else { return def_value; } } /*! * \brief Check that an expression is a "primitive operator". * * Will return true if the expression is an operator which * matches the form of primitive operators registered directly * by the Relay codebase. * * That is the arguments are all type variables, and there is a single * type relation applied to the input and output types. * * \param expr An expression. * \return Whether the expression is primitive op. */ inline bool IsPrimitiveOp(const RelayExpr& expr) { const auto* op = expr.as<OpNode>(); return op != nullptr && op->IsPrimitiveOp(); } } // namespace tvm #endif // TVM_IR_OP_H_
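As a usage note, the registration and lookup entry points declared above compose as in the following minimal sketch (a hypothetical .cc file linked against TVM; the op name "my.add" and the attribute key "TExampleKind" are illustrative and not defined by this header):

#include <tvm/ir/op.h>

namespace tvm {

// Register an illustrative operator and attach a string attribute to it.
TVM_REGISTER_OP("my.add")
    .describe("Elementwise addition of two inputs (illustrative).")
    .set_num_inputs(2)
    .add_argument("lhs", "Tensor", "The left operand.")
    .add_argument("rhs", "Tensor", "The right operand.")
    .set_support_level(1)
    .set_attr<String>("TExampleKind", "elemwise", /*plevel=*/10);

// Look the op up by name and read the attribute back through an OpAttrMap.
void QueryExampleOp() {
  const Op& op = Op::Get("my.add");
  OpAttrMap<String> kind_map = Op::GetAttrMap<String>("TExampleKind");
  String kind = kind_map.get(op, /*def_value=*/"unknown");
  (void)kind;  // "elemwise" once the registration above has run
}

}  // namespace tvm

The attribute map is shared across all operators registered under the same attribute key, which is why lookups go through the static Op::GetAttrMap rather than through the individual OpNode.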
https://github.com/zk-ml/tachikoma
include/tvm/ir/span.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/span.h * \brief Span information for debugging purposes. */ #ifndef TVM_IR_SPAN_H_ #define TVM_IR_SPAN_H_ #include <tvm/node/node.h> #include <tvm/runtime/object.h> #include <string> namespace tvm { /*! * \brief The source name in the Span * \sa SourceNameNode, Span */ class SourceName; /*! * \brief The name of a source fragment. */ class SourceNameNode : public Object { public: /*! \brief The source name. */ String name; // override attr visitor void VisitAttrs(AttrVisitor* v) { v->Visit("name", &name); } static constexpr bool _type_has_method_sequal_reduce = true; bool SEqualReduce(const SourceNameNode* other, SEqualReducer equal) const { return equal(name, other->name); } static constexpr const char* _type_key = "SourceName"; TVM_DECLARE_FINAL_OBJECT_INFO(SourceNameNode, Object); }; /*! * \brief The source name of a file span. * \sa SourceNameNode, Span */ class SourceName : public ObjectRef { public: /*! * \brief Get an SourceName for a given operator name. * Will raise an error if the source name has not been registered. * \param name Name of the operator. * \return SourceName valid throughout program lifetime. */ TVM_DLL static SourceName Get(const String& name); TVM_DEFINE_OBJECT_REF_METHODS(SourceName, ObjectRef, SourceNameNode); }; /*! * \brief Span information for debugging purposes */ class Span; /*! * \brief Stores locations in frontend source that generated a node. */ class SpanNode : public Object { public: /*! \brief The source name. */ SourceName source_name; /*! \brief The line number. */ int line; /*! \brief The column offset. */ int column; /*! \brief The end line number. */ int end_line; /*! \brief The end column number. */ int end_column; // override attr visitor void VisitAttrs(AttrVisitor* v) { v->Visit("source_name", &source_name); v->Visit("line", &line); v->Visit("column", &column); v->Visit("end_line", &end_line); v->Visit("end_column", &end_column); } static constexpr bool _type_has_method_sequal_reduce = true; bool SEqualReduce(const SpanNode* other, SEqualReducer equal) const { return equal(source_name, other->source_name) && equal(line, other->line) && equal(column, other->column) && equal(end_line, other->end_line) && equal(end_column, other->end_column); } static constexpr const char* _type_key = "Span"; TVM_DECLARE_FINAL_OBJECT_INFO(SpanNode, Object); }; class Span : public ObjectRef { public: TVM_DLL Span(SourceName source_name, int line, int end_line, int column, int end_column); /*! \brief Merge two spans into one which captures the combined regions. */ TVM_DLL Span Merge(const Span& other) const; TVM_DEFINE_OBJECT_REF_METHODS(Span, ObjectRef, SpanNode); }; } // namespace tvm #endif // TVM_IR_SPAN_H_
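A minimal sketch of how these span objects are typically constructed and combined for debug reporting; the source file name is illustrative:

#include <tvm/ir/span.h>

namespace tvm {

// Build two spans over the same (hypothetical) source and merge them.
Span ExampleSpan() {
  SourceName src = SourceName::Get("example_source.relay");  // interned by name
  Span head(src, /*line=*/1, /*end_line=*/1, /*column=*/0, /*end_column=*/12);
  Span tail(src, /*line=*/3, /*end_line=*/4, /*column=*/2, /*end_column=*/9);
  return head.Merge(tail);  // covers the combined region of both spans
}

}  // namespace tvm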
https://github.com/zk-ml/tachikoma
include/tvm/ir/tensor_type.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/tensor_type.h * \brief Polymorphic tensor types. */ #ifndef TVM_IR_TENSOR_TYPE_H_ #define TVM_IR_TENSOR_TYPE_H_ #include <tvm/ir/expr.h> #include <tvm/ir/type.h> namespace tvm { /*! * \brief Base of all Tensor types * This container can hold TensorType or GenericTensorType. * \sa BaseTensorType, TensorTypeNode */ class BaseTensorTypeNode : public TypeNode { public: static constexpr const char* _type_key = "relay.BaseTensorType"; static constexpr const uint32_t _type_child_slots = 1; TVM_DECLARE_BASE_OBJECT_INFO(BaseTensorTypeNode, TypeNode); }; /*! * \brief Managed reference to BaseTensorTypeNode. * \sa BaseTensorTypeNode. */ class BaseTensorType : public Type { public: TVM_DEFINE_OBJECT_REF_METHODS(BaseTensorType, Type, BaseTensorTypeNode); }; /*! * \brief This is the most commonly used type in relay. * TensorType have a fixed dimension, data type. * * The elements of shape can be either IntImm(constant integer), * or any symbolic integer expression. * The symbolic integer allows generic shape inference in certain cases. * \sa TensorType */ class TensorTypeNode : public BaseTensorTypeNode { public: /*! * \brief The shape of the tensor, * represented by PrimExpr(tvm::Expr). */ Array<PrimExpr> shape; /*! \brief The content data type */ DataType dtype; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("shape", &shape); v->Visit("dtype", &dtype); v->Visit("span", &span); } bool SEqualReduce(const TensorTypeNode* other, SEqualReducer equal) const { return equal(shape, other->shape) && equal(dtype, other->dtype); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(shape); hash_reduce(dtype); } /*! \brief Return product of elements in the shape. * \return (d1 * d_2 ... * d_n) if shape is (d_1, d_2, ..., d_n) and 1 if shape size is zero. */ TVM_DLL PrimExpr Size() const; static constexpr const char* _type_key = "relay.TensorType"; TVM_DECLARE_FINAL_OBJECT_INFO(TensorTypeNode, BaseTensorTypeNode); }; /*! * \brief Managed reference to TensorTypeNode. * \sa TensorTypeNode. */ class TensorType : public Type { public: /*! * \brief Constructor. * \param shape The shape of the tensor. * \param dtype The runtime dtype of the tensor's elements. */ TVM_DLL TensorType(Array<PrimExpr> shape, DataType dtype); /*! * \brief Construct an scalar containing elements of dtype. * \param dtype The runtime dtype of the tensor's elements. * \return THe constructed type. */ TVM_DLL static TensorType Scalar(DataType dtype); TVM_DEFINE_OBJECT_REF_METHODS(TensorType, Type, TensorTypeNode); }; // The following fields contains advanced typing // Only keep the class name and reserved for future usage. class GenericTensorType; // stores a DataType. class GenericDataType; // stores a DataType. 
class GenericShape; } // namespace tvm #endif // TVM_IR_TENSOR_TYPE_H_
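A short sketch of building a TensorType with one symbolic dimension, assuming tir::Var from <tvm/tir/var.h> is used for the symbolic extent; the helper name is illustrative:

#include <tvm/ir/tensor_type.h>
#include <tvm/tir/var.h>

namespace tvm {

// A rank-2 tensor type (n, 128) of float32 with a symbolic leading dimension.
TensorType ExampleTensorType() {
  tir::Var n("n", DataType::Int(32));
  TensorType ty({n, 128}, DataType::Float(32));
  PrimExpr num_elems = ty->Size();  // n * 128, still a symbolic PrimExpr
  (void)num_elems;
  return ty;
}

}  // namespace tvm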
https://github.com/zk-ml/tachikoma
include/tvm/ir/transform.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/transform.h * * This file implements a pass manager. The pass manager manages a sequence * of IRModule -> IRModule transformation passes over a particlar unit of AST. The * design is largely inspired from LLVM's pass manager and modern deep learning * frameworks that perform tensor->tensor transformations. * * The responsibilities of a traditional compiler pass manager usually involves: * - Organizing the execution order of optimization passes though not * necessarily in the optimal sequence. * - Collecting required analysis information and keep them up-to-date. * - Reducing the effort required to implement new passes for compiler * developers, etc. * * Similar to LLVM's pass manager, we designed the Relay pass manager to work * different granularity, i.e. module level, function level, and even sequential * passe that contains a host of passes. * * However, we also extend the functionality of the traditional pass manager * with the consideration of requirements/convention from deep learning * frameworks, such as Pytorch and Gluon, etc. Each pass in the Relay pass * manager performs the IRModule -> IRModule transformation. All * different types of passes, including the sequential-level pass object, are * essentially pass objects. This design, therefore, effectively provides users * a consistent and convenient interface, i.e. Pass, to play with. It offers a * means to ease the development and testing of Relay passes. For example, with * the pass manager, external users will be able to have custom passes correctly * scheduled without having to modify a single handcrafted pass order. * * In the future we need to describe constraints between passes. For example, * we may want to preserve dependencies between different passes and validate * them on the completion of a certain pass. * * We also need to store side information and import the error reporting system. */ #ifndef TVM_IR_TRANSFORM_H_ #define TVM_IR_TRANSFORM_H_ #include <tvm/ir/diagnostic.h> #include <tvm/ir/error.h> #include <tvm/ir/instrument.h> #include <tvm/ir/module.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/string.h> #include <tvm/support/with.h> #include <string> #include <utility> namespace tvm { namespace transform { /*! * \brief PassContextNode contains the information that a pass can rely on, * such as analysis results. * \sa PassContext */ class PassContextNode : public Object { public: /*! \brief The default optimization level. */ int opt_level{2}; /*! \brief The list of required passes. */ Array<String> required_pass; /*! \brief The list of disabled passes. */ Array<String> disabled_pass; /*! \brief The diagnostic context. */ mutable Optional<DiagnosticContext> diag_ctx; /*! 
\brief Pass specific configurations. */ Map<String, ObjectRef> config; /*! \brief A list of pass instrument implementations. */ Array<instrument::PassInstrument> instruments; PassContextNode() = default; /*! * \brief Get a config value from the pass context. * * \param key The config key. * \param default_value The default value if the key does not exist, defaults to nullptr. * * \return The result * * \tparam TOBjectRef the expected object type. * \throw Error if the key exists but the value does not match TObjectRef. */ template <typename TObjectRef> Optional<TObjectRef> GetConfig(const std::string& key, Optional<TObjectRef> default_value = Optional<TObjectRef>(nullptr)) const { static_assert(std::is_base_of<ObjectRef, TObjectRef>::value, "Can only call GetAttr with ObjectRef types."); if (!config.defined()) return default_value; auto it = config.find(key); if (it != config.end()) { return Downcast<Optional<TObjectRef>>((*it).second); } else { return default_value; } } // variant that uses TObjectRef to enable implicit conversion to default value. template <typename TObjectRef> Optional<TObjectRef> GetConfig(const std::string& key, TObjectRef default_value) const { return GetConfig<TObjectRef>(key, Optional<TObjectRef>(default_value)); } void VisitAttrs(AttrVisitor* v) { v->Visit("opt_level", &opt_level); v->Visit("required_pass", &required_pass); v->Visit("disabled_pass", &disabled_pass); v->Visit("instruments", &instruments); v->Visit("config", &config); v->Visit("diag_ctx", &diag_ctx); } static constexpr const char* _type_key = "transform.PassContext"; static constexpr bool _type_has_method_sequal_reduce = false; TVM_DECLARE_FINAL_OBJECT_INFO(PassContextNode, Object); }; /*! * \brief PassContext that is used to configure the pass behavior. * * \code * * auto new_ctx = PassContext::Create(); * ctx->opt_level = 2; * With<PassContext> scope(ctx); * // pass context in effect. * * \endcode * \sa PassContextNode */ class PassContext : public ObjectRef { public: PassContext() {} explicit PassContext(ObjectPtr<Object> n) : ObjectRef(n) {} /*! * \brief const accessor. * \return const access pointer. */ const PassContextNode* operator->() const { ICHECK(get() != nullptr); return static_cast<const PassContextNode*>(get()); } /*! * \brief mutable accessor. * \return mutable access pointer. */ PassContextNode* operator->() { ICHECK(get() != nullptr); return static_cast<PassContextNode*>(get_mutable()); } /*! * \brief Construct a PassContext containing the default configurations. * \return The new PassContext. */ TVM_DLL static PassContext Create(); /*! * \brief Get the default pass context in the current scope. * \return The pass context. */ TVM_DLL static PassContext Current(); /*! * \brief Get all supported configuration names and metadata, registered within the PassContext. * \return Map indexed by the config name, pointing to the metadata map as key-value */ TVM_DLL static Map<String, Map<String, String>> ListConfigs(); /*! * \brief Call instrument implementations' callbacks when entering PassContext. * The callbacks are called in order, and if one raises an exception, the rest will not be * called. */ TVM_DLL void InstrumentEnterPassContext(); /*! * \brief Call instrument implementations' callbacks when exiting PassContext. * The callbacks are called in order, and if one raises an exception, the rest will not be * called. */ TVM_DLL void InstrumentExitPassContext(); /*! * \brief Call instrument implementations' callbacks before a pass run. 
* The callbacks are called in order, and if one raises an exception, the rest will not be * called. * * \param mod The module that an optimization pass runs on. * \param info The pass information. * * \return false: the pass is skipped; true: the pass runs. */ TVM_DLL bool InstrumentBeforePass(const IRModule& mod, const PassInfo& info) const; /*! * \brief Call instrument implementations callbacks after a pass run. * The callbacks are called in order, and if one raises an exception, the rest will not be * called. * * \param mod The module that an optimization pass runs on. * \param info The pass information. */ TVM_DLL void InstrumentAfterPass(const IRModule& mod, const PassInfo& info) const; /*! * \brief Check whether a pass is enabled. * \param info The pass information. * \return true if the pass is enabled. Otherwise, false. */ TVM_DLL bool PassEnabled(const PassInfo& info) const; /*! * \brief Register a valid configuration option and its ValueType for validation. * * \param key The configuration key. * \tparam ValueType The value type to be registered */ template <typename ValueType> static uint32_t RegisterConfigOption(const char* key) { using ValueNodeType = typename ValueType::ContainerType; // NOTE: we could further update the function later. uint32_t tindex = ValueNodeType::_GetOrAllocRuntimeTypeIndex(); RegisterConfigOption(key, tindex); return tindex; } // accessor. using ContainerType = PassContextNode; class Internal; private: // The entry of a pass context scope. TVM_DLL void EnterWithScope(); // The exit of a pass context scope. TVM_DLL void ExitWithScope(); // Register configuration key value type. TVM_DLL static void RegisterConfigOption(const char* key, uint32_t value_type_index); // Classes to get the Python `with` like syntax. friend class Internal; friend class With<PassContext>; }; #define TVM_PASS_CTX_CONFIG_VAR_DEF static TVM_ATTRIBUTE_UNUSED uint32_t __make_PassContext_tid /*! * \brief Helper macro to register the object type to runtime. * Makes sure that the runtime type table is correctly populated. * * Use this macro in the cc file for each terminal class. */ #define TVM_REGISTER_PASS_CONFIG_OPTION(Key, ValueType) \ TVM_STR_CONCAT(TVM_PASS_CTX_CONFIG_VAR_DEF, __COUNTER__) = \ ::tvm::transform::PassContext::RegisterConfigOption<ValueType>(Key) /*! * \brief Meta data that will be used to help optimization and analysis. * \sa PassInfo */ class PassInfoNode : public Object { public: /*! \brief The minimal optimization level that this pass will be enabled. */ int opt_level; /*! \brief The name of an optimization/analysis pass. */ String name; /*! \brief The passes that are required to perform the current pass. */ Array<String> required; PassInfoNode() = default; void VisitAttrs(AttrVisitor* v) { v->Visit("opt_level", &opt_level); v->Visit("name", &name); v->Visit("required", &required); } static constexpr const char* _type_key = "transform.PassInfo"; static constexpr bool _type_has_method_sequal_reduce = false; TVM_DECLARE_FINAL_OBJECT_INFO(PassInfoNode, Object); }; /*! * \brief Managed reference class for PassInfoNode * \sa PassInfoNode */ class PassInfo : public ObjectRef { public: /*! * \brief Constructor * \param opt_level The optimization level * \param name Name of the pass. * \param required The passes that are required to perform the current pass. */ TVM_DLL PassInfo(int opt_level, String name, Array<runtime::String> required); TVM_DEFINE_OBJECT_REF_METHODS(PassInfo, ObjectRef, PassInfoNode); }; /*! 
* \brief PassNode is the base type of differnt types of optimization passes. * It is designed as a pure class and implemented by different pass subclasses * at different granularity of Relay nodes. */ class PassNode : public Object { public: virtual ~PassNode() {} /*! * \brief Get the pass information/meta data. */ virtual PassInfo Info() const = 0; /*! * \brief Transform mod using the default PassContext in the current scope. * * \param mod The module that an optimization pass runs on. * * \return The transformed module. */ IRModule operator()(IRModule mod) const { return this->operator()(std::move(mod), PassContext::Current()); } /*! * \brief Transform mod using a functor under a given pass context. * * \param mod The module that an optimization pass runs on. * \param pass_ctx The pass context that can provide information for the optimization. * * \return The transformed module. */ virtual IRModule operator()(IRModule mod, const PassContext& pass_ctx) const = 0; void VisitAttrs(AttrVisitor* v) {} static constexpr const char* _type_key = "transform.Pass"; TVM_DECLARE_BASE_OBJECT_INFO(PassNode, Object); }; class Pass : public ObjectRef { public: /*! * \brief Transform mod using the default PassContext in the current scope. * * \code * * // If you do no longer need the input module * // it is recommended to use std::move to move your input module. * mod = pass(std::move(mod)); * * \endcode * * \param mod The module that an optimization pass runs on. * * \return The transformed module. */ IRModule operator()(IRModule mod) const; /*! * \brief Transform mod using a functor under a given pass context. * * \param mod The module that an optimization pass runs on. * \param pass_ctx The pass context that can provide information for the optimization. * * \return The transformed module. */ IRModule operator()(IRModule mod, const PassContext& pass_ctx) const; TVM_DEFINE_OBJECT_REF_METHODS(Pass, ObjectRef, PassNode); private: IRModule static AssertImmutableModule(const IRModule& mod, const PassNode* node, const PassContext& pass_ctx); }; /*! * \brief The SequentialNode contains a set of passes that transform Relay * programs from one AST to another semantically equivalent one. * * One example of this level of pass is that the pass manager needs to correctly * perform a host of optimizations with a given optimization level and disabled * passes. */ class SequentialNode : public PassNode { public: /* \brief The pass meta data.*/ PassInfo pass_info; /*! \brief A list of passes that used to compose a sequential pass. */ tvm::Array<Pass> passes; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("pass_info", &pass_info); v->Visit("passes", &passes); } /*! * \brief Get the pass information/meta data. */ PassInfo Info() const override { return pass_info; } /*! * \brief Resolve the pass dependency. It globs all required passes by * a given pass and executes them. * * \param mod The module that an optimization pass runs on. * * \return The updated module after resolving pass dependencies. * * TODO(zhiics) Build a dependency graph among the passes using provided * metadata, i.e. required_passes. Likely, we can have a data structure, i.e. * PassInfo, to store the relevant information including the parent passes. */ void ResolveDependency(const IRModule& mod); /*! * \brief Perform optimizations on a series of passes. The aforementioned * typical pass manager jobs could be done by it. This function could * be overloaded to focus on different metrics, i.e. performance, * memory footprint, etc. 
* * \param mod The module that these passes are applied on. * \param pass_ctx The context that these passes execute on. * * \return Return the updated module. */ IRModule operator()(IRModule mod, const PassContext& pass_ctx) const final; static constexpr const char* _type_key = "transform.Sequential"; TVM_DECLARE_FINAL_OBJECT_INFO(SequentialNode, PassNode); }; class Sequential : public Pass { public: /*! * \brief The constructor of `Sequential`. * * \param passes The passes to apply. * \param pass_info The pass metadata. */ TVM_DLL Sequential(Array<Pass> passes, PassInfo pass_info); /*! * \brief The constructor of `Sequential`. * * \param passes The passes to apply. * \param name The name of a sequential pass. It's defaulted to "sequential". * This allows users to only provide a list of passes and execute them * under a given context. */ TVM_DLL Sequential(Array<Pass> passes, String name = "sequential"); Sequential() = default; explicit Sequential(ObjectPtr<Object> n) : Pass(n) {} const SequentialNode* operator->() const; using ContainerType = SequentialNode; }; /* * \brief Create a module pass. * * \param pass_func The packed function that contains the optimization. * \param opt_level The optimization level of the module pass. * \param name The name of the module pass. * \param required The list of the passes that the module pass is dependent on. * * \return The created module pass. */ TVM_DLL Pass CreateModulePass(const runtime::TypedPackedFunc<IRModule(IRModule, PassContext)>& pass_func, int opt_level, String name, Array<runtime::String> required); /*! * \brief A special trace pass that prints the header and IR to LOG(INFO). * \param header The header to be attached to the output. * \param show_meta_data Whether should we show meta data. * \return The pass. */ TVM_DLL Pass PrintIR(String header = "", bool show_meta_data = false); } // namespace transform } // namespace tvm #endif // TVM_IR_TRANSFORM_H_
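A minimal sketch of defining a no-op module pass and running it under a configured PassContext, following the pattern the header itself documents; the pass name and helper functions are illustrative:

#include <tvm/ir/module.h>
#include <tvm/ir/transform.h>

namespace tvm {
namespace transform {

// A pass that returns the module unchanged; a real pass would rewrite it here.
Pass MyIdentityPass() {
  auto pass_func = [](IRModule mod, PassContext ctx) -> IRModule { return mod; };
  return CreateModulePass(pass_func, /*opt_level=*/0, "MyIdentityPass",
                          /*required=*/Array<String>{});
}

IRModule RunExample(IRModule mod) {
  PassContext ctx = PassContext::Create();
  ctx->opt_level = 3;
  With<PassContext> scope(ctx);  // make ctx the current context in this scope
  Sequential seq({MyIdentityPass(), PrintIR("after MyIdentityPass")});
  return seq(std::move(mod));
}

}  // namespace transform
}  // namespace tvm

Moving the module into the pass call, as recommended in the Pass documentation above, avoids an unnecessary copy-on-write when the caller no longer needs the input module.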
https://github.com/zk-ml/tachikoma
include/tvm/ir/type.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/type.h * \brief IR/AST nodes for the unified type system in TVM. * * We use Relay's type system as the unified type system * throughout the stack. * * This file contains types that are common across IR variants. * * ## Relation between Type and runtime::DataType * * Besides Type, we also store a dtype field in the low-level PrimExpr. * runtime::DataType(dtype) provides coarse grained type information * during compile time and runtime. It is eagerly built in * low-level expression construction and can be used for * quick type checking in the low-level IR. * For example, when an Expr's dtype is int32, * we know for sure that its type is also int32. * * On the other hand, Type provides more fine grained information. * For example, a low level expression can have DataType::Handle() as * its dtype and MemRef[float32] as its type. * Types are usually lazily constructed via type checking, * so they may not readily be available during IR construction. * * The unified Type serves as a common bridge across IR dialects. * For example, we require all the functions to have a type signature, * which allow us to build cross dialect function calls. */ #ifndef TVM_IR_TYPE_H_ #define TVM_IR_TYPE_H_ #include <tvm/ir/span.h> #include <tvm/node/node.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/data_type.h> #include <tvm/runtime/object.h> #include <string> namespace tvm { /*! * \brief Type is the base type of all types. * * Relay's type system contains following subclasses: * * - PrimType: type of primitive type values used in the low-level IR. * - FuncType: type of a function. * - TensorType: type of certain Tensor values in the expression. * * There are also advanced types to support generic(polymorphic types). * \sa Type */ class TypeNode : public Object { public: /*! * \brief Span that points to the original source code. * Reserved debug information. */ mutable Span span; static constexpr const char* _type_key = "Type"; static constexpr const bool _type_has_method_sequal_reduce = true; static constexpr const bool _type_has_method_shash_reduce = true; static constexpr const uint32_t _type_child_slots = 14; TVM_DECLARE_BASE_OBJECT_INFO(TypeNode, Object); }; /*! * \brief Managed reference to TypeNode. * \sa TypeNode */ class Type : public ObjectRef { public: TVM_DEFINE_OBJECT_REF_METHODS(Type, ObjectRef, TypeNode); }; /*! * \brief Primitive data types used in the low-level IR. * * PrimType represents POD-values and handles that are * not automatically managed by the runtime. * * \sa PrimType */ class PrimTypeNode : public TypeNode { public: /*! * \brief The corresponding dtype field. 
*/ runtime::DataType dtype; void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); } bool SEqualReduce(const PrimTypeNode* other, SEqualReducer equal) const { return equal(dtype, other->dtype); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(dtype); } static constexpr const char* _type_key = "PrimType"; TVM_DECLARE_FINAL_OBJECT_INFO(PrimTypeNode, TypeNode); }; /* * \brief Managed reference to PrimTypeNode. * \sa PrimTypeNode */ class PrimType : public Type { public: /*! * \brief Constructor * \param dtype The corresponding dtype. */ TVM_DLL explicit PrimType(runtime::DataType dtype); TVM_DEFINE_OBJECT_REF_METHODS(PrimType, Type, PrimTypeNode); }; /*! * \brief Low-level raw pointer type. * * PointerType represents type hints in the TIR to be * passed to the final code generator. * * PointerType should not occur in the high-level analysis. * * \sa PointerType */ class PointerTypeNode : public TypeNode { public: /*! * \brief The type of the element which the pointer points to. */ Type element_type; /*! * \brief The storage scope of the pointer */ String storage_scope; void VisitAttrs(AttrVisitor* v) { v->Visit("element_type", &element_type); v->Visit("storage_scope", &storage_scope); } bool SEqualReduce(const PointerTypeNode* other, SEqualReducer equal) const { // Make "global" equal to "" String lhs_scope = storage_scope.empty() ? "global" : storage_scope; String rhs_scope = other->storage_scope.empty() ? "global" : other->storage_scope; return equal(element_type, other->element_type) && equal(lhs_scope, rhs_scope); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(element_type); // Make "global" equal to "" hash_reduce(storage_scope.empty() ? "global" : storage_scope); } static constexpr const char* _type_key = "PointerType"; TVM_DECLARE_FINAL_OBJECT_INFO(PointerTypeNode, TypeNode); }; /* * \brief Managed reference to PointerTypeNode. * \sa PointerTypeNode */ class PointerType : public Type { public: /*! * \brief Constructor * \param element_type The type of the element which the pointer points to. * \param storage_scope The storage scope into which the pointer addresses */ TVM_DLL explicit PointerType(Type element_type, String storage_scope = ""); TVM_DEFINE_OBJECT_REF_METHODS(PointerType, Type, PointerTypeNode); }; /*! \brief Possible kinds of TypeVars. */ enum TypeKind : int { kType = 0, /*! \brief Template variable in shape expression. */ kShapeVar = 1, kBaseType = 2, kConstraint = 4, kAdtHandle = 5, kTypeData = 6 }; /*! * \brief Type parameter in functions. * * A type variable can be viewed as template parameter in c++ template function. * * For example, in the following pesudo code, * the TypeVar of f is TypeVar("n", kind=kShapeVar). * This function can take in a Tensor with shape=(3, 3) and * returns a Tensor with shape=(9,) * * \code * * template<i32 n> * f(x : Tensor[i32, (n, n)]) -> Tensor[i32, (n * n)] * * \endcode * \sa TypeVar, TypeKind */ class TypeVarNode : public TypeNode { public: /*! * \brief The name of the variable, * this only acts as a hint to the user, * and is not used for equality. */ String name_hint; /*! 
\brief The kind of type parameter */ TypeKind kind; void VisitAttrs(AttrVisitor* v) { v->Visit("name_hint", &name_hint); v->Visit("kind", &kind); v->Visit("span", &span); } bool SEqualReduce(const TypeVarNode* other, SEqualReducer equal) const { return equal(kind, other->kind) && equal.FreeVarEqualImpl(this, other); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(kind); hash_reduce.FreeVarHashImpl(this); } static constexpr const char* _type_key = "TypeVar"; TVM_DECLARE_FINAL_OBJECT_INFO(TypeVarNode, TypeNode); }; /*! * \brief Managed reference to TypeVarNode * \sa TypeVarNode */ class TypeVar : public Type { public: /*! * \brief Constructor * \param name_hint The name of the type var. * \param kind The kind of the type var. * \param span The span information. */ TVM_DLL TypeVar(String name_hint, TypeKind kind, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(TypeVar, Type, TypeVarNode); }; /*! * \brief A global type variable that is used for defining new types or type aliases. * \sa GlobalTypeVar */ class GlobalTypeVarNode : public TypeNode { public: /*! * \brief The name of the variable, * this only acts as a hint to the user, * and is not used for equality. */ String name_hint; /*! \brief The kind of type parameter */ TypeKind kind; void VisitAttrs(AttrVisitor* v) { v->Visit("name_hint", &name_hint); v->Visit("kind", &kind); } bool SEqualReduce(const GlobalTypeVarNode* other, SEqualReducer equal) const { // name matters for now in global type var. return equal(name_hint, other->name_hint) && equal.FreeVarEqualImpl(this, other); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(name_hint); hash_reduce.FreeVarHashImpl(this); } static constexpr const char* _type_key = "GlobalTypeVar"; TVM_DECLARE_FINAL_OBJECT_INFO(GlobalTypeVarNode, TypeNode); }; /*! * \brief Managed reference to GlobalTypeVarNode * \sa GlobalTypeVarNode */ class GlobalTypeVar : public Type { public: /*! * \brief Constructor * \param name_hint The name of the type var. * \param kind The kind of the type var. * \param span The span of the type. */ TVM_DLL GlobalTypeVar(String name_hint, TypeKind kind, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(GlobalTypeVar, Type, GlobalTypeVarNode); }; /*! * \brief The type of tuple values. * \sa TupleType */ class TupleTypeNode : public TypeNode { public: /*! \brief The type of each field in the tuple. */ Array<Type> fields; TupleTypeNode() {} void VisitAttrs(AttrVisitor* v) { v->Visit("fields", &fields); v->Visit("span", &span); } bool SEqualReduce(const TupleTypeNode* other, SEqualReducer equal) const { return equal(fields, other->fields); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(fields); } static constexpr const char* _type_key = "TupleType"; TVM_DECLARE_FINAL_OBJECT_INFO(TupleTypeNode, TypeNode); }; /*! * \brief Managed reference to TupleTypeNode. * \sa TupleTypeNode. */ class TupleType : public Type { public: /*! * \brief Constructor * \param fields Fields in the tuple. * \param span The span of the type. */ TVM_DLL explicit TupleType(Array<Type> fields, Span span = Span()); /*! * \brief Create an empty tuple type that constains nothing. * \return A empty tuple type. */ TVM_DLL TupleType static Empty(); TVM_DEFINE_OBJECT_REF_METHODS(TupleType, Type, TupleTypeNode); }; /*! * \return a type that represents void. */ inline Type VoidType() { return TupleType::Empty(); } /*! * \brief Check whether the tyep represents void. * \return The check result. 
*/ inline bool IsVoidType(const Type& type) { auto* n = type.as<TupleTypeNode>(); return n && n->fields.size() == 0; } /*! * \brief Potential Constraints in a function. * \sa TypeConstraint */ class TypeConstraintNode : public TypeNode { public: static constexpr const char* _type_key = "TypeConstraint"; static constexpr const uint32_t _type_child_slots = 1; TVM_DECLARE_BASE_OBJECT_INFO(TypeConstraintNode, TypeNode); }; /*! * \brief Managed reference to TypeConstraintNode. * \sa TypeConstraintNode, TypeRelation */ class TypeConstraint : public Type { public: TVM_DEFINE_OBJECT_REF_METHODS(TypeConstraint, Type, TypeConstraintNode); }; /*! * \brief Function type. * * We support polymorphic function type. * This can be roughly viewed as template function in C++. * * \sa FuncType, TypeVar, TypeConstraint */ class FuncTypeNode : public TypeNode { public: /*! \brief type type of arguments */ Array<Type> arg_types; /*! \brief The type of return value. */ Type ret_type; // The following fields are used in polymorphic(template) functions // For normal functions, the following two fields will be empty. /*! \brief The type parameters of the function */ Array<TypeVar> type_params; /*! * \brief potential constraint the type need to obey * \note this field is reserved for further purposes. */ Array<TypeConstraint> type_constraints; void VisitAttrs(AttrVisitor* v) { v->Visit("arg_types", &arg_types); v->Visit("ret_type", &ret_type); v->Visit("type_params", &type_params); v->Visit("type_constraints", &type_constraints); v->Visit("span", &span); } bool SEqualReduce(const FuncTypeNode* other, SEqualReducer equal) const { // type params first as they defines type vars. return equal.DefEqual(type_params, other->type_params) && equal(arg_types, other->arg_types) && equal(ret_type, other->ret_type) && equal(type_constraints, other->type_constraints); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce.DefHash(type_params); hash_reduce(arg_types); hash_reduce(ret_type); hash_reduce(type_constraints); } static constexpr const char* _type_key = "FuncType"; TVM_DECLARE_FINAL_OBJECT_INFO(FuncTypeNode, TypeNode); }; /*! * \brief Managed reference to FuncTypeNode. * \sa FuncTypeNode */ class FuncType : public Type { public: /*! * \brief Constructor * \param arg_types The types of the arguments. * \param ret_type The type of the return value. * \param type_params The type parameters. * \param type_constraints The type constraints. * \param span The span information. * \sa FuncTypeNode for more docs about these fields. */ TVM_DLL FuncType(Array<Type> arg_types, Type ret_type, Array<TypeVar> type_params, Array<TypeConstraint> type_constraints, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(FuncType, Type, FuncTypeNode); }; /*! * \brief Intermediate values that is used to indicate incomplete type * during type inference. * * If we view the type relations as "computational graph of types", * then IncompleteType represents intermediate values of the graph, * TypeVar represents the input to the graph. * * \sa IncompleteType */ class IncompleteTypeNode : public TypeNode { public: /*! \brief kind of the type. 
*/ TypeKind kind; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("kind", &kind); v->Visit("span", &span); } bool SEqualReduce(const IncompleteTypeNode* other, SEqualReducer equal) const { return equal(kind, other->kind) && equal.FreeVarEqualImpl(this, other); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(kind); } static constexpr const char* _type_key = "IncompleteType"; TVM_DECLARE_FINAL_OBJECT_INFO(IncompleteTypeNode, TypeNode); }; /*! * \brief Managed reference to IncompleteTypeNode. * \sa IncompleteTypeNode */ class IncompleteType : public Type { public: /*! * \brief Constructor. * \param kind kind of the type. * \param span The span information. */ TVM_DLL explicit IncompleteType(TypeKind kind, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(IncompleteType, Type, IncompleteTypeNode); }; /*! * \brief Reference Type High-level Relay IR. * * \sa RelayRefType. */ class RelayRefTypeNode : public TypeNode { public: /*! \brief The type of value in the Reference. */ Type value; RelayRefTypeNode() {} void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("value", &value); v->Visit("span", &span); } bool SEqualReduce(const RelayRefTypeNode* other, SEqualReducer equal) const { return equal(value, other->value); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(value); } // Keep the relay prefix in the type as this type is specific // to the relay itself. static constexpr const char* _type_key = "relay.RefType"; TVM_DECLARE_FINAL_OBJECT_INFO(RelayRefTypeNode, TypeNode); }; /*! * \brief Managed reference to RelayRefTypeNode. * \sa RelayRefTypeNode. */ class RelayRefType : public Type { public: TVM_DLL explicit RelayRefType(Type value, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(RelayRefType, Type, RelayRefTypeNode); }; } // namespace tvm #endif // TVM_IR_TYPE_H_
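A small sketch assembling a few of these type nodes into a function signature; the helper name is illustrative:

#include <tvm/ir/type.h>

namespace tvm {

// The function type (int32, float32*) -> void, with no type parameters
// and no type constraints.
FuncType ExampleFuncType() {
  Type i32 = PrimType(DataType::Int(32));
  Type f32_ptr = PointerType(PrimType(DataType::Float(32)), "global");
  return FuncType(/*arg_types=*/{i32, f32_ptr}, /*ret_type=*/VoidType(),
                  /*type_params=*/{}, /*type_constraints=*/{});
}

}  // namespace tvm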
https://github.com/zk-ml/tachikoma
include/tvm/ir/type_functor.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/type_functor.h * \brief A way to defined arbitrary function signature with dispatch on types. */ #ifndef TVM_IR_TYPE_FUNCTOR_H_ #define TVM_IR_TYPE_FUNCTOR_H_ #include <tvm/node/functor.h> #include <tvm/relay/adt.h> #include <tvm/relay/expr.h> #include <string> #include <utility> #include <vector> namespace tvm { template <typename FType> class TypeFunctor; // functions to be overriden. #define TYPE_FUNCTOR_DEFAULT \ { return VisitTypeDefault_(op, std::forward<Args>(args)...); } #define TVM_TYPE_FUNCTOR_DISPATCH(OP) \ vtable.template set_dispatch<OP>([](const ObjectRef& n, TSelf* self, Args... args) { \ return self->VisitType_(static_cast<const OP*>(n.get()), std::forward<Args>(args)...); \ }); template <typename R, typename... Args> class TypeFunctor<R(const Type& n, Args...)> { private: using TSelf = TypeFunctor<R(const Type& n, Args...)>; using FType = tvm::NodeFunctor<R(const ObjectRef& n, TSelf* self, Args...)>; public: /*! \brief the result type of this functor */ using result_type = R; /*! \brief virtual destructor */ virtual ~TypeFunctor() {} /*! * \brief Same as call. * \param n The expression node. * \param args Additional arguments. * \return The result of the call */ R operator()(const Type& n, Args... args) { return VisitType(n, std::forward<Args>(args)...); } /*! * \brief The functor call. * \param n The expression node. * \param args Additional arguments. * \return The result of the call */ virtual R VisitType(const Type& n, Args... args) { ICHECK(n.defined()); static FType vtable = InitVTable(); return vtable(n, this, std::forward<Args>(args)...); } // Functions that can be overriden by subclass virtual R VisitType_(const TensorTypeNode* op, Args... args) TYPE_FUNCTOR_DEFAULT; virtual R VisitType_(const TypeVarNode* op, Args... args) TYPE_FUNCTOR_DEFAULT; virtual R VisitType_(const TypeConstraintNode* op, Args... args) TYPE_FUNCTOR_DEFAULT; virtual R VisitType_(const FuncTypeNode* op, Args... args) TYPE_FUNCTOR_DEFAULT; virtual R VisitType_(const TypeRelationNode* op, Args... args) TYPE_FUNCTOR_DEFAULT; virtual R VisitType_(const TupleTypeNode* op, Args... args) TYPE_FUNCTOR_DEFAULT; virtual R VisitType_(const IncompleteTypeNode* op, Args... args) TYPE_FUNCTOR_DEFAULT; virtual R VisitType_(const RelayRefTypeNode* op, Args... args) TYPE_FUNCTOR_DEFAULT; virtual R VisitType_(const GlobalTypeVarNode* op, Args... args) TYPE_FUNCTOR_DEFAULT; virtual R VisitType_(const TypeCallNode* op, Args... args) TYPE_FUNCTOR_DEFAULT; virtual R VisitType_(const TypeDataNode* op, Args... args) TYPE_FUNCTOR_DEFAULT; virtual R VisitType_(const PrimTypeNode* op, Args... args) TYPE_FUNCTOR_DEFAULT; virtual R VisitType_(const PointerTypeNode* op, Args... 
args) TYPE_FUNCTOR_DEFAULT; virtual R VisitTypeDefault_(const Object* op, Args...) { LOG(FATAL) << "Do not have a default for " << op->GetTypeKey(); throw; // unreachable, written to stop compiler warning } private: // initialize the vtable. static FType InitVTable() { FType vtable; // Set dispatch TVM_TYPE_FUNCTOR_DISPATCH(TensorTypeNode); TVM_TYPE_FUNCTOR_DISPATCH(TypeVarNode); TVM_TYPE_FUNCTOR_DISPATCH(TypeConstraintNode); TVM_TYPE_FUNCTOR_DISPATCH(FuncTypeNode); TVM_TYPE_FUNCTOR_DISPATCH(TypeRelationNode); TVM_TYPE_FUNCTOR_DISPATCH(TupleTypeNode); TVM_TYPE_FUNCTOR_DISPATCH(IncompleteTypeNode); TVM_TYPE_FUNCTOR_DISPATCH(RelayRefTypeNode); TVM_TYPE_FUNCTOR_DISPATCH(GlobalTypeVarNode); TVM_TYPE_FUNCTOR_DISPATCH(TypeCallNode); TVM_TYPE_FUNCTOR_DISPATCH(TypeDataNode); TVM_TYPE_FUNCTOR_DISPATCH(PrimTypeNode); TVM_TYPE_FUNCTOR_DISPATCH(PointerTypeNode); return vtable; } }; #undef TVM_TYPE_FUNCTOR_DISPATCH /*! * \brief A type visitor that recursively visit types. */ class TVM_DLL TypeVisitor : public TypeFunctor<void(const Type& n)> { public: void VisitType_(const TypeVarNode* op) override; void VisitType_(const IncompleteTypeNode* op) override; void VisitType_(const TensorTypeNode* op) override; void VisitType_(const FuncTypeNode* op) override; void VisitType_(const TupleTypeNode* op) override; void VisitType_(const TypeRelationNode* op) override; void VisitType_(const RelayRefTypeNode* op) override; void VisitType_(const GlobalTypeVarNode* op) override; void VisitType_(const TypeCallNode* op) override; void VisitType_(const TypeDataNode* op) override; void VisitType_(const PrimTypeNode* op) override; void VisitType_(const PointerTypeNode* op) override; }; /*! * \brief TypeMutator that mutates expressions. */ class TVM_DLL TypeMutator : public TypeFunctor<Type(const Type& n)> { public: Type VisitType(const Type& t) override; Type VisitType_(const TypeVarNode* op) override; Type VisitType_(const TensorTypeNode* op) override; Type VisitType_(const IncompleteTypeNode* op) override; Type VisitType_(const FuncTypeNode* op) override; Type VisitType_(const TupleTypeNode* op) override; Type VisitType_(const TypeRelationNode* type_rel) override; Type VisitType_(const RelayRefTypeNode* op) override; Type VisitType_(const GlobalTypeVarNode* op) override; Type VisitType_(const TypeCallNode* op) override; Type VisitType_(const TypeDataNode* op) override; Type VisitType_(const PrimTypeNode* op) override; Type VisitType_(const PointerTypeNode* op) override; private: Array<Type> MutateArray(Array<Type> arr); }; /*! * \brief Bind free type variables in the type. * \param type The type to be updated. * \param args_map The binding map. */ Type Bind(const Type& type, const Map<TypeVar, Type>& args_map); } // namespace tvm #endif // TVM_IR_TYPE_FUNCTOR_H_
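A minimal sketch of subclassing TypeVisitor to collect information from a type tree; the counter class and function are illustrative:

#include <tvm/ir/type_functor.h>

namespace tvm {

// Count how many TensorType nodes occur anywhere inside a type.
class TensorTypeCounter : public TypeVisitor {
 public:
  void VisitType_(const TensorTypeNode* op) final {
    ++count_;
    TypeVisitor::VisitType_(op);  // continue the default traversal
  }
  int count_{0};
};

int CountTensorTypes(const Type& t) {
  TensorTypeCounter counter;
  counter(t);  // operator() dispatches through the functor's vtable
  return counter.count_;
}

}  // namespace tvm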
https://github.com/zk-ml/tachikoma
include/tvm/ir/type_relation.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/ir/type_relation.h * \brief Type relation and function for type inference(checking). */ #ifndef TVM_IR_TYPE_RELATION_H_ #define TVM_IR_TYPE_RELATION_H_ #include <tvm/ir/attrs.h> #include <tvm/ir/diagnostic.h> #include <tvm/ir/env_func.h> #include <tvm/ir/module.h> #include <tvm/ir/type.h> #include <tvm/runtime/logging.h> namespace tvm { /*! * \brief Type function application. * \sa TypeCall */ class TypeCallNode : public TypeNode { public: /*! * \brief The type-level function (ADT that takes type params). */ Type func; /*! \brief The arguments. */ Array<Type> args; void VisitAttrs(AttrVisitor* v) { v->Visit("func", &func); v->Visit("args", &args); v->Visit("span", &span); } bool SEqualReduce(const TypeCallNode* other, SEqualReducer equal) const { return equal(func, other->func) && equal(args, other->args); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(func); hash_reduce(args); } static constexpr const char* _type_key = "TypeCall"; TVM_DECLARE_FINAL_OBJECT_INFO(TypeCallNode, TypeNode); }; /*! * \brief Managed reference to TypeCallNode. * \sa TypeCallNode */ class TypeCall : public Type { public: /*! * \brief Constructor * \param func The type function to apply. * \param args The arguments to the type function. */ TVM_DLL TypeCall(Type func, Array<Type> args); TVM_DEFINE_OBJECT_REF_METHODS(TypeCall, Type, TypeCallNode); }; /*! * \brief reporter that reports back to the * type resolution information. */ class TypeReporterNode : public Object { public: /*! \brief virtual destructor */ virtual ~TypeReporterNode() {} /*! * \brief Create a type equality constraint. * * The "assign direction" acts as a hint to the solver * showing that it is more likely to resolve dst by src. * But it is possible for the solver to resolve src by dst as well. */ TVM_DLL virtual void Assign(const Type& dst, const Type& src) = 0; /*! * \brief assert shape expression comparison. * \note Use assert only if any of the condition input is symbolic. * \param cond The condition of operation. * \return false if assertion can be proven to have failed * true if solver can still proceed. */ TVM_DLL virtual bool Assert(const PrimExpr& cond) = 0; /*! * \brief assert shape expression equals each other. * \param lhs The left operand. * \param rhs The right operand. * \return false if assertion can be proven to have failed * true if solver can still proceed. */ TVM_DLL virtual bool AssertEQ(const PrimExpr& lhs, const PrimExpr& rhs) = 0; /*! * \brief Set the location at which to report unification errors. * \param span The span at which to report the error. */ TVM_DLL virtual void SetSpan(const Span& span) = 0; TVM_DLL virtual Span GetSpan() = 0; TVM_DLL virtual DiagnosticContext GetDiagCtx() = 0; /*! 
* \brief Retrieve the current global module. * \return The global module. */ TVM_DLL virtual IRModule GetModule() = 0; // solver is not serializable. void VisitAttrs(AttrVisitor* v) {} static constexpr const char* _type_key = "TypeReporter"; TVM_DECLARE_FINAL_OBJECT_INFO(TypeReporterNode, Object); }; /*! * \brief Container class of TypeReporter. * \sa TypeReporterNode */ class TypeReporter : public ObjectRef { public: TypeReporter() {} explicit TypeReporter(ObjectPtr<Object> n) : ObjectRef(n) {} TypeReporterNode* operator->() const { return const_cast<TypeReporterNode*>(static_cast<const TypeReporterNode*>(get())); } using ContainerType = TypeReporterNode; }; /*! * \brief User defined type constraint function. * * If the input type information can be used to fully decide * the IncompleteTypes, then the function should call * reporter.Assign to report the new types, and return true. * Otherwise, the function should return false. * * \param args The arguments to the relation. * The types are stored in the form of * [input_type_0, input_type_1, ... input_type_n, * output_type_0, output_type_1, ... output_type_m] * * \param num_inputs Number of input types in the args. * \param attrs The additional attributes of the operator. * \param reporter The reporter to report solution to. * \return false if This relation cannot be resolved. * true if this relation has been resolved. */ using TypeRelationFn = TypedEnvFunc<bool(const Array<Type>& args, int num_inputs, const Attrs& attrs, const TypeReporter& reporter)>; /*! * \brief User defined type relation, it is an input-output relation on types. * * TypeRelation is more generalized than type call as it allows inference * of both inputs and outputs. * * \sa TypeRelation */ class TypeRelationNode : public TypeConstraintNode { public: /*! * \brief The function on input and output variables which * this is not directly serializable, * need to be looked-up in the module. */ TypeRelationFn func; /*! \brief The type arguments to the type function. */ Array<Type> args; /*! \brief Number of inputs arguments */ int num_inputs; /*! \brief Attributes to the relation function */ Attrs attrs; void VisitAttrs(AttrVisitor* v) { v->Visit("func", &func); v->Visit("args", &args); v->Visit("num_inputs", &num_inputs); v->Visit("attrs", &attrs); v->Visit("span", &span); } bool SEqualReduce(const TypeRelationNode* other, SEqualReducer equal) const { return equal(func, other->func) && equal(args, other->args) && equal(num_inputs, other->num_inputs) && equal(attrs, other->attrs); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(func); hash_reduce(args); hash_reduce(num_inputs); hash_reduce(attrs); } static constexpr const char* _type_key = "TypeRelation"; TVM_DECLARE_FINAL_OBJECT_INFO(TypeRelationNode, TypeConstraintNode); }; /*! * \brief Managed reference to TypeRelationNode. * \sa TypeRelationNode */ class TypeRelation : public TypeConstraint { public: /*! * \brief Constructor * \param func The relation function. * \param args The arguments to the type relation. * \param num_inputs Number of inputs. * \param attrs Attributes to the relation function. * \sa TypeRelationNode for more docs about these fields. */ TVM_DLL TypeRelation(TypeRelationFn func, Array<Type> args, int num_inputs, Attrs attrs); TVM_DEFINE_OBJECT_REF_METHODS(TypeRelation, TypeConstraint, TypeRelationNode); }; } // namespace tvm #endif // TVM_IR_TYPE_RELATION_H_
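A sketch of writing a type relation and attaching it to an operator through OpRegEntry::add_type_rel from tvm/ir/op.h; the relation name and op name are illustrative:

#include <tvm/ir/op.h>
#include <tvm/ir/type_relation.h>

namespace tvm {

// A relation that simply makes the output type equal to the first input type.
bool IdentityRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                 const TypeReporter& reporter) {
  // types is laid out as [input_0, ..., input_{n-1}, output_0].
  reporter->Assign(/*dst=*/types[num_inputs], /*src=*/types[0]);
  return true;
}

// add_type_rel wraps IdentityRel into an EnvFunc and derives the op's
// FuncType (one fresh TypeVar per input plus one for the output) from it.
TVM_REGISTER_OP("my.identity")
    .set_num_inputs(1)
    .add_argument("data", "Tensor", "The input value.")
    .add_type_rel("MyIdentity", IdentityRel);

}  // namespace tvm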
https://github.com/zk-ml/tachikoma
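As a rough illustration of the TypeRelationFn convention documented in the header above (the `args` array holds the input types followed by the output types, and the reporter assigns resolved types), a minimal identity relation might look like the sketch below. The function name and the checks are illustrative only and are not part of the header.

#include <tvm/ir/type_relation.h>

namespace tvm {

// Resolve the single output type to be the same as the single input type.
inline bool IdentityRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                        const TypeReporter& reporter) {
  ICHECK_EQ(types.size(), 2U) << "expects [input_type, output_type]";
  ICHECK_EQ(num_inputs, 1);
  // Report the output type as resolved from the input type.
  reporter->Assign(/*dst=*/types[1], /*src=*/types[0]);
  return true;  // the relation has been fully resolved
}

}  // namespace tvm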
include/tvm/meta_schedule/arg_info.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_ARG_INFO_H_ #define TVM_META_SCHEDULE_ARG_INFO_H_ #include <tvm/ir/module.h> #include <tvm/node/node.h> #include <tvm/node/reflection.h> #include <tvm/runtime/container/shape_tuple.h> #include <tvm/runtime/data_type.h> #include <tvm/runtime/object.h> #include <tvm/tir/function.h> namespace tvm { namespace meta_schedule { /*! \brief The argument information. */ class ArgInfoNode : public runtime::Object { public: static constexpr const char* _type_key = "meta_schedule.ArgInfo"; TVM_DECLARE_BASE_OBJECT_INFO(ArgInfoNode, runtime::Object); public: /*! \brief Default destructor. */ virtual ~ArgInfoNode() = default; /*! \brief Converts the ArgInfo to its corresponding JSON representation. */ virtual ObjectRef AsJSON() const = 0; }; /*! * \brief Managed reference to ArgInfoNode * \sa ArgInfoNode */ class ArgInfo : public runtime::ObjectRef { public: /*! * \brief Parse the argument information from a JSON object. * \param json_obj The json object to parse. * \return The argument information parsed. */ TVM_DLL static ArgInfo FromJSON(const ObjectRef& json_obj); /*! * \brief Extract a list of the argument information from PrimFunc. * \param func The PrimFunc to get argument information from. * \return An array of the argument information derived. */ TVM_DLL static Array<ArgInfo, void> FromPrimFunc(const tir::PrimFunc& func); /*! * \brief Extract a list of the argument information from the entry func of an IRModule * \param mod The IRModule to extract argument information from. * \param remove_preproc Whether to remove the preprocessing blocks. * \return An array of the argument information derived. */ TVM_DLL static Array<ArgInfo, void> FromEntryFunc(const IRModule& mod, bool remove_preproc); TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(ArgInfo, runtime::ObjectRef, ArgInfoNode); protected: ArgInfo() = default; }; /*! \brief The tensor argument information. */ class TensorInfoNode : public ArgInfoNode { public: /*! \brief The data type of the tensor. */ runtime::DataType dtype; /*! \brief The shape of the tensor. */ runtime::ShapeTuple shape; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("shape", &shape); } static constexpr const char* _type_key = "meta_schedule.TensorInfo"; TVM_DECLARE_FINAL_OBJECT_INFO(TensorInfoNode, ArgInfoNode); public: ObjectRef AsJSON() const; }; /*! * \brief Managed reference to TensorInfoNode * \sa TensorInfoNode */ class TensorInfo : public ArgInfo { public: /*! * \brief Constructor of TensorInfo. * \param dtype The data type of the tensor argument. * \param shape The shape tuple of the tensor argument. */ TVM_DLL explicit TensorInfo(runtime::DataType dtype, runtime::ShapeTuple shape); /*! 
* \brief Parse the argument information from a JSON object. * \param json_obj The json object to parse. * \return The argument information parsed. */ TVM_DLL static TensorInfo FromJSON(const ObjectRef& json_obj); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(TensorInfo, ArgInfo, TensorInfoNode); }; } // namespace meta_schedule } // namespace tvm #endif // TVM_META_SCHEDULE_ARG_INFO_H_
https://github.com/zk-ml/tachikoma
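A minimal usage sketch of the TensorInfo API declared above: construct the argument information for a hypothetical float32 tensor and round-trip it through the JSON representation. The shape is an arbitrary example, not something mandated by the header.

#include <tvm/meta_schedule/arg_info.h>

using namespace tvm;
using namespace tvm::meta_schedule;

void TensorInfoExample() {
  // Describe a float32 tensor argument of shape (1, 3, 224, 224); the shape is illustrative.
  TensorInfo info(runtime::DataType::Float(32), runtime::ShapeTuple({1, 3, 224, 224}));
  // Serialize to the JSON representation and parse it back.
  ObjectRef json = info->AsJSON();
  TensorInfo parsed = TensorInfo::FromJSON(json);
  (void)parsed;
}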
include/tvm/meta_schedule/builder.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_BUILDER_H_ #define TVM_META_SCHEDULE_BUILDER_H_ #include <tvm/ir/module.h> #include <tvm/node/reflection.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/map.h> #include <tvm/runtime/container/optional.h> #include <tvm/runtime/container/string.h> #include <tvm/runtime/ndarray.h> #include <tvm/runtime/object.h> #include <tvm/runtime/packed_func.h> #include <tvm/target/target.h> namespace tvm { namespace meta_schedule { /*! \brief The builder's input, containing an IRModule and the target. */ class BuilderInputNode : public runtime::Object { public: /*! \brief The IRModule to be built. */ IRModule mod; /*! \brief The target to be built for. */ Target target; /*! \brief Parameters for Relay build module. */ Optional<Map<String, runtime::NDArray>> params; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("mod", &mod); v->Visit("target", &target); v->Visit("params", &params); } static constexpr const char* _type_key = "meta_schedule.BuilderInput"; TVM_DECLARE_FINAL_OBJECT_INFO(BuilderInputNode, runtime::Object); }; /*! * \brief Managed reference to BuilderInputNode * \sa BuilderInputNode */ class BuilderInput : public runtime::ObjectRef { public: /*! * \brief Constructor of BuilderInput. * \param mod The IRModule to be built. * \param target The target to be built for. * \param params Parameters for Relay build module. */ TVM_DLL explicit BuilderInput(IRModule mod, Target target, Optional<Map<String, runtime::NDArray>> params = NullOpt); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(BuilderInput, runtime::ObjectRef, BuilderInputNode); }; /*! \brief The builder's output, containing the artifact path or error message if any. */ class BuilderResultNode : public runtime::Object { public: /*! \brief The path to the built artifact. */ Optional<String> artifact_path; /*! \brief The error message if any. */ Optional<String> error_msg; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("artifact_path", &artifact_path); v->Visit("error_msg", &error_msg); } static constexpr const char* _type_key = "meta_schedule.BuilderResult"; TVM_DECLARE_FINAL_OBJECT_INFO(BuilderResultNode, runtime::Object); }; /*! * \brief Managed reference to BuilderResultNode * \sa BuilderResultNode */ class BuilderResult : public runtime::ObjectRef { public: /*! * \brief Constructor of BuilderResult. * \param artifact_path The path to the built artifact. * \param error_msg The error message if any. */ TVM_DLL explicit BuilderResult(Optional<String> artifact_path, Optional<String> error_msg); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(BuilderResult, runtime::ObjectRef, BuilderResultNode); }; /*! \brief The abstract builder interface. */ class BuilderNode : public runtime::Object { public: /*! 
\brief Default destructor */ virtual ~BuilderNode() = default; /*! * \brief Generate the build results from build inputs. * \param build_inputs The inputs to be built. * \return The build results. */ virtual Array<BuilderResult> Build(const Array<BuilderInput>& build_inputs) = 0; /*! * \brief The function type of `Build` method. * \param build_inputs The inputs to be built. * \return The build results. */ using FBuild = runtime::TypedPackedFunc<Array<BuilderResult>(const Array<BuilderInput>&)>; static constexpr const char* _type_key = "meta_schedule.Builder"; TVM_DECLARE_BASE_OBJECT_INFO(BuilderNode, runtime::Object); }; /*! * \brief Managed reference to BuilderNode * \sa BuilderNode */ class Builder : public runtime::ObjectRef { public: /*! * \brief Create a builder with customized build method on the python-side. * \param f_build The packed function to the `Build` function.. * \return The Builder created. */ static Builder PyBuilder(BuilderNode::FBuild f_build); TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(Builder, runtime::ObjectRef, BuilderNode); }; /*! \brief An abstract builder with customized build method on the python-side. */ class PyBuilderNode : public BuilderNode { public: /*! \brief The packed function to the `Build` function. */ FBuild f_build; void VisitAttrs(tvm::AttrVisitor* v) { // `f_build` is not visited } Array<BuilderResult> Build(const Array<BuilderInput>& build_inputs) final { ICHECK(f_build != nullptr) << "PyBuilder's Build method not implemented!"; return f_build(build_inputs); } static constexpr const char* _type_key = "meta_schedule.PyBuilder"; TVM_DECLARE_FINAL_OBJECT_INFO(PyBuilderNode, BuilderNode); }; } // namespace meta_schedule } // namespace tvm #endif // TVM_META_SCHEDULE_BUILDER_H_
https://github.com/zk-ml/tachikoma
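A sketch of how a custom builder could be assembled from the interface above using Builder::PyBuilder with a lambda for FBuild. The stub below only reports an error message instead of compiling anything, so it is a placeholder rather than a working builder.

#include <tvm/meta_schedule/builder.h>

using namespace tvm;
using namespace tvm::meta_schedule;

Builder MakeStubBuilder() {
  BuilderNode::FBuild f_build = [](const Array<BuilderInput>& inputs) -> Array<BuilderResult> {
    Array<BuilderResult> results;
    for (const BuilderInput& input : inputs) {
      // A real builder would compile input->mod for input->target and return an artifact path.
      (void)input;
      results.push_back(BuilderResult(/*artifact_path=*/NullOpt,
                                      /*error_msg=*/String("build not implemented")));
    }
    return results;
  };
  return Builder::PyBuilder(f_build);
}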
include/tvm/meta_schedule/cost_model.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_COST_MODEL_H_ #define TVM_META_SCHEDULE_COST_MODEL_H_ #include <tvm/meta_schedule/arg_info.h> #include <tvm/meta_schedule/measure_candidate.h> #include <tvm/meta_schedule/runner.h> #include <tvm/node/reflection.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/string.h> #include <tvm/runtime/object.h> #include <tvm/runtime/packed_func.h> #include <tvm/tir/schedule/schedule.h> #include <vector> namespace tvm { namespace meta_schedule { class TuneContext; /*! \brief Cost model. */ class CostModelNode : public runtime::Object { public: /*! \brief Virtual destructor. */ virtual ~CostModelNode() = default; void VisitAttrs(tvm::AttrVisitor* v) {} /*! * \brief Load the cost model from given file location. * \param path The file path. */ virtual void Load(const String& path) = 0; /*! * \brief Save the cost model to given file location. * \param path The file path. */ virtual void Save(const String& path) = 0; /*! * \brief Update the cost model given running results. * \param context The tuning context. * \param candidates The measure candidates. * \param results The running results of the measure candidates. */ virtual void Update(const TuneContext& context, const Array<MeasureCandidate>& candidates, const Array<RunnerResult>& results) = 0; /*! * \brief Predict the normalized score (the larger the better) of given measure candidates. * \param context The tuning context. * \param candidates The measure candidates. * \return The predicted normalized score. */ virtual std::vector<double> Predict(const TuneContext& context, const Array<MeasureCandidate>& candidates) = 0; static constexpr const char* _type_key = "meta_schedule.CostModel"; TVM_DECLARE_BASE_OBJECT_INFO(CostModelNode, Object); }; /*! \brief The cost model with customized methods on the python-side. */ class PyCostModelNode : public CostModelNode { public: /*! * \brief Load the cost model from given file location. * \param path The file path. */ using FLoad = runtime::TypedPackedFunc<void(String)>; /*! * \brief Save the cost model to given file location. * \param path The file path. */ using FSave = runtime::TypedPackedFunc<void(String)>; /*! * \brief Update the cost model given running results. * \param context The tuning context. * \param candidates The measure candidates. * \param results The running results of the measure candidates. * \return Whether cost model was updated successfully. */ using FUpdate = runtime::TypedPackedFunc<void(const TuneContext&, const Array<MeasureCandidate>&, const Array<RunnerResult>&)>; /*! * \brief Predict the running results of given measure candidates. * \param context The tuning context. * \param candidates The measure candidates. 
* \param p_addr The address to save the estimated running results. */ using FPredict = runtime::TypedPackedFunc<void(const TuneContext&, const Array<MeasureCandidate>&, void* p_addr)>; /*! * \brief Get the cost model as string with name. * \return The string representation of the cost model. */ using FAsString = runtime::TypedPackedFunc<String()>; /*! \brief The packed function to the `Load` function. */ FLoad f_load; /*! \brief The packed function to the `Save` function. */ FSave f_save; /*! \brief The packed function to the `Update` function. */ FUpdate f_update; /*! \brief The packed function to the `Predict` function. */ FPredict f_predict; /*! \brief The packed function to the `AsString` function. */ FAsString f_as_string; void VisitAttrs(tvm::AttrVisitor* v) { // `f_load` is not visited // `f_save` is not visited // `f_update` is not visited // `f_predict` is not visited // `f_as_string` is not visited } void Load(const String& path); void Save(const String& path); void Update(const TuneContext& context, const Array<MeasureCandidate>& candidates, const Array<RunnerResult>& results); std::vector<double> Predict(const TuneContext& context, const Array<MeasureCandidate>& candidates); static constexpr const char* _type_key = "meta_schedule.PyCostModel"; TVM_DECLARE_FINAL_OBJECT_INFO(PyCostModelNode, CostModelNode); }; /*! * \brief Managed reference to CostModelNode * \sa CostModelNode */ class CostModel : public runtime::ObjectRef { public: /*! * \brief Create a feature extractor with customized methods on the python-side. * \param f_load The packed function of `Load`. * \param f_save The packed function of `Save`. * \param f_update The packed function of `Update`. * \param f_predict The packed function of `Predict`. * \param f_as_string The packed function of `AsString`. * \return The feature extractor created. */ TVM_DLL static CostModel PyCostModel(PyCostModelNode::FLoad f_load, // PyCostModelNode::FSave f_save, // PyCostModelNode::FUpdate f_update, // PyCostModelNode::FPredict f_predict, // PyCostModelNode::FAsString f_as_string); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(CostModel, ObjectRef, CostModelNode); }; } // namespace meta_schedule } // namespace tvm #endif // TVM_META_SCHEDULE_COST_MODEL_H_
https://github.com/zk-ml/tachikoma
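The PyCostModelNode declared above can be wired together from packed functions. The sketch below is non-authoritative: it assumes that FPredict's `p_addr` points to a buffer holding one double per measure candidate; everything else uses only the signatures shown in the header.

#include <tvm/meta_schedule/cost_model.h>

using namespace tvm;
using namespace tvm::meta_schedule;

CostModel MakeConstantCostModel() {
  PyCostModelNode::FLoad f_load = [](String path) { /* nothing to load */ };
  PyCostModelNode::FSave f_save = [](String path) { /* nothing to save */ };
  PyCostModelNode::FUpdate f_update = [](const TuneContext&, const Array<MeasureCandidate>&,
                                         const Array<RunnerResult>&) { /* stateless */ };
  PyCostModelNode::FPredict f_predict = [](const TuneContext&,
                                           const Array<MeasureCandidate>& candidates,
                                           void* p_addr) {
    // Assumption: p_addr is a double[candidates.size()] output buffer.
    double* scores = static_cast<double*>(p_addr);
    for (size_t i = 0; i < candidates.size(); ++i) {
      scores[i] = 0.0;  // every candidate gets the same (uninformative) score
    }
  };
  PyCostModelNode::FAsString f_as_string = []() -> String { return "ConstantCostModel"; };
  return CostModel::PyCostModel(f_load, f_save, f_update, f_predict, f_as_string);
}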
include/tvm/meta_schedule/database.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_DATABASE_H_ #define TVM_META_SCHEDULE_DATABASE_H_ #include <tvm/ir/expr.h> #include <tvm/ir/module.h> #include <tvm/meta_schedule/arg_info.h> #include <tvm/node/reflection.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/string.h> #include <tvm/runtime/object.h> #include <tvm/runtime/packed_func.h> #include <tvm/target/target.h> #include <tvm/tir/schedule/schedule.h> #include <tvm/tir/schedule/trace.h> #include <memory> namespace tvm { namespace meta_schedule { class ModuleEquality; /*! \brief A workload, i.e. an IRModule and its structural hash. */ class WorkloadNode : public runtime::Object { public: /*! \brief The type of structural hash */ using THashCode = size_t; /*! \brief The workload's IRModule. */ IRModule mod; /*! \brief The workload's structural hash. */ THashCode shash; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("mod", &mod); // `shash` is not visited because TVM FFI doesn't support uint64_t } static constexpr const char* _type_key = "meta_schedule.Workload"; TVM_DECLARE_FINAL_OBJECT_INFO(WorkloadNode, runtime::Object); /*! * \brief Export the workload to a JSON string. * \return An array containing the structural hash and the base64 json string. */ ObjectRef AsJSON() const; }; /*! * \brief Managed reference to WorkloadNode. * \sa WorkloadNode */ class Workload : public runtime::ObjectRef { public: using THashCode = WorkloadNode::THashCode; /*! * \brief Constructor of Workload. * \param mod The workload's IRModule. */ TVM_DLL explicit Workload(IRModule mod); /*! * \brief Constructor of Workload. * \param mod The workload's IRModule. * \param shash The workload's structural hash. */ TVM_DLL explicit Workload(IRModule mod, THashCode shash); /*! * \brief Create a workload from a json object. * \param json_obj The json object. * \return The created workload. */ TVM_DLL static Workload FromJSON(const ObjectRef& json_obj); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(Workload, runtime::ObjectRef, WorkloadNode); }; /*! \brief The hash method for Workload */ struct WorkloadHash { size_t operator()(const Workload& a) const { return a->shash; } }; /*! \brief The equality check for Workload */ struct WorkloadEqual { explicit WorkloadEqual(const ModuleEquality& mod_eq) : mod_eq_(mod_eq) {} bool operator()(const Workload& a, const Workload& b) const; private: /*! \brief The module equality testing and hashing method */ const ModuleEquality& mod_eq_; }; /*! \brief The class of measure candidates. */ class MeasureCandidate; /*! \brief The class of tuning records. */ class TuningRecordNode : public runtime::Object { public: /*! \brief The trace tuned. */ tir::Trace trace; /*! \brief The workload. */ Workload workload{nullptr}; /*! 
\brief The profiling result in seconds. */ Optional<Array<FloatImm>> run_secs; /*! \brief The target for tuning. */ Optional<Target> target; /*! \brief The argument information. */ Optional<Array<ArgInfo>> args_info; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("trace", &trace); v->Visit("workload", &workload); v->Visit("run_secs", &run_secs); v->Visit("target", &target); v->Visit("args_info", &args_info); } static constexpr const char* _type_key = "meta_schedule.TuningRecord"; TVM_DECLARE_FINAL_OBJECT_INFO(TuningRecordNode, runtime::Object); /*! \brief Construct the measure candidate given the initial IR module and trace * stored in the tuning record. */ MeasureCandidate AsMeasureCandidate() const; /*! * \brief Export the tuning record to a JSON string. * \return An array containing the trace, running secs, serialized target, and * argument information. */ ObjectRef AsJSON() const; }; /*! * \brief The managed reference of TuningRecordNode. * \sa TuningRecordNode */ class TuningRecord : public runtime::ObjectRef { public: /*! \brief Constructor of a tuning record. \param trace The trace of the tuning record. \param workload The workload of the tuning record. \param run_secs The running time of the tuning record. \param target The target of the tuning record. \param args_info The argument information of the tuning record. */ TVM_DLL explicit TuningRecord(tir::Trace trace, Workload workload, Optional<Array<FloatImm>> run_secs, Optional<Target> target, Optional<Array<ArgInfo>> args_info); /*! * \brief Create a tuning record from a json object. * \param json_obj The json object. * \param workload The workload. * \return The tuning record created. */ TVM_DLL static TuningRecord FromJSON(const ObjectRef& json_obj, const Workload& workload); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(TuningRecord, runtime::ObjectRef, TuningRecordNode); }; /* \brief The abstract interface of database. */ class DatabaseNode : public runtime::Object { public: /*! * \brief Constructor * \param mod_eq_name A string to specify the module equality testing and hashing method. * It must be one of the followings: * - "structural": Use StructuralEqual/Hash * - "ignore-ndarray": Same as "structural", but ignore ndarray raw data during * equality testing and hashing. * - "anchor-block": Apply equality testing and hashing on the anchor block extracted from a * given module. The "ignore-ndarray" varint is used for the extracted blocks * or in case no anchor block is found. * For the definition of the anchor block, see tvm/tir/analysis.h. */ explicit DatabaseNode(String mod_eq_name = "structural"); /*! \brief Default destructor */ virtual ~DatabaseNode(); /*! * \brief Check if the database has the given workload. * \param mod The IRModule to be searched for. * \return Whether the database has the given workload. */ virtual bool HasWorkload(const IRModule& mod) = 0; /*! * \brief Look up or add workload to the database if missing. * \param mod The IRModule to be searched for or added. * \return The workload corresponding to the given IRModule. */ virtual Workload CommitWorkload(const IRModule& mod) = 0; /*! * \brief Add a tuning record to the database. * \param record The tuning record to be added. */ virtual void CommitTuningRecord(const TuningRecord& record) = 0; /*! * \brief Get the top K tuning records of given workload from the database. * \param workload The workload to be searched for. * \param top_k The number of top records to be returned. * \return An array of top K tuning records for the given workload. 
*/ virtual Array<TuningRecord> GetTopK(const Workload& workload, int top_k) = 0; /*! * \brief Get all tuning records from the database. * \return An Array of all the tuning records in the database. */ virtual Array<TuningRecord> GetAllTuningRecords() = 0; /*! * \brief Get the size of the database. * \return The size of the database. */ virtual int64_t Size() = 0; /*! * \brief Query the best record of the given workload from the database. * \param mod The IRModule to be searched for. * \param target The target to be searched for. * \param workload_name The name of the workload to be searched for. * \return The best record of the given workload; NullOpt if not found. */ virtual Optional<TuningRecord> QueryTuningRecord(const IRModule& mod, const Target& target, const String& workload_name); /*! * \brief Query the best schedule of the given workload from the database. * \param mod The IRModule to be searched for. * \param target The target to be searched for. * \param workload_name The name of the workload to be searched for. * \return The schedule in the best schedule of the given workload; NullOpt if not found. */ virtual Optional<tir::Schedule> QuerySchedule(const IRModule& mod, const Target& target, const String& workload_name); /*! * \brief Query the best IRModule of the given workload from the database. * \param mod The IRModule to be searched for. * \param target The target to be searched for. * \param workload_name The name of the workload to be searched for. * \return The IRModule in the best IRModule of the given workload; NullOpt if not found. */ virtual Optional<IRModule> QueryIRModule(const IRModule& mod, const Target& target, const String& workload_name); /*! \brief Return a reference to the owned module equality method instance. */ const ModuleEquality& GetModuleEquality() const { ICHECK(mod_eq_); return *mod_eq_; } static constexpr const char* _type_key = "meta_schedule.Database"; TVM_DECLARE_BASE_OBJECT_INFO(DatabaseNode, runtime::Object); private: /*! \brief The module equality testing and hashing method */ std::unique_ptr<ModuleEquality> mod_eq_; }; /*! \brief The database with customized methods on the python-side. */ class PyDatabaseNode : public DatabaseNode { public: /*! * \brief Constructor * \param mod_eq_name A string to specify the module equality testing and hashing method. * It must be one of the followings: * - "structural": Use StructuralEqual/Hash * - "ignore-ndarray": Same as "structural", but ignore ndarray raw data during * equality testing and hashing. * - "anchor-block": Apply equality testing and hashing on the anchor block extracted from a * given module. The "ignore-ndarray" varint is used for the extracted blocks * or in case no anchor block is found. * For the definition of the anchor block, see tvm/tir/analysis.h. */ explicit PyDatabaseNode(String mod_eq_name = "structural"); /*! * \brief The function type of `HasWorkload` method. * \param mod The IRModule to be searched for. * \return Whether the database has the given workload. */ using FHasWorkload = runtime::TypedPackedFunc<bool(const IRModule&)>; /*! * \brief The function type of `CommitWorkload` method. * \param mod The IRModule to be searched for or added. * \return The workload corresponding to the given IRModule. */ using FCommitWorkload = runtime::TypedPackedFunc<Workload(const IRModule&)>; /*! * \brief The function type of `CommitTuningRecord` method. * \param record The tuning record to be added. */ using FCommitTuningRecord = runtime::TypedPackedFunc<void(const TuningRecord&)>; /*! 
* \brief The function type of `GetTopK` method. * \param workload The workload to be searched for. * \param top_k The number of top records to be returned. * \return An array of top K tuning records for the given workload. */ using FGetTopK = runtime::TypedPackedFunc<Array<TuningRecord>(const Workload&, int)>; /*! * \brief The function type of `GetAllTuningRecords` method. * \return An Array of all the tuning records in the database. */ using FGetAllTuningRecords = runtime::TypedPackedFunc<Array<TuningRecord>()>; /*! * \brief The function type of `QueryTuningRecord` method. * \param mod The IRModule to be searched for. * \param target The target to be searched for. * \param workload_name The name of the workload to be searched for. * \return The best record of the given workload; NullOpt if not found. */ using FQueryTuningRecord = runtime::TypedPackedFunc<Optional<TuningRecord>( const IRModule&, const Target&, const String&)>; /*! * \brief The function type of `QuerySchedule` method. * \param mod The IRModule to be searched for. * \param target The target to be searched for. * \param workload_name The name of the workload to be searched for. * \return The schedule in the best schedule of the given workload; NullOpt if not found. */ using FQuerySchedule = runtime::TypedPackedFunc<Optional<tir::Schedule>( const IRModule&, const Target&, const String&)>; /*! * \brief The function type of `QueryIRModule` method. * \param mod The IRModule to be searched for. * \param target The target to be searched for. * \param workload_name The name of the workload to be searched for. * \return The IRModule in the best IRModule of the given workload; NullOpt if not found. */ using FQueryIRModule = runtime::TypedPackedFunc<Optional<IRModule>(const IRModule&, const Target&, const String&)>; /*! * \brief The function type of `Size` method. * \return The size of the database. */ using FSize = runtime::TypedPackedFunc<int64_t()>; /*! \brief The packed function to the `HasWorkload` function. */ FHasWorkload f_has_workload; /*! \brief The packed function to the `CommitWorkload` function. */ FCommitWorkload f_commit_workload; /*! \brief The packed function to the `CommitTuningRecord` function. */ FCommitTuningRecord f_commit_tuning_record; /*! \brief The packed function to the `GetTopK` function. */ FGetTopK f_get_top_k; /*! \brief The packed function to the `GetAllTuningRecords` function. */ FGetAllTuningRecords f_get_all_tuning_records; /*! \brief The packed function to the `QueryTuningRecord` function. */ FQueryTuningRecord f_query_tuning_record; /*! \brief The packed function to the `QuerySchedule` function. */ FQuerySchedule f_query_schedule; /*! \brief The packed function to the `QueryIRModule` function. */ FQueryIRModule f_query_ir_module; /*! \brief The packed function to the `Size` function. */ FSize f_size; void VisitAttrs(tvm::AttrVisitor* v) { // PackedFuncs are all not visited, because the reflection system doesn't take care of them, // so it cannot be accessible on the python side. If there is such need from the future, // we can then add corresponding accessor methods to help access on python. 
// `f_has_workload` is not visited // `f_commit_workload` is not visited // `f_commit_tuning_record` is not visited // `f_get_top_k` is not visited // `f_get_all_tuning_records` is not visited // `f_query_tuning_record` is not visited // `f_query_schedule` is not visited // `f_query_ir_module` is not visited // `f_size` is not visited } bool HasWorkload(const IRModule& mod) final { ICHECK(f_has_workload != nullptr) << "PyDatabase's HasWorkload method not implemented!"; return f_has_workload(mod); } Workload CommitWorkload(const IRModule& mod) final { ICHECK(f_commit_workload != nullptr) << "PyDatabase's CommitWorkload method not implemented!"; return f_commit_workload(mod); } void CommitTuningRecord(const TuningRecord& record) final { ICHECK(f_commit_tuning_record != nullptr) << "PyDatabase's CommitTuningRecord method not implemented!"; f_commit_tuning_record(record); } Array<TuningRecord> GetTopK(const Workload& workload, int top_k) final { ICHECK(f_get_top_k != nullptr) << "PyDatabase's GetTopK method not implemented!"; return f_get_top_k(workload, top_k); } Array<TuningRecord> GetAllTuningRecords() final { ICHECK(f_get_all_tuning_records != nullptr) << "PyDatabase's GetAllTuningRecords method not implemented!"; return f_get_all_tuning_records(); } Optional<TuningRecord> QueryTuningRecord(const IRModule& mod, const Target& target, const String& workload_name) final { if (f_query_tuning_record == nullptr) { return DatabaseNode::QueryTuningRecord(mod, target, workload_name); } else { return f_query_tuning_record(mod, target, workload_name); } } Optional<tir::Schedule> QuerySchedule(const IRModule& mod, const Target& target, const String& workload_name) final { if (f_query_schedule == nullptr) { return DatabaseNode::QuerySchedule(mod, target, workload_name); } else { return f_query_schedule(mod, target, workload_name); } } Optional<IRModule> QueryIRModule(const IRModule& mod, const Target& target, const String& workload_name) final { if (f_query_ir_module == nullptr) { return DatabaseNode::QueryIRModule(mod, target, workload_name); } else { return f_query_ir_module(mod, target, workload_name); } } int64_t Size() final { ICHECK(f_size != nullptr) << "PyDatabase's Size method not implemented!"; return f_size(); } static constexpr const char* _type_key = "meta_schedule.PyDatabase"; TVM_DECLARE_FINAL_OBJECT_INFO(PyDatabaseNode, DatabaseNode); }; /*! * \brief Managed reference to DatabaseNode. * \sa DatabaseNode */ class Database : public runtime::ObjectRef { public: /*! * \brief An in-memory database. * \param mod_eq_name A string to specify the module equality testing and hashing method. */ TVM_DLL static Database MemoryDatabase(String mod_eq_name = "structural"); /*! * \brief A database for injecting handcrafted schedule functions. * \param schedule_fn The function to do scheduling, which takes a TIR schedule, * and returns a boolean indicating if the schedule is successful. * \param mod_eq_name A string to specify the module equality testing and hashing method. */ TVM_DLL static Database ScheduleFnDatabase( runtime::TypedPackedFunc<bool(tir::Schedule)> schedule_fn, String mod_eq_name = "structural"); /*! * \brief Create a default database that uses JSON file for tuning records. * \param path_workload The path to the workload table. * \param path_tuning_record The path to the database table. * \param allow_missing Whether to create new file when the given path is not found. * \param mod_eq_name A string to specify the module equality testing and hashing method. 
*/ TVM_DLL static Database JSONDatabase(String path_workload, String path_tuning_record, bool allow_missing, String mod_eq_name = "structural"); /*! * \brief A database composed of multiple databases, allowing users to guide IR rewriting using * combined knowledge of those databases. To each query, it returns the best record among all the * databases given. * \param databases The list of databases to be combined. * \return The combined database. */ TVM_DLL static Database UnionDatabase(Array<Database, void> databases); /*! * \brief A database composed of multiple databases, allowing users to guide IR rewriting using * combined knowledge of those databases. To each query, it returns the record from the first * database that responds to the query. * \param databases The database to be subsetted. * \return The subsetted database. */ TVM_DLL static Database OrderedUnionDatabase(Array<Database, void> databases); /*! * \brief Create a database with customized methods on the python-side. * \param f_has_workload The packed function of `HasWorkload`. * \param f_commit_workload The packed function of `CommitWorkload`. * \param f_commit_tuning_record The packed function of `CommitTuningRecord`. * \param f_get_top_k The packed function of `GetTopK`. * \param f_get_all_tuning_records The packed function of `GetAllTuningRecords`. * \param f_query_tuning_record The packed function of `QueryTuningRecord`. * \param f_query_schedule The packed function of `QuerySchedule`. * \param f_query_ir_module The packed function of `QueryIRModule`. * \param f_size The packed function of `Size`. * \param mod_eq_name A string to specify the module equality testing and hashing method. * \return The created database. */ TVM_DLL static Database PyDatabase(PyDatabaseNode::FHasWorkload f_has_workload, PyDatabaseNode::FCommitWorkload f_commit_workload, PyDatabaseNode::FCommitTuningRecord f_commit_tuning_record, PyDatabaseNode::FGetTopK f_get_top_k, PyDatabaseNode::FGetAllTuningRecords f_get_all_tuning_records, PyDatabaseNode::FQueryTuningRecord f_query_tuning_record, PyDatabaseNode::FQuerySchedule f_query_schedule, PyDatabaseNode::FQueryIRModule f_query_ir_module, PyDatabaseNode::FSize f_size, String mod_eq_name = "structural"); /*! \return The current Database in the scope. */ static Optional<Database> Current(); /*! \brief Entering the scope of the context manager */ void EnterWithScope(); /*! \brief Exiting the scope of the context manager */ void ExitWithScope(); TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(Database, runtime::ObjectRef, DatabaseNode); }; } // namespace meta_schedule } // namespace tvm #endif // TVM_META_SCHEDULE_DATABASE_H_
https://github.com/zk-ml/tachikoma
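A brief usage sketch of the Database interface above, backed by the JSON implementation. The file names are placeholders.

#include <tvm/meta_schedule/database.h>

using namespace tvm;
using namespace tvm::meta_schedule;

void DatabaseExample(const IRModule& mod) {
  // Store the workload table and the tuning records in two JSON files (names are illustrative).
  Database db = Database::JSONDatabase("workloads.json", "tuning_records.json",
                                       /*allow_missing=*/true);
  Workload workload = db->CommitWorkload(mod);
  // ... CommitTuningRecord(...) is called as tuning produces measured traces ...
  Array<TuningRecord> best = db->GetTopK(workload, /*top_k=*/3);
  (void)best;
}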
include/tvm/meta_schedule/extracted_task.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_EXTRACTED_TASK_H_ #define TVM_META_SCHEDULE_EXTRACTED_TASK_H_ #include <tvm/ir/module.h> #include <tvm/node/reflection.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/string.h> #include <tvm/runtime/object.h> #include <tvm/target/target.h> namespace tvm { namespace tir { class PrimFunc; } // namespace tir namespace te { class Tensor; } // namespace te } // namespace tvm namespace tvm { namespace meta_schedule { /*! \brief A tuning task extracted from the high-level IR */ class ExtractedTaskNode : public runtime::Object { public: /*! \brief The name of the task extracted */ String task_name; /*! \brief The high-level IR */ IRModule mod; /*! \brief Target */ Target target; /*! \brief A list of low-level IRs that the high-level IR could potentially dispatch to */ Array<IRModule> dispatched; /*! \brief Weight of the task */ int weight; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("task_name", &task_name); v->Visit("mod", &mod); v->Visit("target", &target); v->Visit("dispatched", &dispatched); v->Visit("weight", &weight); } static constexpr const char* _type_key = "meta_schedule.ExtractedTask"; TVM_DECLARE_FINAL_OBJECT_INFO(ExtractedTaskNode, runtime::Object); }; /*! * \brief Managed reference to ExtractedTaskNode * \sa ExtractedTaskNode */ class ExtractedTask : public runtime::ObjectRef { public: explicit ExtractedTask(String task_name, IRModule mod, Target target, Array<IRModule> dispatched, int weight); TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(ExtractedTask, runtime::ObjectRef, ExtractedTaskNode); }; } // namespace meta_schedule } // namespace tvm #endif // TVM_META_SCHEDULE_EXTRACTED_TASK_H_
https://github.com/zk-ml/tachikoma
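For illustration, an ExtractedTask can be assembled directly from its constructor. The task name and weight below are placeholders rather than values produced by actual task extraction.

#include <tvm/meta_schedule/extracted_task.h>

using namespace tvm;
using namespace tvm::meta_schedule;

ExtractedTask MakeTask(IRModule high_level_mod, IRModule dispatched_tir, Target target) {
  // Low-level IR candidates that the high-level module may dispatch to.
  Array<IRModule> dispatched{dispatched_tir};
  return ExtractedTask(/*task_name=*/"example_task", high_level_mod, target, dispatched,
                       /*weight=*/1);
}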
include/tvm/meta_schedule/feature_extractor.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_FEATURE_EXTRACTOR_H_ #define TVM_META_SCHEDULE_FEATURE_EXTRACTOR_H_ #include <tvm/meta_schedule/measure_candidate.h> #include <tvm/node/reflection.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/string.h> #include <tvm/runtime/ndarray.h> #include <tvm/runtime/object.h> #include <tvm/runtime/packed_func.h> namespace tvm { namespace meta_schedule { class TuneContext; /*! \brief Extractor for features from measure candidates for use in cost model. */ class FeatureExtractorNode : public runtime::Object { public: /*! \brief Virtual destructor. */ virtual ~FeatureExtractorNode() = default; void VisitAttrs(tvm::AttrVisitor* v) {} /*! * \brief Extract features from the given measure candidate. * \param context The tuning context for feature extraction. * \param candidates The measure candidates to extract features from. * \return The feature ndarray extracted. */ virtual Array<tvm::runtime::NDArray> ExtractFrom(const TuneContext& context, const Array<MeasureCandidate>& candidates) = 0; static constexpr const char* _type_key = "meta_schedule.FeatureExtractor"; TVM_DECLARE_BASE_OBJECT_INFO(FeatureExtractorNode, Object); }; /*! \brief The feature extractor with customized methods on the python-side. */ class PyFeatureExtractorNode : public FeatureExtractorNode { public: /*! * \brief Extract features from the given measure candidate. * \param context The tuning context for feature extraction. * \param candidates The measure candidates to extract features from. * \return The feature ndarray extracted. */ using FExtractFrom = runtime::TypedPackedFunc<Array<tvm::runtime::NDArray>( const TuneContext& context, const Array<MeasureCandidate>& candidates)>; /*! * \brief Get the feature extractor as string with name. * \return The string of the feature extractor. */ using FAsString = runtime::TypedPackedFunc<String()>; /*! \brief The packed function to the `ExtractFrom` function. */ FExtractFrom f_extract_from; /*! \brief The packed function to the `AsString` function. */ FAsString f_as_string; void VisitAttrs(tvm::AttrVisitor* v) { // `f_extract_from` is not visited // `f_as_string` is not visited } Array<tvm::runtime::NDArray> ExtractFrom(const TuneContext& context, const Array<MeasureCandidate>& candidates) final; static constexpr const char* _type_key = "meta_schedule.PyFeatureExtractor"; TVM_DECLARE_FINAL_OBJECT_INFO(PyFeatureExtractorNode, FeatureExtractorNode); }; /*! * \brief Managed reference to FeatureExtractorNode * \sa FeatureExtractorNode */ class FeatureExtractor : public runtime::ObjectRef { public: /*! 
* \brief Create a feature extractor that extracts features from each BufferStore * \param buffers_per_store The number of buffers in each BufferStore; Pad or truncate if * necessary. * \param arith_intensity_curve_num_samples The number of samples used in the arithmetic intensity * curve. * \param cache_line_bytes The number of bytes in a cache line. * \param extract_workload Whether to extract features in the workload in tuning context or not. * \return The feature extractor created. */ TVM_DLL static FeatureExtractor PerStoreFeature(int buffers_per_store = 5, int arith_intensity_curve_num_samples = 10, int cache_line_bytes = 64, bool extract_workload = false); /*! * \brief Create a feature extractor with customized methods on the python-side. * \param f_extract_from The packed function of `ExtractFrom`. * \param f_as_string The packed function of `AsString`. * \return The feature extractor created. */ TVM_DLL static FeatureExtractor PyFeatureExtractor( PyFeatureExtractorNode::FExtractFrom f_extract_from, PyFeatureExtractorNode::FAsString f_as_string); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(FeatureExtractor, ObjectRef, FeatureExtractorNode); }; } // namespace meta_schedule } // namespace tvm #endif // TVM_META_SCHEDULE_FEATURE_EXTRACTOR_H_
https://github.com/zk-ml/tachikoma
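A small sketch showing the default per-store feature extractor declared above being applied to a batch of measure candidates, using the documented default parameters.

#include <tvm/meta_schedule/feature_extractor.h>

using namespace tvm;
using namespace tvm::meta_schedule;

Array<runtime::NDArray> ExtractFeatures(const TuneContext& context,
                                        const Array<MeasureCandidate>& candidates) {
  // Defaults: 5 buffers per store, 10 arithmetic-intensity curve samples, 64-byte cache lines.
  FeatureExtractor extractor = FeatureExtractor::PerStoreFeature();
  return extractor->ExtractFrom(context, candidates);
}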
include/tvm/meta_schedule/measure_callback.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_MEASURE_CALLBACK_H_ #define TVM_META_SCHEDULE_MEASURE_CALLBACK_H_ #include <tvm/meta_schedule/builder.h> #include <tvm/meta_schedule/measure_candidate.h> #include <tvm/meta_schedule/runner.h> #include <tvm/meta_schedule/search_strategy.h> #include <tvm/meta_schedule/tune_context.h> #include <tvm/node/reflection.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/string.h> #include <tvm/runtime/object.h> #include <tvm/runtime/packed_func.h> namespace tvm { namespace meta_schedule { class TaskScheduler; /*! \brief Rules to apply after measure results is available. */ class MeasureCallbackNode : public runtime::Object { public: /*! \brief Virtual destructor. */ virtual ~MeasureCallbackNode() = default; void VisitAttrs(tvm::AttrVisitor* v) {} /*! * \brief Apply a measure callback rule with given arguments. * \param task_scheduler The task scheduler. * \param task_id The id of the task (tune context) to apply measure callbacks. * \param measure_candidates The measure candidates. * \param builder_results The builder results by building the measure candidates. * \param runner_results The runner results by running the built measure candidates. */ virtual void Apply(const TaskScheduler& task_scheduler, // int task_id, // const Array<MeasureCandidate>& measure_candidates, // const Array<BuilderResult>& builder_results, // const Array<RunnerResult>& runner_results) = 0; static constexpr const char* _type_key = "meta_schedule.MeasureCallback"; TVM_DECLARE_BASE_OBJECT_INFO(MeasureCallbackNode, Object); }; /*! \brief The measure callback with customized methods on the python-side. */ class PyMeasureCallbackNode : public MeasureCallbackNode { public: /*! * \brief Apply a measure callback to the given schedule. * \param task_scheduler The task scheduler. * \param tasks The list of tune context to process. * \param measure_candidates The measure candidates. * \param builds The builder results by building the measure candidates. * \param results The runner results by running the built measure candidates. * \return Whether the measure callback was successfully applied. */ using FApply = runtime::TypedPackedFunc<void(const TaskScheduler& task_scheduler, // int task_id, // const Array<MeasureCandidate>& measure_candidates, // const Array<BuilderResult>& builds, // const Array<RunnerResult>& results)>; /*! * \brief Get the measure callback function as string with name. * \return The string of the measure callback function. */ using FAsString = runtime::TypedPackedFunc<String()>; /*! \brief The packed function to the `Apply` function. */ FApply f_apply; /*! \brief The packed function to the `AsString` function. 
*/ FAsString f_as_string; void VisitAttrs(tvm::AttrVisitor* v) { // `f_apply` is not visited // `f_as_string` is not visited } void Apply(const TaskScheduler& task_scheduler, // int task_id, // const Array<MeasureCandidate>& measure_candidates, // const Array<BuilderResult>& builds, // const Array<RunnerResult>& results); static constexpr const char* _type_key = "meta_schedule.PyMeasureCallback"; TVM_DECLARE_FINAL_OBJECT_INFO(PyMeasureCallbackNode, MeasureCallbackNode); }; /*! * \brief Managed reference to MeasureCallbackNode * \sa MeasureCallbackNode */ class MeasureCallback : public runtime::ObjectRef { public: /*! * \brief Create a measure callback that adds the measurement results into the database * \return The measure callback created. */ TVM_DLL static MeasureCallback AddToDatabase(); /*! * \brief Create a measure callback that removes the build artifacts from the disk * \return The measure callback created. */ TVM_DLL static MeasureCallback RemoveBuildArtifact(); /*! * \brief Create a measure callback that updates the cost model with measurement result. * \return The measure callback created. */ TVM_DLL static MeasureCallback UpdateCostModel(); /*! * \brief Create a measure callback with customized methods on the python-side. * \param f_apply The packed function of `Apply`. * \param f_as_string The packed function of `AsString`. * \return The measure callback created. */ TVM_DLL static MeasureCallback PyMeasureCallback(PyMeasureCallbackNode::FApply f_apply, PyMeasureCallbackNode::FAsString f_as_string); /*! \brief The default list of measure callbacks. */ TVM_DLL static Array<MeasureCallback, void> Default(); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(MeasureCallback, ObjectRef, MeasureCallbackNode); }; } // namespace meta_schedule } // namespace tvm #endif // TVM_META_SCHEDULE_MEASURE_CALLBACK_H_
https://github.com/zk-ml/tachikoma
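A minimal sketch assembling the built-in measure callbacks declared above into a list, roughly what a default tuning setup would use.

#include <tvm/meta_schedule/measure_callback.h>

using namespace tvm;
using namespace tvm::meta_schedule;

Array<MeasureCallback> MakeCallbacks() {
  // Record results into the database, delete build artifacts, and refresh the cost model.
  Array<MeasureCallback> callbacks{MeasureCallback::AddToDatabase(),
                                   MeasureCallback::RemoveBuildArtifact(),
                                   MeasureCallback::UpdateCostModel()};
  return callbacks;
}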
include/tvm/meta_schedule/measure_candidate.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_MEASURE_CANDIDATE_H_ #define TVM_META_SCHEDULE_MEASURE_CANDIDATE_H_ #include <tvm/meta_schedule/arg_info.h> #include <tvm/node/reflection.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/object.h> #include <tvm/tir/schedule/schedule.h> namespace tvm { namespace meta_schedule { /*! \brief The schedule (with input shapes) to be measured. */ class MeasureCandidateNode : public runtime::Object { public: /*! \brief The schedule for measurement. */ tir::Schedule sch; /*! \brief The argument information, e.g., (shape, dtype) for tensors. */ Array<ArgInfo> args_info; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("sch", &sch); v->Visit("args_info", &args_info); } static constexpr const char* _type_key = "meta_schedule.MeasureCandidate"; TVM_DECLARE_FINAL_OBJECT_INFO(MeasureCandidateNode, Object); }; /*! * \brief Managed reference to MeasureCandidateNode. * \sa MeasureCandidateNode */ class MeasureCandidate : public runtime::ObjectRef { public: /*! * \brief Constructor of MeasureCandidate. * \param sch The schedule for measurement. * \param args_info The argument information, e.g., (shape, dtype) for tensors. */ TVM_DLL MeasureCandidate(tir::Schedule sch, Array<ArgInfo> args_info); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(MeasureCandidate, ObjectRef, MeasureCandidateNode); }; } // namespace meta_schedule } // namespace tvm #endif // TVM_META_SCHEDULE_MEASURE_CANDIDATE_H_
https://github.com/zk-ml/tachikoma
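A sketch of constructing a MeasureCandidate from a schedule, using ArgInfo::FromEntryFunc from arg_info.h to derive the argument information; `sch->mod()` is assumed to be the schedule's module accessor from tir::Schedule, which is not shown in this header.

#include <tvm/meta_schedule/measure_candidate.h>

using namespace tvm;
using namespace tvm::meta_schedule;

MeasureCandidate MakeCandidate(tir::Schedule sch) {
  // Derive (shape, dtype) information for the arguments of the scheduled module's entry function.
  Array<ArgInfo> args_info = ArgInfo::FromEntryFunc(sch->mod(), /*remove_preproc=*/true);
  return MeasureCandidate(sch, args_info);
}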
include/tvm/meta_schedule/mutator.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_MUTATOR_H_ #define TVM_META_SCHEDULE_MUTATOR_H_ #include <tvm/node/reflection.h> #include <tvm/runtime/container/optional.h> #include <tvm/runtime/object.h> #include <tvm/runtime/packed_func.h> #include <tvm/support/random_engine.h> #include <tvm/tir/schedule/schedule.h> #include <tvm/tir/schedule/trace.h> namespace tvm { namespace meta_schedule { class TuneContext; class Mutator; /*! \brief Mutator is designed to mutate the trace to explore the design space. */ class MutatorNode : public runtime::Object { public: /*! \brief Virtual destructor. */ virtual ~MutatorNode() = default; void VisitAttrs(tvm::AttrVisitor* v) {} /*! * \brief Initialize the design space generator with tuning context. * \param context The tuning context for initialization. * \note This method is supposed to be called only once before every other method. */ virtual void InitializeWithTuneContext(const TuneContext& context) = 0; /*! * \brief Apply the mutator function to the given trace. * \param trace The given trace for mutation. * \param rand_state The random state for mutation. * \return None if mutator failed, otherwise return the mutated trace. */ virtual Optional<tir::Trace> Apply(const tir::Trace& trace, support::LinearCongruentialEngine::TRandState* rand_state) = 0; /*! * \brief Clone the mutator. * \return The cloned mutator. */ virtual Mutator Clone() const = 0; static constexpr const char* _type_key = "meta_schedule.Mutator"; TVM_DECLARE_BASE_OBJECT_INFO(MutatorNode, Object); }; /*! * \brief Managed reference to MutatorNode * \sa MutatorNode */ class Mutator : public runtime::ObjectRef { public: /*! * \brief The function type of `InitializeWithTuneContext` method. * \param context The tuning context for initialization. */ using FInitializeWithTuneContext = runtime::TypedPackedFunc<void(const TuneContext&)>; /*! * \brief Apply the mutator function to the given trace. * \param trace The given trace for mutation. * \return None if mutator failed, otherwise return the mutated trace. */ using FApply = runtime::TypedPackedFunc<Optional<tir::Trace>( const tir::Trace&, support::LinearCongruentialEngine::TRandState rand_state)>; /*! * \brief Clone the mutator. * \return The cloned mutator. */ using FClone = runtime::TypedPackedFunc<Mutator()>; /*! * \brief Get the mutator as string with name. * \return The string of the mutator. */ using FAsString = runtime::TypedPackedFunc<String()>; /*! \brief Create a Mutator that mutates the decision of instruction Sample-Perfect-Tile */ TVM_DLL static Mutator MutateTileSize(); /*! * \brief Create a Mutator that mutates the parallel extent * \param max_jobs_per_core The maximum number of parallel jobs per core. * \return The created mutator. 
*/ TVM_DLL static Mutator MutateParallel(int64_t max_jobs_per_core); /*! * \brief Create a Mutator that mutates auto unroll step * \return The mutator created */ TVM_DLL static Mutator MutateUnroll(); /*! * \brief Create a Mutator that mutates the outcome of SampleComputeLocation * \return The mutator created */ TVM_DLL static Mutator MutateComputeLocation(); /*! * \brief Create a Mutator that mutates auto thread binding. * \return The mutator created */ TVM_DLL static Mutator MutateThreadBinding(); /*! * \brief Create a mutator with customized methods on the python-side. * \param f_initialize_with_tune_context The packed function of `InitializeWithTuneContext`. * \param f_apply The packed function of `Apply`. * \param f_clone The packed function of `Clone`. * \param f_as_string The packed function of `AsString`. * \return The mutator created. */ TVM_DLL static Mutator PyMutator(FInitializeWithTuneContext f_initialize_with_tune_context, FApply f_apply, FClone f_clone, FAsString f_as_string); /*! \brief Create default mutators for LLVM */ TVM_DLL static Map<Mutator, FloatImm, void> DefaultLLVM(); /*! \brief Create default mutators for CUDA */ TVM_DLL static Map<Mutator, FloatImm, void> DefaultCUDA(); /*! \brief Create default mutators for CUDA with TensorCore */ TVM_DLL static Map<Mutator, FloatImm, void> DefaultCUDATensorCore(); /*! \brief Create default mutators for Hexagon */ TVM_DLL static Map<Mutator, FloatImm, void> DefaultHexagon(); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(Mutator, ObjectRef, MutatorNode); }; /*! \brief The mutator with customized methods on the python-side. */ class PyMutatorNode : public MutatorNode { public: using FInitializeWithTuneContext = Mutator::FInitializeWithTuneContext; using FApply = Mutator::FApply; using FClone = Mutator::FClone; using FAsString = Mutator::FAsString; /*! \brief The packed function to the `InitializeWithTuneContext` function. */ FInitializeWithTuneContext f_initialize_with_tune_context; /*! \brief The packed function to the `Apply` function. */ FApply f_apply; /*! \brief The packed function to the `Clone` function. */ FClone f_clone; /*! \brief The packed function to the `AsString` function. */ FAsString f_as_string; void VisitAttrs(tvm::AttrVisitor* v) { // `f_initialize_with_tune_context` is not visited // `f_apply` is not visited // `f_clone` is not visited // `f_as_string` is not visited } void InitializeWithTuneContext(const TuneContext& context) final; Optional<tir::Trace> Apply(const tir::Trace& trace, support::LinearCongruentialEngine::TRandState* rand_state) final; Mutator Clone() const final; static constexpr const char* _type_key = "meta_schedule.PyMutator"; TVM_DECLARE_FINAL_OBJECT_INFO(PyMutatorNode, MutatorNode); }; } // namespace meta_schedule } // namespace tvm #endif // TVM_META_SCHEDULE_MUTATOR_H_
https://github.com/zk-ml/tachikoma
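A short sketch applying one of the built-in mutators declared above to a trace; it returns NullOpt when mutation fails, as documented.

#include <tvm/meta_schedule/mutator.h>

using namespace tvm;
using namespace tvm::meta_schedule;

Optional<tir::Trace> MutateTilesOnce(const TuneContext& context, const tir::Trace& trace,
                                     support::LinearCongruentialEngine::TRandState* rand_state) {
  // Mutate the tile sizes sampled by Sample-Perfect-Tile.
  Mutator mutator = Mutator::MutateTileSize();
  mutator->InitializeWithTuneContext(context);
  return mutator->Apply(trace, rand_state);
}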
include/tvm/meta_schedule/postproc.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_POSTPROC_H_ #define TVM_META_SCHEDULE_POSTPROC_H_ #include <tvm/node/reflection.h> #include <tvm/runtime/object.h> #include <tvm/runtime/packed_func.h> #include <tvm/tir/schedule/schedule.h> namespace tvm { namespace meta_schedule { class TuneContext; class Postproc; /*! * \brief Rules to apply a postprocessor to a schedule. */ class PostprocNode : public runtime::Object { public: /*! \brief Virtual destructor. */ virtual ~PostprocNode() = default; void VisitAttrs(tvm::AttrVisitor* v) {} /*! * \brief Initialize the design space generator with tuning context. * \param context The tuning context for initialization. * \note This method is supposed to be called only once before every other method. */ virtual void InitializeWithTuneContext(const TuneContext& context) = 0; /*! * \brief Apply a postprocessor to the given schedule. * \param sch The schedule to be post processed. * \return Whether the postprocessor was successfully applied. */ virtual bool Apply(const tir::Schedule& sch) = 0; /*! * \brief Clone the postprocessor. * \return The cloned postprocessor. */ virtual Postproc Clone() const = 0; static constexpr const char* _type_key = "meta_schedule.Postproc"; TVM_DECLARE_BASE_OBJECT_INFO(PostprocNode, Object); }; /*! * \brief Managed reference to PostprocNode * \sa PostprocNode */ class Postproc : public runtime::ObjectRef { public: /*! * \brief The function type of `InitializeWithTuneContext` method. * \param context The tuning context for initialization. */ using FInitializeWithTuneContext = runtime::TypedPackedFunc<void(const TuneContext&)>; /*! * \brief Apply a postprocessor to the given schedule. * \param sch The schedule to be post processed. * \return Whether the postprocessor was successfully applied. */ using FApply = runtime::TypedPackedFunc<bool(const tir::Schedule&)>; /*! * \brief Clone the postprocessor. * \return The cloned postprocessor. */ using FClone = runtime::TypedPackedFunc<Postproc()>; /*! * \brief Get the postprocessor function as string with name. * \return The string of the postprocessor function. */ using FAsString = runtime::TypedPackedFunc<String()>; /*! * \brief Create a postprocessor with customized methods on the python-side. * \param f_initialize_with_tune_context The packed function of `InitializeWithTuneContext`. * \param f_apply The packed function of `Apply`. * \param f_clone The packed function of `Clone`. * \param f_as_string The packed function of `AsString`. * \return The postprocessor created. */ TVM_DLL static Postproc PyPostproc(FInitializeWithTuneContext f_initialize_with_tune_context, // FApply f_apply, // FClone f_clone, // FAsString f_as_string); /*! 
* \brief Create a postprocessor that checks if all loops are static * \return The postprocessor created */ TVM_DLL static Postproc DisallowDynamicLoop(); /*! * \brief Create a postprocessor that rewrites the cooperative fetch annotation to * actual vectorized cooperative fetching in loop bindings. * \return The postprocessor created. */ TVM_DLL static Postproc RewriteCooperativeFetch(); /*! * \brief Creates a postprocessor that applies parallelization, vectorization and auto unrolling * according to the annotation of each block * \return The postprocessor created */ TVM_DLL static Postproc RewriteParallelVectorizeUnroll(); /*! * \brief Create a postprocessor that rewrites reduction block by moving the init block out. * \return The postprocessor created. */ TVM_DLL static Postproc RewriteReductionBlock(); /*! * \brief Create a postprocessor that adds thread binding to unbound blocks * \param max_threadblocks The max number of threadblocks in the cuda device. * \return The postprocessor created. */ TVM_DLL static Postproc RewriteUnboundBlock(int max_threadblocks); /*! * \brief Create a postprocessor that applies tensorization to annotated blocks * \param vectorize_init_loop Whether or not vectorize the initialization loop produced by * DecomposeReduction * \return The postprocessor created. */ TVM_DLL static Postproc RewriteTensorize(bool vectorize_init_loop = false); /*! * \brief Creates a postprocessor that verifies if the GPU code is correct * \return The postprocessor created */ TVM_DLL static Postproc VerifyGPUCode(); /*! * \brief Creates a postprocessor that rewrites the layout of input tensor * \note Weight layout rewrite is supported so far, activation layout rewrite will be added. * \return The postprocessor created */ TVM_DLL static Postproc RewriteLayout(); /*! \brief Create default postprocessors for LLVM */ TVM_DLL static Array<Postproc, void> DefaultLLVM(); /*! \brief Create default postprocessors for CUDA */ TVM_DLL static Array<Postproc, void> DefaultCUDA(); /*! \brief Create default postprocessors for CUDA with TensorCore */ TVM_DLL static Array<Postproc, void> DefaultCUDATensorCore(); /*! \brief Create default postprocessors for Hexagon */ TVM_DLL static Array<Postproc, void> DefaultHexagon(); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(Postproc, ObjectRef, PostprocNode); }; /*! \brief The postprocessor with customized methods on the python-side. */ class PyPostprocNode : public PostprocNode { public: using FInitializeWithTuneContext = Postproc::FInitializeWithTuneContext; using FApply = Postproc::FApply; using FClone = Postproc::FClone; using FAsString = Postproc::FAsString; /*! \brief The packed function to the `InitializeWithTuneContext` function. */ FInitializeWithTuneContext f_initialize_with_tune_context; /*! \brief The packed function to the `Apply` function. */ FApply f_apply; /*! \brief The packed function to the `Clone` function. */ FClone f_clone; /*! \brief The packed function to the `AsString` function. */ FAsString f_as_string; void VisitAttrs(tvm::AttrVisitor* v) { // `f_initialize_with_tune_context` is not visited // `f_apply` is not visited // `f_clone` is not visited // `f_as_string` is not visited } void InitializeWithTuneContext(const TuneContext& context) final; bool Apply(const tir::Schedule& sch) final; Postproc Clone() const final; static constexpr const char* _type_key = "meta_schedule.PyPostproc"; TVM_DECLARE_FINAL_OBJECT_INFO(PyPostprocNode, PostprocNode); }; } // namespace meta_schedule } // namespace tvm #endif // TVM_META_SCHEDULE_POSTPROC_H_
https://github.com/zk-ml/tachikoma
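Because each postprocessor exposes `InitializeWithTuneContext` and a boolean `Apply`, a common pattern is to run a whole set of them over a candidate schedule and discard the schedule on the first failure. A minimal sketch, assuming `ctx` and `sch` are created elsewhere (the function name `ApplyDefaultPostprocs` is illustrative):

#include <tvm/meta_schedule/postproc.h>

using namespace tvm;
using namespace tvm::meta_schedule;

// Run every default LLVM postprocessor over `sch`; return false as soon as one
// of them rejects the schedule.
bool ApplyDefaultPostprocs(const TuneContext& ctx, const tir::Schedule& sch) {
  for (Postproc proc : Postproc::DefaultLLVM()) {
    proc->InitializeWithTuneContext(ctx);
    if (!proc->Apply(sch)) {
      return false;
    }
  }
  return true;
}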
include/tvm/meta_schedule/profiler.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_PROFILER_H_ #define TVM_META_SCHEDULE_PROFILER_H_ #include <tvm/ir/module.h> #include <tvm/node/reflection.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/optional.h> #include <tvm/runtime/container/string.h> #include <tvm/runtime/object.h> #include <tvm/runtime/packed_func.h> #include <tvm/target/target.h> #include <string> #include <unordered_map> #include <utility> #include <vector> namespace tvm { namespace meta_schedule { class ScopedTimer { public: ~ScopedTimer() { if (deferred_ != nullptr) { deferred_(); } } private: friend class Profiler; explicit ScopedTimer(runtime::TypedPackedFunc<void()> deferred) : deferred_(deferred) {} runtime::TypedPackedFunc<void()> deferred_; }; /*! \brief A generic profiler */ class ProfilerNode : public runtime::Object { public: /*! \brief The segments that are already profiled */ std::unordered_map<std::string, double> stats_sec; /*! \brief Counter for the total time used */ runtime::PackedFunc total_timer; void VisitAttrs(tvm::AttrVisitor* v) { // `stats_sec` is not visited. // `total_timer` is not visited. } static constexpr const char* _type_key = "meta_schedule.Profiler"; TVM_DECLARE_FINAL_OBJECT_INFO(ProfilerNode, runtime::Object); public: /*! \brief Get the internal stats of the running time */ Map<String, FloatImm> Get() const; /*! \brief Return a summary of profiling results as table format */ String Table() const; }; /*! * \brief Managed reference to ProfilerNode * \sa ProfilerNode */ class Profiler : public runtime::ObjectRef { public: Profiler(); TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(Profiler, runtime::ObjectRef, ProfilerNode); /*! \brief Entering the scope of the context manager */ void EnterWithScope(); /*! \brief Exiting the scope of the context manager */ void ExitWithScope(); /*! \brief Returns the current profiler */ static Optional<Profiler> Current(); /*! * \brief Profile the time usage in the given scope in the given name. * \param name Name for the scope. * \return A scope timer for time profiling. */ static ScopedTimer TimedScope(String name); }; } // namespace meta_schedule } // namespace tvm #endif // TVM_META_SCHEDULE_PROFILER_H_
https://github.com/zk-ml/tachikoma
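`Profiler` acts as a context manager and `TimedScope` returns an RAII timer, so profiling a region only requires keeping the returned `ScopedTimer` alive for the duration of the work. A minimal sketch (the scope name "Example/Work" is arbitrary):

#include <tvm/meta_schedule/profiler.h>

using namespace tvm;
using namespace tvm::meta_schedule;

void ProfileSomething() {
  Profiler profiler;           // a fresh profiler
  profiler.EnterWithScope();   // make it the current profiler
  {
    // The timer stops when it goes out of scope and accumulates the elapsed
    // time under the "Example/Work" entry of the current profiler.
    auto timer = Profiler::TimedScope("Example/Work");
    // ... expensive work ...
  }
  profiler.ExitWithScope();
  String summary = profiler->Table();  // human-readable per-scope breakdown
  (void)summary;
}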
include/tvm/meta_schedule/runner.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_RUNNER_H_ #define TVM_META_SCHEDULE_RUNNER_H_ #include <tvm/ir/expr.h> #include <tvm/meta_schedule/arg_info.h> #include <tvm/node/reflection.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/optional.h> #include <tvm/runtime/container/string.h> #include <tvm/runtime/object.h> #include <tvm/runtime/packed_func.h> namespace tvm { namespace meta_schedule { /*! \brief Runner's input containing path of artifact, type of device and argument info. */ class RunnerInputNode : public runtime::Object { public: /*! \brief The path to the built artifact. */ String artifact_path; /*! \brief The type of device. */ String device_type; /*! \brief The argument information. */ Array<ArgInfo> args_info; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("artifact_path", &artifact_path); v->Visit("device_type", &device_type); v->Visit("args_info", &args_info); } static constexpr const char* _type_key = "meta_schedule.RunnerInput"; TVM_DECLARE_FINAL_OBJECT_INFO(RunnerInputNode, runtime::Object); }; /*! * \brief Managed reference to RunnerInputNode * \sa RunnerInputNode */ class RunnerInput : public runtime::ObjectRef { public: /*! * \brief Constructor of RunnerInput * \param artifact_path The path to the built artifact. * \param device_type The type of device. * \param args_info The argument information. */ TVM_DLL explicit RunnerInput(String artifact_path, String device_type, Array<ArgInfo> args_info); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(RunnerInput, runtime::ObjectRef, RunnerInputNode); }; /*! \brief Runner's output containing measurement result of MeasureCandidate or error msg if any. */ class RunnerResultNode : public runtime::Object { public: /*! \brief The run time in seconds.*/ Optional<Array<FloatImm>> run_secs; /*! \brief The error message, if any. */ Optional<String> error_msg; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("run_secs", &run_secs); v->Visit("error_msg", &error_msg); } static constexpr const char* _type_key = "meta_schedule.RunnerResult"; TVM_DECLARE_FINAL_OBJECT_INFO(RunnerResultNode, runtime::Object); }; /*! * \brief Managed reference to RunnerResultNode * \sa RunnerResultNode */ class RunnerResult : public runtime::ObjectRef { public: /*! * \brief Constructor * \brief The run time in seconds. * \brief The error message, if any. */ TVM_DLL explicit RunnerResult(Optional<Array<FloatImm>> run_secs, Optional<String> error_msg); TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(RunnerResult, runtime::ObjectRef, RunnerResultNode); }; /*! * \brief A class to asynchronously fetch runner's output. 
* \note The API design is consistent with python's concurrent.futures.Future: * https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.Future */ class RunnerFutureNode : public runtime::Object { public: /*! * \brief The function type to check whether the runner has finished. * \return Whether the runner's output is ready. */ using FDone = runtime::TypedPackedFunc<bool()>; /*! * \brief The function type to fetch runner output if it is ready. * \return The runner's output. */ using FResult = runtime::TypedPackedFunc<RunnerResult()>; /*! \brief The packed function to check whether the runner has finished. */ FDone f_done; /*! \brief The packed function to fetch runner output if it is ready. */ FResult f_result; void VisitAttrs(tvm::AttrVisitor* v) { // `f_done` is not visited // `f_result` is not visited } /*! * \brief Check whether the runner has finished. * \return A boolean indicating whether the runner has finished. */ bool Done() const { ICHECK(f_done != nullptr) << "PyRunnerFuture's Done method not implemented!"; return f_done(); } /*! * \brief Fetch the runner's output if it is ready. * \return The runner's output. */ RunnerResult Result() const { ICHECK(f_result != nullptr) << "PyRunnerFuture's Result method not implemented!"; return f_result(); } static constexpr const char* _type_key = "meta_schedule.RunnerFuture"; TVM_DECLARE_FINAL_OBJECT_INFO(RunnerFutureNode, runtime::Object); }; /*! * \brief Managed reference to RunnerFutureNode * \sa RunnerFutureNode */ class RunnerFuture : public runtime::ObjectRef { public: using FDone = RunnerFutureNode::FDone; using FResult = RunnerFutureNode::FResult; /*! * \brief Constructor of RunnerFuture * \param f_done The packed function to check whether the runner has finished. * \param f_result The packed function to fetch runner output if it is ready. */ TVM_DLL explicit RunnerFuture(FDone f_done, FResult f_result); TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(RunnerFuture, runtime::ObjectRef, RunnerFutureNode); }; /*! \brief The abstract runner interface. */ class RunnerNode : public runtime::Object { public: /*! * \brief The function type to run the built artifacts and get runner futures. * \param input The runner's inputs. * \return The runner futures. * \sa RunnerFuture */ using FRun = runtime::TypedPackedFunc<Array<RunnerFuture>(Array<RunnerInput>)>; /*! \brief Default destructor */ virtual ~RunnerNode() = default; /*! * \brief Run the built artifact and get runner futures. * \param runner_inputs The runner's inputs. * \return The runner futures. */ virtual Array<RunnerFuture> Run(Array<RunnerInput> runner_inputs) = 0; static constexpr const char* _type_key = "meta_schedule.Runner"; TVM_DECLARE_BASE_OBJECT_INFO(RunnerNode, runtime::Object); }; /*! * \brief Managed reference to RunnerNode * \sa RunnerNode */ class Runner : public runtime::ObjectRef { public: using FRun = RunnerNode::FRun; /*! * \brief Create a runner with customized build method on the python-side. * \param f_run The packed function to run the built artifacts and get runner futures. * \return The runner created. */ TVM_DLL static Runner PyRunner(FRun f_run); TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(Runner, runtime::ObjectRef, RunnerNode); }; /*! \brief An abstract runner with customized build method on the python-side. */ class PyRunnerNode : public RunnerNode { public: /*! \brief The packed function to run the built artifacts and get runner futures. 
*/ FRun f_run; void VisitAttrs(tvm::AttrVisitor* v) { // `f_run` is not visited } Array<RunnerFuture> Run(Array<RunnerInput> runner_inputs) final { ICHECK(f_run != nullptr) << "PyRunner's Run method not implemented!"; return f_run(runner_inputs); } static constexpr const char* _type_key = "meta_schedule.PyRunner"; TVM_DECLARE_FINAL_OBJECT_INFO(PyRunnerNode, RunnerNode); }; } // namespace meta_schedule } // namespace tvm #endif // TVM_META_SCHEDULE_RUNNER_H_
https://github.com/zk-ml/tachikoma
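Since a `RunnerFuture` is just a pair of packed functions, a fully synchronous runner can be expressed directly with the constructors above. A minimal sketch of a stub runner that reports an error for every candidate; the name `MakeFailingRunner` and the error text are illustrative, and a real runner would dispatch the artifact to a device instead:

#include <tvm/meta_schedule/runner.h>

using namespace tvm;
using namespace tvm::meta_schedule;

Runner MakeFailingRunner() {
  return Runner::PyRunner([](Array<RunnerInput> inputs) -> Array<RunnerFuture> {
    Array<RunnerFuture> futures;
    for (const RunnerInput& input : inputs) {
      (void)input;  // a real runner would upload and execute the built artifact here
      RunnerResult result(/*run_secs=*/NullOpt, /*error_msg=*/String("stub runner"));
      futures.push_back(RunnerFuture(/*f_done=*/[]() -> bool { return true; },
                                     /*f_result=*/[result]() -> RunnerResult { return result; }));
    }
    return futures;
  });
}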
include/tvm/meta_schedule/schedule/cuda/thread_bind.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_SCHEDULE_CUDA_THREAD_BIND_H_ #define TVM_META_SCHEDULE_SCHEDULE_CUDA_THREAD_BIND_H_ #include <tvm/tir/schedule/schedule.h> #include <algorithm> #include <limits> #include <utility> namespace tvm { namespace meta_schedule { /*! * \brief Given candidates of thread_extents, make a sampler that uses `sch->SampleCategorical` * to return a random thread extent. * \param sch The schedule * \param thread_extents The candidate thread extents. * \return A sampler that returns a random thread extent. */ std::function<tir::ExprRV(int64_t)> MakeFactorSampler(tir::Schedule sch, Array<Integer> thread_extents); /*! * \brief Bind blockIdx.x and threadIdx.x to the given loop * \param sch The schedule. * \param loop The loop to be bound. * \param max_threadblocks The maximum number of threadblocks allowed. * \param max_threads_per_block The maximum number of threads allowed. * \param get_factor A function that returns the tiling factor. */ Array<tir::LoopRV> BindSpatialLoop(tir::Schedule sch, tir::LoopRV loop, // int64_t max_threadblocks, int64_t max_threads_per_block, std::function<tir::ExprRV(int64_t)> get_factor = nullptr); /*! * \brief Bind the given block if it is not bound to blockIdx or threadIdx. * \param sch The schedule. * \param block The block to be bound. * \param max_threadblocks The maximum number of threadblocks allowed. * \param max_threads_per_block The maximum number of threads allowed. * \param get_factor A function that returns the tiling factor. */ void BindBlockThreadIdx(tir::Schedule sch, tir::BlockRV block, // int64_t max_threadblocks, int64_t max_threads_per_block, std::function<tir::ExprRV(int64_t max_extent)> get_factor = nullptr); } // namespace meta_schedule } // namespace tvm #endif // TVM_META_SCHEDULE_SCHEDULE_CUDA_THREAD_BIND_H_
https://github.com/zk-ml/tachikoma
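These helpers are the building blocks for attaching `blockIdx.x`/`threadIdx.x` bindings after tiling. A minimal sketch, assuming `sch` and `block` come from a surrounding schedule rule and that 256 threadblocks of up to 1024 threads is an acceptable launch bound for the target:

#include <tvm/meta_schedule/schedule/cuda/thread_bind.h>

using namespace tvm;
using namespace tvm::meta_schedule;

void BindForCUDA(tir::Schedule sch, tir::BlockRV block) {
  // Sample the thread extent from a fixed candidate set, then bind the block to
  // blockIdx.x / threadIdx.x if it is not already bound.
  auto get_factor = MakeFactorSampler(sch, {32, 64, 128, 256, 512, 1024});
  BindBlockThreadIdx(sch, block, /*max_threadblocks=*/256,
                     /*max_threads_per_block=*/1024, get_factor);
}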
include/tvm/meta_schedule/schedule/generic/winograd.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_SCHEDULE_GENERIC_WINOGRAD_H_ #define TVM_META_SCHEDULE_SCHEDULE_GENERIC_WINOGRAD_H_ #include <tvm/tir/schedule/schedule.h> namespace tvm { namespace meta_schedule { /*! * \brief Get the producer block of a given block. * If there is a constant winograd transform matrix, inline it. * \return The only producer block. */ tir::BlockRV GetWinogradProducerAndInlineConst(tir::Schedule sch, tir::BlockRV block); } // namespace meta_schedule } // namespace tvm #endif // TVM_META_SCHEDULE_SCHEDULE_GENERIC_WINOGRAD_H_
https://github.com/zk-ml/tachikoma
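A minimal sketch of how a Winograd schedule function might call this helper before scheduling the data-pack block; `sch` and `data_pack` are assumed to be supplied by the caller, and the follow-up tiling is elided:

#include <tvm/meta_schedule/schedule/generic/winograd.h>

using namespace tvm;
using namespace tvm::meta_schedule;

tir::BlockRV PrepareDataPackProducer(tir::Schedule sch, tir::BlockRV data_pack) {
  // Inline the constant transform matrix (if present) and hand back the sole
  // remaining producer, which the caller can then tile, cache, or compute-at.
  return GetWinogradProducerAndInlineConst(sch, data_pack);
}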
include/tvm/meta_schedule/schedule_rule.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_SCHEDULE_RULE_H_ #define TVM_META_SCHEDULE_SCHEDULE_RULE_H_ #include <tvm/ir/expr.h> #include <tvm/node/reflection.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/map.h> #include <tvm/runtime/container/optional.h> #include <tvm/runtime/container/string.h> #include <tvm/runtime/object.h> #include <tvm/runtime/packed_func.h> #include <tvm/tir/schedule/schedule.h> namespace tvm { namespace meta_schedule { class TuneContext; class ScheduleRule; /*! \brief Rules to modify a block in a schedule. */ class ScheduleRuleNode : public runtime::Object { public: /*! \brief Virtual destructor. */ virtual ~ScheduleRuleNode() = default; void VisitAttrs(tvm::AttrVisitor* v) {} /*! * \brief Initialize the design space generator with tuning context. * \param context The tuning context for initialization. * \note This method is supposed to be called only once before every other method. */ virtual void InitializeWithTuneContext(const TuneContext& context) = 0; /*! * \brief Apply a schedule rule to the specific block in the given schedule. * \param sch The schedule to be modified. * \param block The specific block to apply the schedule rule. * \return The list of schedules generated by applying the schedule rule. */ virtual runtime::Array<tir::Schedule> Apply(const tir::Schedule& sch, const tir::BlockRV& block) = 0; /*! * \brief Deep clone the schedule rule. * \return The cloned schedule rule. */ virtual ScheduleRule Clone() const = 0; static constexpr const char* _type_key = "meta_schedule.ScheduleRule"; TVM_DECLARE_BASE_OBJECT_INFO(ScheduleRuleNode, Object); }; /*! * \brief Managed reference to ScheduleRuleNode * \sa ScheduleRuleNode */ class ScheduleRule : public runtime::ObjectRef { public: /*! * \brief The function type of `InitializeWithTuneContext` method. * \param context The tuning context for initialization. */ using FInitializeWithTuneContext = runtime::TypedPackedFunc<void(const TuneContext&)>; /*! * \brief The function type of `Apply` method. * \param sch The schedule to be modified. * \param block The specific block to apply the schedule rule. * \return The list of schedules generated by applying the schedule rule. */ using FApply = runtime::TypedPackedFunc<Array<tir::Schedule>(const tir::Schedule&, const tir::BlockRV&)>; /*! * \brief Get the schedule rule as string with name. * \return The string of the schedule rule. */ using FAsString = runtime::TypedPackedFunc<String()>; /*! * \brief The function type of `Clone` method. * \return The cloned schedule rule. */ using FClone = runtime::TypedPackedFunc<ScheduleRule()>; /*! * \brief Create a rule that applies customized rules registered using block attribute * `schedule_rule`. 
The rule will be dispatched according to target keys. * \return The created schedule rule. */ TVM_DLL static ScheduleRule ApplyCustomRule(); /*! \brief Check if the rule is `ApplyCustomRule` */ TVM_DLL static bool IsApplyCustomRule(const ScheduleRule& rule); /*! * \brief Create an auto-inline rule that inlines spatial blocks if it satisfies some conditions * \param into_producer If allows to inline a block into its producer * \param into_consumer If allows to inline a block into its consumer * \param inline_const_tensor Always inline constant tensors * \param disallow_if_then_else Always disallow if-then-else-like constructs * \param require_ordered Always require the read-to-write mapping to be ordered * \param require_injective Always require the read-to-write mapping to be injective * \param disallow_op The operators that are disallowed in auto inline * \return The schedule rule created */ TVM_DLL static ScheduleRule AutoInline(bool into_producer, // bool into_consumer, // bool inline_const_tensor, // bool disallow_if_then_else, // bool require_injective, // bool require_ordered, // Optional<Array<String>> disallow_op); /*! * \brief Inline blocks that produce a constant scalar. Such blocks get in the way of * ReverseComputeInline during AutoInline, since they are also counted as a producer block * unless they are inlined first. So it is recommended to run InlineConstantScalars before * AutoInline. * \return The schedule rule created */ TVM_DLL static ScheduleRule InlineConstantScalars(); /*! * \brief Create a mega rule: multi-level tiling with data reuse * \param structure The tiling structure. Recommended: * - 'SSRSRS' on CPU * - 'SSSRRSRS' on GPU * \param tile_binds For each level of tiles, which thread axis it is bound to. Recommended: * - NullOpt on CPU * - [blockIdx.x, vthread.x, threadIdx.x] on GPU * \param max_innermost_factor The maximum size of the innermost factor. NullOpt means no limit * \param vector_load_lens The length of vector lane in vectorized cooperative fetching. * NullOpt means disable vectorization * \param reuse_read Data reuse configuration for reading. NullOpt means no reuse. * \param reuse_write Data reuse configuration for writing. NullOpt means no reuse. * \return The schedule rule created */ TVM_DLL static ScheduleRule MultiLevelTiling(String structure, // Optional<Array<String>> tile_binds, // Optional<Integer> max_innermost_factor, // Optional<Array<Integer>> vector_load_lens, // Optional<Map<String, ObjectRef>> reuse_read, // Optional<Map<String, ObjectRef>> reuse_write); /*! * \brief Extension of MultiLevelTiling for auto-tensorization with a single intrinsic. * \param intrin_name The name of a tensor intrinsic, must be registered via * TensorIntrin.register(...) beforehand * \param structure The tiling structure. Recommended: * - 'SSRSRS' on CPU * - 'SSSRRSRS' on GPU * \param tile_binds For each level of tiles, which thread axis it is bound to. Recommended: * - NullOpt on CPU * - [blockIdx.x, vthread.x, threadIdx.x] on GPU * \param max_innermost_factor The maximum size of the innermost factor. NullOpt means no limit * \param vector_load_lens The length of vector lane in vectorized cooperative fetching. * NullOpt means disable vectorization * \param reuse_read Data reuse configuration for reading. NullOpt means no reuse. * \param reuse_write Data reuse configuration for writing. NullOpt means no reuse. 
* \return The schedule rule created */ TVM_DLL static ScheduleRule MultiLevelTilingWithIntrin( String intrin_name, String structure, Optional<Array<String>> tile_binds, Optional<Integer> max_innermost_factor, Optional<Array<Integer>> vector_load_lens, Optional<Map<String, ObjectRef>> reuse_read, Optional<Map<String, ObjectRef>> reuse_write); /*! * \brief Extension of MultiLevelTiling for auto-tensorization with multiple groups of candidate * tensor core intrinsics * \param intrin_groups A list of groups of tensor core intrinsics. The map should contains key * "init", "load_a", "load_b", "compute", "store", which represent the tensor intrin for * initialization, loading operand A, loading operand B, tensor core computation, storing the * result. The value of the map should be names of tensor intrinsics, must be registered via * TensorIntrin.register(...) beforehand * \param structure The tiling structure. Recommended: * - 'SSSRRSRS' on GPU * \param tile_binds For each level of tiles, which thread axis it is bound to. Recommended: * - [blockIdx.y, blockIdx.x, threadIdx.y] on GPU * \param max_innermost_factor The maximum size of the innermost factor. NullOpt means no limit * \param vector_load_lens The length of vector lane in vectorized cooperative fetching. * NullOpt means disable vectorization * \param reuse_read Data reuse configuration for reading. NullOpt means no reuse. * \param reuse_write Data reuse configuration for writing. NullOpt means no reuse. * \param use_software_pipeline Whether use the software pipeline. * \return The schedule rule created */ TVM_DLL static ScheduleRule MultiLevelTilingTensorCore( Array<Map<String, String>> intrin_groups, String structure, Optional<Array<String>> tile_binds, Optional<Integer> max_innermost_factor, Optional<Array<Integer>> vector_load_lens, Optional<Map<String, ObjectRef>> reuse_read, Optional<Map<String, ObjectRef>> reuse_write, bool use_software_pipeline); /*! * \brief Extension of MultiLevelTiling for backends with wide vectors. * The loop over the innermost spatial axis of the output buffer is always vectorized with the * maximum vector length. * \param structure The tiling structure. 'SSRSRS' is recommended. * \param vector_length_in_bits The length of a vector register in bits. * \param max_innermost_factor The maximum size of the innermost factor. NullOpt means no limit * \param reuse_read Data reuse configuration for reading. NullOpt means no reuse. * \param reuse_write Data reuse configuration for writing. NullOpt means no reuse. * \return The schedule rule created */ TVM_DLL static ScheduleRule MultiLevelTilingWideVector( String structure, Integer vector_length_in_bits, Optional<Integer> max_innermost_factor, Optional<Map<String, ObjectRef>> reuse_read, Optional<Map<String, ObjectRef>> reuse_write); /*! * \brief Create a rule: add-rfactor to some blocks if needed * \param max_jobs_per_core The maximum number of jobs to be launched per CPU core. It sets the * uplimit of CPU parallelism, i.e. `num_cores * max_jobs_per_core`. Use -1 to disable * parallelism. * \param max_innermost_factor The maximum size of the innermost factor. NullOpt means no limit * \return The schedule rule created */ TVM_DLL static ScheduleRule AddRFactor(int max_jobs_per_core, // Optional<Integer> max_innermost_factor); /*! * \brief Create a schedule rule which applies cross-thread reduction to some reduction blocks * correspondingly when needed * \param thread_extents Candidates of thread axis extent (values are required to be positive). 
* \return The schedule rule created */ TVM_DLL static ScheduleRule CrossThreadReduction(Array<Integer> thread_extents); /*! * \brief A rule that randomly select a compute-at location for a free block * \return The schedule rule created */ TVM_DLL static ScheduleRule RandomComputeLocation(); /*! * \brief Mark parallelize, vectorize and unroll to the root block. The mark will be applied to * each block in a follow-up post processor * \param max_jobs_per_core The maximum number of jobs to be launched per CPU core. It sets the * upper limit of CPU parallelism, i.e. `num_cores * max_jobs_per_core`. Use -1 to disable * parallelism. * \param max_vectorize_extent The maximum extent to be vectorized. * It sets the upper limit of the hardware target vectorization. Use -1 to disable vectorization. * \param unroll_max_steps The options of the maximum number of unroll steps to be done. * Use an empty array to disable unroll. * \param unroll_explicit Whether to explicitly unroll the loop, or just add an "unroll" pragma. * \return The schedule rule created */ TVM_DLL static ScheduleRule ParallelizeVectorizeUnroll(int max_jobs_per_core, // int max_vectorize_extent, // Array<Integer> unroll_max_steps, // bool unroll_explicit); /*! * \brief Auto bind loops around the block to BlockIdx and ThreadIdx * \param max_threadblocks The maximum number of threadblock on GPU * \param thread_extents Candidates of thread axis extent. * \param max_threads_per_block The maximum number of threads per block, if it is known * when this schedule rule is created. * \return The schedule rule created */ TVM_DLL static ScheduleRule AutoBind(int max_threadblocks, Array<Integer> thread_extents, int max_threads_per_block = -1); /*! * \brief Create a schedule rule with customized methods on the python-side. * \param f_initialize_with_tune_context The packed function of `InitializeWithTuneContext`. * \param f_apply The packed function of `Apply`. * \param f_clone The packed function of `Clone`. * \param f_as_string The packed function of `AsString`. * \return The schedule rule created. */ TVM_DLL static ScheduleRule PyScheduleRule( FInitializeWithTuneContext f_initialize_with_tune_context, // FApply f_apply, // FClone f_clone, // FAsString f_as_string); /*! \brief Create default schedule rules for LLVM */ TVM_DLL static Array<ScheduleRule, void> DefaultLLVM(); /*! \brief Create default schedule rules for CUDA */ TVM_DLL static Array<ScheduleRule, void> DefaultCUDA(); /*! \brief Create default postprocessors for CUDA with TensorCore */ TVM_DLL static Array<ScheduleRule, void> DefaultCUDATensorCore(); /*! \brief Create default schedule rules for Hexagon */ TVM_DLL static Array<ScheduleRule, void> DefaultHexagon(); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(ScheduleRule, ObjectRef, ScheduleRuleNode); }; /*! \brief The schedule rule with customized methods on the python-side. */ class PyScheduleRuleNode : public ScheduleRuleNode { public: using FInitializeWithTuneContext = ScheduleRule::FInitializeWithTuneContext; using FApply = ScheduleRule::FApply; using FClone = ScheduleRule::FClone; using FAsString = ScheduleRule::FAsString; /*! \brief The packed function to the `InitializeWithTuneContext` function. */ FInitializeWithTuneContext f_initialize_with_tune_context; /*! \brief The packed function to the `Apply` function. */ FApply f_apply; /*! \brief The packed function to the `AsString` function. */ FAsString f_as_string; /*! \brief The packed function to the `Clone` function. 
*/ FClone f_clone; void VisitAttrs(tvm::AttrVisitor* v) { // `f_initialize_with_tune_context` is not visited // `f_apply` is not visited // `f_as_string` is not visited // `f_clone` is not visited } void InitializeWithTuneContext(const TuneContext& context) final; Array<tir::Schedule> Apply(const tir::Schedule& sch, const tir::BlockRV& block) final; ScheduleRule Clone() const final; static constexpr const char* _type_key = "meta_schedule.PyScheduleRule"; TVM_DECLARE_FINAL_OBJECT_INFO(PyScheduleRuleNode, ScheduleRuleNode); }; } // namespace meta_schedule } // namespace tvm #endif // TVM_META_SCHEDULE_SCHEDULE_RULE_H_
https://github.com/zk-ml/tachikoma
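The recommendations embedded in the doc comments translate directly into factory calls. A minimal sketch of a CPU-flavoured `MultiLevelTiling` rule ('SSRSRS' structure, no thread binds); the `reuse_write` keys mirror the style of the built-in defaults, but the exact values here are illustrative assumptions:

#include <tvm/meta_schedule/schedule_rule.h>

using namespace tvm;
using namespace tvm::meta_schedule;

ScheduleRule MakeCpuTilingRule() {
  Map<String, ObjectRef> reuse_write{
      {"req", String("may")},
      {"levels", Array<Integer>{1, 2}},
      {"scope", String("global")},
  };
  return ScheduleRule::MultiLevelTiling(/*structure=*/"SSRSRS",
                                        /*tile_binds=*/NullOpt,
                                        /*max_innermost_factor=*/Integer(64),
                                        /*vector_load_lens=*/NullOpt,
                                        /*reuse_read=*/NullOpt,
                                        /*reuse_write=*/reuse_write);
}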
include/tvm/meta_schedule/search_strategy.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_SEARCH_STRATEGY_H_ #define TVM_META_SCHEDULE_SEARCH_STRATEGY_H_ #include <tvm/meta_schedule/arg_info.h> #include <tvm/meta_schedule/cost_model.h> #include <tvm/meta_schedule/database.h> #include <tvm/meta_schedule/measure_candidate.h> #include <tvm/meta_schedule/runner.h> #include <tvm/node/reflection.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/optional.h> #include <tvm/runtime/object.h> #include <tvm/runtime/packed_func.h> #include <tvm/tir/schedule/schedule.h> namespace tvm { namespace meta_schedule { // Forward declaration class TuneContext; class SearchStrategy; /*! * \brief The search strategy for measure candidates generation. * \note The relationship between SearchStrategy and other classes are as follows: β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”Œβ”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ Tune Context ───────────────────────────┐ β”‚ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ Generate β”‚ β”‚ β”‚ β”‚ β”‚ Space Generator β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β–Ό β”‚ β”‚ β”‚ β”‚ Design Space β”‚ β”‚ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β”‚ β”‚ β”‚ Generate β”‚ β”‚ Pretuning β”‚ β”‚ β”‚ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ Search Strategy β”‚β—„β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”œβ”€β”€β”˜ β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”œβ”€β”€β”˜ β””β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ Managed By Task Scheduler ─────────────────────┐ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β”‚ Send to β”‚ β”‚ Send to β”‚ β”‚ β–Ό β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β–Ίβ”‚ Builder β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ Measure Candidate β”‚ Builder β”‚ β”‚ Runner β”‚ β”‚ β”‚ β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ 
β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β”‚ └────►│ Task Scheduler β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ Runner β”‚β—„β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β”‚ β”‚ β–² β””β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ └─── Runner Future β—„β”€β”€β”€β”€β”˜ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ */ class SearchStrategyNode : public runtime::Object { public: /*! \brief Virtual destructor */ virtual ~SearchStrategyNode() = default; /*! * \brief Initialize the search strategy with tuning context. * \param context The tuning context for initialization. * \note This method is supposed to be called only once before every other method. */ virtual void InitializeWithTuneContext(const TuneContext& context) = 0; /*! * \brief Pre-tuning for the search strategy. * \param max_trials The maximum number of trials. * \param num_trials_per_iter The number of trials per iteration. * \param design_spaces The design spaces used during tuning process. * \param database The database used during tuning process. * \param cost_model The cost model used during tuning process. * \note Pre-tuning is supposed to be called before the tuning process and after the * initialization. Because the search strategy is stateful, we can always call pretuning * and reset the search strategy. */ virtual void PreTuning(int max_trials, int num_trials_per_iter, const Array<tir::Schedule>& design_spaces, const Optional<Database>& database, const Optional<CostModel>& cost_model) = 0; /*! * \brief Post-tuning for the search strategy. * \note Post-tuning is supposed to be called after the tuning process and before we reset the * search strategy with another pre-tuning. Post-tuning can be empty. */ virtual void PostTuning() = 0; /*! * \brief Generate measure candidates from design spaces for measurement. * \return The measure candidates generated, nullptr if finished. */ virtual Optional<Array<MeasureCandidate>> GenerateMeasureCandidates() = 0; /*! * \brief Update the search strategy with measurement results. * \param measure_candidates The candidates to be measured. * \param results The measurement results from the runner. */ virtual void NotifyRunnerResults(const Array<MeasureCandidate>& measure_candidates, const Array<RunnerResult>& results) = 0; /*! * \brief Clone the search strategy. * \return The cloned search strategy. */ virtual SearchStrategy Clone() const = 0; static constexpr const char* _type_key = "meta_schedule.SearchStrategy"; TVM_DECLARE_BASE_OBJECT_INFO(SearchStrategyNode, Object); }; /*! * \brief Managed reference to SearchStrategyNode. * \sa SearchStrategyNode */ class SearchStrategy : public runtime::ObjectRef { public: /*! * \brief The function type of `InitializeWithTuneContext` method. * \param context The tuning context for initialization. */ using FInitializeWithTuneContext = runtime::TypedPackedFunc<void(const TuneContext&)>; /*! * \brief The function type of `PreTuning` method. */ using FPreTuning = runtime::TypedPackedFunc<void( int max_trials, int num_trials_per_iter, const Array<tir::Schedule>&, const Optional<Database>&, const Optional<CostModel>&)>; /*! \brief The function type of `PostTuning` method. */ using FPostTuning = runtime::TypedPackedFunc<void()>; /*! * \brief The function type of `GenerateMeasureCandidates` method. 
* \return The measure candidates generated, nullptr if finished. */ using FGenerateMeasureCandidates = runtime::TypedPackedFunc<Optional<Array<MeasureCandidate>>()>; /*! * \brief The function type of `NotifyRunnerResults` method. * \param results The measurement results from the runner. */ using FNotifyRunnerResults = runtime::TypedPackedFunc<void(const Array<MeasureCandidate>&, const Array<RunnerResult>&)>; /*! * \brief The function type of `Clone` method. * \return The cloned search strategy. */ using FClone = runtime::TypedPackedFunc<SearchStrategy()>; /*! * \brief Create a search strategy with customized methods on the python-side. * \param f_initialize_with_tune_context The packed function of `InitializeWithTuneContext`. * \param f_pre_tuning The packed function of `PreTuning`. * \param f_post_tuning The packed function of `PostTuning`. * \param f_generate_measure_candidates The packed function of `GenerateMeasureCandidates`. * \param f_notify_runner_results The packed function of `NotifyRunnerResults`. * \param f_clone The packed function of `Clone`. * \return The search strategy created. */ TVM_DLL static SearchStrategy PySearchStrategy( FInitializeWithTuneContext f_initialize_with_tune_context, // FPreTuning f_pre_tuning, // FPostTuning f_post_tuning, // FGenerateMeasureCandidates f_generate_measure_candidates, // FNotifyRunnerResults f_notify_runner_results, // FClone f_clone); /*! * \brief Constructor of replay trace search strategy. * \param max_fail_count The max number of failures during trace replaying. */ TVM_DLL static SearchStrategy ReplayTrace(int max_fail_count); /*! \brief Constructor of replay func search strategy. */ TVM_DLL static SearchStrategy ReplayFunc(); /*! * \brief Constructor of evolutionary search strategy. * \param population_size The initial sample population. * \param init_measured_ratio The ratio of measures samples in initial population. * \param init_min_unmeasured The minimal size of unmeasured population in the initial sampling. * \param max_fail_count The max number of failure during initial sampling. * \param genetic_num_iters The iterations to run the genetic algorithm. * \param genetic_mutate_prob The probability of mutation. * \param genetic_max_fail_count The maximum number to try evolving the given trace. * \param eps_greedy The ratio to select samples in a greedy fashion via their predicted score. */ TVM_DLL static SearchStrategy EvolutionarySearch(int population_size, // double init_measured_ratio, // int init_min_unmeasured, // int max_fail_count, // int genetic_num_iters, // double genetic_mutate_prob, // int genetic_max_fail_count, // double eps_greedy); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(SearchStrategy, ObjectRef, SearchStrategyNode); }; /*! \brief The python side customizable class for measure candidate generation */ class PySearchStrategyNode : public SearchStrategyNode { public: using FInitializeWithTuneContext = SearchStrategy::FInitializeWithTuneContext; using FPreTuning = SearchStrategy::FPreTuning; using FPostTuning = SearchStrategy::FPostTuning; using FGenerateMeasureCandidates = SearchStrategy::FGenerateMeasureCandidates; using FNotifyRunnerResults = SearchStrategy::FNotifyRunnerResults; using FClone = SearchStrategy::FClone; /*! \brief The packed function to the `InitializeWithTuneContext` method. */ FInitializeWithTuneContext f_initialize_with_tune_context; /*! \brief The packed function to the `PreTuning` method. */ FPreTuning f_pre_tuning; /*! \brief The packed function to the `PostTuning` method. 
*/ FPostTuning f_post_tuning; /*! \brief The packed function to the `GenerateMeasureCandidates` method. */ FGenerateMeasureCandidates f_generate_measure_candidates; /*! \brief The packed function to the `NotifyRunnerResults` method. */ FNotifyRunnerResults f_notify_runner_results; /*! \brief The packed function to the `Clone` method. */ FClone f_clone; void VisitAttrs(tvm::AttrVisitor* v) { // `f_initialize_with_tune_context` is not visited // `f_pre_tuning` is not visited // `f_post_tuning` is not visited // `f_generate_measure_candidates` is not visited // `f_notify_runner_results` is not visited // `f_clone` is not visited } void InitializeWithTuneContext(const TuneContext& context) final; void PreTuning(int max_trials, int num_trials_per_iter, const Array<tir::Schedule>& design_spaces, const Optional<Database>& database, const Optional<CostModel>& cost_model) final; void PostTuning() final; Optional<Array<MeasureCandidate>> GenerateMeasureCandidates() final; void NotifyRunnerResults(const Array<MeasureCandidate>& measure_candidates, const Array<RunnerResult>& results); SearchStrategy Clone() const final; static constexpr const char* _type_key = "meta_schedule.PySearchStrategy"; TVM_DECLARE_FINAL_OBJECT_INFO(PySearchStrategyNode, SearchStrategyNode); }; } // namespace meta_schedule } // namespace tvm #endif // TVM_META_SCHEDULE_SEARCH_STRATEGY_H_
https://github.com/zk-ml/tachikoma
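The evolutionary strategy is usually constructed straight from these factory parameters. A minimal sketch with illustrative values; they echo commonly used settings but are not read from this header:

#include <tvm/meta_schedule/search_strategy.h>

using namespace tvm;
using namespace tvm::meta_schedule;

SearchStrategy MakeEvolutionarySearch() {
  return SearchStrategy::EvolutionarySearch(/*population_size=*/2048,
                                            /*init_measured_ratio=*/0.2,
                                            /*init_min_unmeasured=*/50,
                                            /*max_fail_count=*/5,
                                            /*genetic_num_iters=*/4,
                                            /*genetic_mutate_prob=*/0.85,
                                            /*genetic_max_fail_count=*/10,
                                            /*eps_greedy=*/0.05);
}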
include/tvm/meta_schedule/space_generator.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_SPACE_GENERATOR_H_ #define TVM_META_SCHEDULE_SPACE_GENERATOR_H_ #include <tvm/ir/module.h> #include <tvm/meta_schedule/mutator.h> #include <tvm/meta_schedule/postproc.h> #include <tvm/meta_schedule/schedule_rule.h> #include <tvm/node/reflection.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/object.h> #include <tvm/runtime/packed_func.h> #include <tvm/target/target.h> #include <tvm/tir/schedule/schedule.h> namespace tvm { namespace meta_schedule { // Forward declaration class TuneContext; class SpaceGenerator; /*! * \brief The abstract class for design space generation. * \note The relationship between SpaceGenerator and other classes are as follows: β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”Œβ”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ Tune Context ───────────────────────────┐ β”‚ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ Generate β”‚ β”‚ β”‚ β”‚ β”‚ Space Generator β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β–Ό β”‚ β”‚ β”‚ β”‚ Design Space β”‚ β”‚ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β”‚ β”‚ β”‚ Generate β”‚ β”‚ Pretuning β”‚ β”‚ β”‚ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ Search Strategy β”‚β—„β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”œβ”€β”€β”˜ β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”œβ”€β”€β”˜ β””β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ Managed By Task Scheduler ─────────────────────┐ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β”‚ Send to β”‚ β”‚ Send to β”‚ β”‚ β–Ό β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β–Ίβ”‚ Builder β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ Measure Candidate β”‚ Builder β”‚ β”‚ Runner β”‚ β”‚ β”‚ β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β”‚ └────►│ Task Scheduler β”‚ β”‚ β”‚ β”‚ β”‚ 
β”‚ β”‚ β”‚ β”‚ Runner β”‚β—„β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β”‚ β”‚ β–² β””β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ └─── Runner Future β—„β”€β”€β”€β”€β”˜ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ */ class SpaceGeneratorNode : public runtime::Object { public: /*! \brief The schedule rules. */ Optional<Array<ScheduleRule>> sch_rules; /*! \brief The postprocessors. */ Optional<Array<Postproc>> postprocs; /*! \brief The probability of using certain mutator. */ Optional<Map<Mutator, FloatImm>> mutator_probs; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("sch_rules", &sch_rules); v->Visit("postprocs", &postprocs); v->Visit("mutator_probs", &mutator_probs); } /*! \brief Default destructor */ virtual ~SpaceGeneratorNode() = default; /*! * \brief Initialize the design space generator with tuning context. * \param context The tuning context for initialization. * \note This method is supposed to be called only once before every other method. */ virtual void InitializeWithTuneContext(const TuneContext& context); /*! * \brief Generate design spaces given a module. * \param mod The module used for design space generation. * \return The generated design spaces, i.e., schedules. */ virtual Array<tir::Schedule> GenerateDesignSpace(const IRModule& mod) = 0; /*! * \brief Clone the space generator. * \return The cloned space generator. */ virtual SpaceGenerator Clone() const = 0; static constexpr const char* _type_key = "meta_schedule.SpaceGenerator"; TVM_DECLARE_BASE_OBJECT_INFO(SpaceGeneratorNode, Object); }; /*! * \brief Managed reference to SpaceGeneratorNode. * \sa SpaceGeneratorNode */ class SpaceGenerator : public runtime::ObjectRef { public: /*! * \brief The function type of `InitializeWithTuneContext` method. * \param context The tuning context for initialization. */ using FInitializeWithTuneContext = runtime::TypedPackedFunc<void(const TuneContext&)>; /*! * \brief The function type of `GenerateDesignSpace` method. * \param mod The module used for design space generation. * \return The generated design spaces, i.e., schedules. */ using FGenerateDesignSpace = runtime::TypedPackedFunc<Array<tir::Schedule>(const IRModule&)>; /*! * \brief The function type of `Clone` method. * \return The cloned space generator. */ using FClone = runtime::TypedPackedFunc<SpaceGenerator()>; protected: SpaceGenerator() = default; public: /*! * \brief Create a design space generator with customized methods on the python-side. * \param sch_rules The schedule rules. * \param postprocs The postprocessors. * \param mutator_probs The probability of using certain mutator. * \param f_initialize_with_tune_context The packed function of `InitializeWithTuneContext`. * \param f_generate_design_space The packed function of `GenerateDesignSpace`. * \param f_clone The packed function of `Clone`. * \return The design space generator created. */ TVM_DLL static SpaceGenerator PySpaceGenerator( Optional<Array<ScheduleRule>> sch_rules, Optional<Array<Postproc>> postprocs, Optional<Map<Mutator, FloatImm>> mutator_probs, FInitializeWithTuneContext f_initialize_with_tune_context, FGenerateDesignSpace f_generate_design_space, FClone f_clone); /*! * \brief Create a design space generator with customized schedule function. 
* \param schedule_fn The schedule function, which can have the following signatures: * 1) void(Schedule) * 2) Schedule(Schedule) * 3) Array<Schedule>(Schedule) * \param sch_rules The schedule rules. * \param postprocs The postprocessors. * \param mutator_probs The probability of using certain mutator. */ TVM_DLL static SpaceGenerator ScheduleFn(PackedFunc schedule_fn, Optional<Array<ScheduleRule>> sch_rules, Optional<Array<Postproc>> postprocs, Optional<Map<Mutator, FloatImm>> mutator_probs); /*! * \brief Create a design space generator that is union of multiple design space generators. * \param space_generators An array of design space generators to be unioned. * \param sch_rules The schedule rules. * \param postprocs The postprocessors. * \param mutator_probs The probability of using certain mutator. * \return The design space generator created. */ TVM_DLL static SpaceGenerator SpaceGeneratorUnion(Array<SpaceGenerator, void> space_generators, Optional<Array<ScheduleRule>> sch_rules, Optional<Array<Postproc>> postprocs, Optional<Map<Mutator, FloatImm>> mutator_probs); /*! * \brief Create a design space generator that generates design spaces by applying schedule * rules to blocks in post-DFS order. * \param f_block_filter The filter function to filter blocks to be applied with schedule rules. * \param sch_rules The schedule rules. * \param postprocs The postprocessors. * \param mutator_probs The probability of using certain mutator. * \return The design space generator created. */ TVM_DLL static SpaceGenerator PostOrderApply(runtime::PackedFunc f_block_filter, Optional<Array<ScheduleRule>> sch_rules, Optional<Array<Postproc>> postprocs, Optional<Map<Mutator, FloatImm>> mutator_probs); TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(SpaceGenerator, ObjectRef, SpaceGeneratorNode); }; /*! \brief The design space generator with customized methods on the python-side. */ class PySpaceGeneratorNode : public SpaceGeneratorNode { public: using FInitializeWithTuneContext = SpaceGenerator::FInitializeWithTuneContext; using FGenerateDesignSpace = SpaceGenerator::FGenerateDesignSpace; using FClone = SpaceGenerator::FClone; /*! \brief The packed function to the `InitializeWithTuneContext` function. */ FInitializeWithTuneContext f_initialize_with_tune_context; /*! \brief The packed function to the `GenerateDesignSpace` function. */ FGenerateDesignSpace f_generate_design_space; /*! \brief The packed function to the `Clone` function. */ FClone f_clone; void VisitAttrs(tvm::AttrVisitor* v) { SpaceGeneratorNode::VisitAttrs(v); // `f_initialize_with_tune_context` is not visited // `f_generate_design_space` is not visited // `f_clone` is not visited } void InitializeWithTuneContext(const TuneContext& context) final; Array<tir::Schedule> GenerateDesignSpace(const IRModule& mod) final; SpaceGenerator Clone() const final; static constexpr const char* _type_key = "meta_schedule.PySpaceGenerator"; TVM_DECLARE_FINAL_OBJECT_INFO(PySpaceGeneratorNode, SpaceGeneratorNode); }; } // namespace meta_schedule } // namespace tvm #endif // TVM_META_SCHEDULE_SPACE_GENERATOR_H_
https://github.com/zk-ml/tachikoma
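The `ScheduleFn` factory above is the simplest way to pin down a hand-written design space: the packed function receives a schedule and mutates it in place (signature 1 from the doc comment). A minimal sketch with an empty schedule body as a placeholder:

#include <tvm/meta_schedule/space_generator.h>

using namespace tvm;
using namespace tvm::meta_schedule;

SpaceGenerator MakeManualSpace() {
  runtime::PackedFunc schedule_fn = runtime::TypedPackedFunc<void(tir::Schedule)>(
      [](tir::Schedule sch) {
        // A real schedule function would issue sch->Split / sch->Reorder /
        // sch->Vectorize calls here; leaving it empty keeps the original module
        // as the only point in the design space.
        (void)sch;
      });
  return SpaceGenerator::ScheduleFn(schedule_fn,
                                    /*sch_rules=*/NullOpt,
                                    /*postprocs=*/NullOpt,
                                    /*mutator_probs=*/NullOpt);
}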
include/tvm/meta_schedule/task_scheduler.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_TASK_SCHEDULER_H_ #define TVM_META_SCHEDULE_TASK_SCHEDULER_H_ #include <tvm/meta_schedule/builder.h> #include <tvm/meta_schedule/cost_model.h> #include <tvm/meta_schedule/measure_callback.h> #include <tvm/meta_schedule/runner.h> #include <tvm/meta_schedule/tune_context.h> #include <tvm/node/reflection.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/optional.h> #include <tvm/runtime/object.h> #include <tvm/runtime/packed_func.h> #include <tvm/support/random_engine.h> #include <string> #include <vector> namespace tvm { namespace meta_schedule { class TaskRecordNode : public runtime::Object { public: /*! \brief The tune context of the task. */ TuneContext ctx{nullptr}; /*! \brief The weight of the task */ double task_weight{1.0}; /*! \brief The FLOP count of the task */ double flop{1.0}; /*! \brief Whether the tuning task has been stopped or finished. */ bool is_terminated = false; /*! \brief Builder errors happens in the task */ int build_error_count = 0; /*! \brief Runner errors happens in the task */ int run_error_count = 0; /*! \brief The latency of each run, in milliseconds. */ std::vector<double> latency_ms = {}; /*! \brief The measure candidates. */ Optional<Array<MeasureCandidate>> measure_candidates = NullOpt; /*! \brief The building results. */ Optional<Array<BuilderResult>> builder_results = NullOpt; /*! \brief Packed functions to fetch the runner results asynchronously. */ Optional<Array<RunnerFuture>> runner_futures = NullOpt; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("ctx", &ctx); v->Visit("task_weight", &task_weight); v->Visit("flop", &flop); v->Visit("is_terminated", &is_terminated); v->Visit("build_error_count", &build_error_count); v->Visit("run_error_count", &run_error_count); // `latency_ms` is not visited v->Visit("measure_candidates", &measure_candidates); v->Visit("builder_results", &builder_results); v->Visit("runner_futures", &runner_futures); } static constexpr const char* _type_key = "meta_schedule.TaskRecord"; TVM_DECLARE_FINAL_OBJECT_INFO(TaskRecordNode, Object); }; /*! * \brief Managed reference to TaskRecordNode. * \sa TaskRecordNode */ class TaskRecord : public runtime::ObjectRef { public: /*! \brief Constructor */ explicit TaskRecord(TuneContext task, double task_weight); TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(TaskRecord, ObjectRef, TaskRecordNode); }; /*! * \brief The abstract interface of task schedulers. 
* \note The relationship between SpaceGenerator and other classes are as follows: β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”Œβ”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ Tune Context ───────────────────────────┐ β”‚ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ Generate β”‚ β”‚ β”‚ β”‚ β”‚ Space Generator β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β–Ό β”‚ β”‚ β”‚ β”‚ Design Space β”‚ β”‚ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β”‚ β”‚ β”‚ Generate β”‚ β”‚ Pretuning β”‚ β”‚ β”‚ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ Search Strategy β”‚β—„β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”œβ”€β”€β”˜ β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”œβ”€β”€β”˜ β””β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ Managed By Task Scheduler ─────────────────────┐ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β”‚ Send to β”‚ β”‚ Send to β”‚ β”‚ β–Ό β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β–Ίβ”‚ Builder β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ Measure Candidate β”‚ Builder β”‚ β”‚ Runner β”‚ β”‚ β”‚ β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ β”‚ └────►│ Task Scheduler β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ Runner β”‚β—„β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β”‚ β”‚ β–² β””β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ └─── Runner Future β—„β”€β”€β”€β”€β”˜ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ */ class TaskSchedulerNode : public runtime::Object { public: /*! \brief The tuning task's logging function. */ PackedFunc logger; /*! \brief Records for each task */ Array<TaskRecord> tasks_; /*! \brief The list of measure callbacks of the scheduler. */ Array<MeasureCallback> measure_callbacks_; /*! \brief The database used in tuning */ Optional<Database> database_; /*! \brief The cost model used in tuning */ Optional<CostModel> cost_model_; /*! \brief The number of remaining tasks to be tuned. */ int remaining_tasks_; /*! \brief The default destructor. */ virtual ~TaskSchedulerNode() = default; void VisitAttrs(tvm::AttrVisitor* v) { // `logger` is not visited v->Visit("tasks_", &tasks_); v->Visit("measure_callbacks_", &measure_callbacks_); v->Visit("database_", &database_); v->Visit("cost_model_", &cost_model_); v->Visit("remaining_tasks_", &remaining_tasks_); } /*! * \brief Fetch the next task id. 
* \return The next task id. */ virtual int NextTaskId() = 0; /*! * \brief Wait until the task is finished. * \param task_id The task id to be joined. * \return The results from the runner. */ virtual Array<RunnerResult> JoinRunningTask(int task_id); /*! * \brief Jointly tune a given list of tasks. * \param tasks The tasks to be tuned * \param task_weights The weight of each task * \param max_trials_global The maximum number of trials to be performed globally * \param max_trials_per_task The maximum number of trials to be performed for each task * \param num_trials_per_iter The number of trials to be performed in each iteration * \param builder The MetaSchedule builder * \param runner The MetaSchedule runner * \param measure_callbacks The callbacks to be called after each measurement * \param database The database used in tuning * \param cost_model The cost model used in tuning */ virtual void Tune(Array<TuneContext> tasks, // Array<FloatImm> task_weights, // int max_trials_global, // int max_trials_per_task, // int num_trials_per_iter, // Builder builder, // Runner runner, // Array<MeasureCallback> measure_callbacks, // Optional<Database> database, // Optional<CostModel> cost_model); /*! * \brief Terminate a task * \param task_id The id of the task to be terminated */ void TerminateTask(int task_id); /*! * \brief Touch the task and update its status * \param task_id The task id to be checked. */ void TouchTask(int task_id); /*! \brief Print out a human-readable format of the tuning statistics. */ void PrintTuningStatistics(); static constexpr const char* _type_key = "meta_schedule.TaskScheduler"; TVM_DECLARE_BASE_OBJECT_INFO(TaskSchedulerNode, Object); }; class TaskScheduler; /*! \brief The task scheduler with customized methods on the python-side. */ class PyTaskSchedulerNode : public TaskSchedulerNode { public: /*! * \brief The function type of `NextTaskId` method. * \return The next task id. */ using FNextTaskId = runtime::TypedPackedFunc<int()>; /*! * \brief The function type of `JoinRunningTask` method. * \param task_id The task id to be joined. */ using FJoinRunningTask = runtime::TypedPackedFunc<Array<RunnerResult>(int)>; /*! \brief The function type of `Tune` method. */ using FTune = runtime::TypedPackedFunc<void(Array<TuneContext> tasks, // Array<FloatImm> task_weights, // int max_trials_global, // int max_trials_per_task, // int num_trials_per_iter, // Builder builder, // Runner runner, // Array<MeasureCallback> measure_callbacks, // Optional<Database> database, // Optional<CostModel> cost_model)>; /*! \brief The packed function to the `NextTaskId` function. */ FNextTaskId f_next_task_id; /*! \brief The packed function to the `JoinRunningTask` function. */ FJoinRunningTask f_join_running_task; /*! \brief The packed function to the `Tune` function. */ FTune f_tune; void VisitAttrs(tvm::AttrVisitor* v) { TaskSchedulerNode::VisitAttrs(v); // `f_next_task_id` is not visited // `f_join_running_task` is not visited // `f_tune` is not visited } int NextTaskId() final; Array<RunnerResult> JoinRunningTask(int task_id) final; void Tune(Array<TuneContext> tasks, Array<FloatImm> task_weights, int max_trials_global, int max_trials_per_task, int num_trials_per_iter, Builder builder, Runner runner, Array<MeasureCallback> measure_callbacks, Optional<Database> database, Optional<CostModel> cost_model) final; static constexpr const char* _type_key = "meta_schedule.PyTaskScheduler"; TVM_DECLARE_FINAL_OBJECT_INFO(PyTaskSchedulerNode, TaskSchedulerNode); }; /*! 
 * \brief Managed reference to TaskSchedulerNode.
 * \sa TaskSchedulerNode
 */
class TaskScheduler : public runtime::ObjectRef {
 public:
  /*!
   * \brief Create a task scheduler that fetches tasks in a round-robin fashion.
   * \param logger The tuning task's logging function.
   * \return The task scheduler created.
   */
  TVM_DLL static TaskScheduler RoundRobin(PackedFunc logger);
  /*!
   * \brief Create a task scheduler that fetches tasks in a gradient-based fashion.
   * \param logger The tuning task's logging function.
   * \param alpha The parameter alpha to control gradient computation.
   * \param window_size The parameter to control the backward window size.
   * \param seed The random seed.
   * \return The task scheduler created.
   */
  TVM_DLL static TaskScheduler GradientBased(PackedFunc logger, double alpha, int window_size,
                                             support::LinearCongruentialEngine::TRandState seed);
  /*!
   * \brief Create a task scheduler with customized methods on the Python side.
   * \param logger The tuning task's logging function.
   * \param f_next_task_id The packed function of `NextTaskId`.
   * \param f_join_running_task The packed function of `JoinRunningTask`.
   * \param f_tune The packed function of `Tune`.
   * \return The task scheduler created.
   */
  TVM_DLL static TaskScheduler PyTaskScheduler(
      PackedFunc logger, PyTaskSchedulerNode::FNextTaskId f_next_task_id,
      PyTaskSchedulerNode::FJoinRunningTask f_join_running_task,
      PyTaskSchedulerNode::FTune f_tune);
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(TaskScheduler, ObjectRef, TaskSchedulerNode);
};

}  // namespace meta_schedule
}  // namespace tvm

#endif  // TVM_META_SCHEDULE_TASK_SCHEDULER_H_
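A minimal usage sketch, not part of the header: it assumes the `tasks`, `task_weights`, `builder`, `runner`, and `logger` arguments are created elsewhere, and the trial counts are arbitrary placeholder values; only the calls declared above are used.

#include <tvm/meta_schedule/task_scheduler.h>

namespace demo {
using namespace tvm;
using namespace tvm::meta_schedule;

void TuneAllTasks(Array<TuneContext> tasks, Array<FloatImm> task_weights, Builder builder,
                  Runner runner, runtime::PackedFunc logger) {
  // Round-robin scheduling: tasks are visited in turn until the global trial budget is spent.
  TaskScheduler scheduler = TaskScheduler::RoundRobin(logger);
  scheduler->Tune(tasks, task_weights,
                  /*max_trials_global=*/2000,
                  /*max_trials_per_task=*/512,
                  /*num_trials_per_iter=*/64,
                  builder, runner,
                  /*measure_callbacks=*/{},
                  /*database=*/NullOpt,
                  /*cost_model=*/NullOpt);
}
}  // namespace demo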
https://github.com/zk-ml/tachikoma
include/tvm/meta_schedule/tune_context.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef TVM_META_SCHEDULE_TUNE_CONTEXT_H_ #define TVM_META_SCHEDULE_TUNE_CONTEXT_H_ #include <tvm/ir/expr.h> #include <tvm/ir/module.h> #include <tvm/meta_schedule/builder.h> #include <tvm/meta_schedule/runner.h> #include <tvm/meta_schedule/search_strategy.h> #include <tvm/meta_schedule/space_generator.h> #include <tvm/node/reflection.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/map.h> #include <tvm/runtime/container/optional.h> #include <tvm/runtime/container/string.h> #include <tvm/runtime/object.h> #include <tvm/runtime/packed_func.h> #include <tvm/support/random_engine.h> #include <tvm/target/target.h> namespace tvm { namespace meta_schedule { class TaskSchedulerNode; class MeasureCallback; class TuneContext; /*! \brief The auto tuning context. */ class TuneContextNode : public runtime::Object { public: using TRandState = support::LinearCongruentialEngine::TRandState; /*! \brief The workload to be tuned. */ Optional<IRModule> mod; /*! \brief The target to be tuned for. */ Optional<Target> target; /*! \brief The design space generator. */ Optional<SpaceGenerator> space_generator; /*! \brief The search strategy. */ Optional<SearchStrategy> search_strategy; /*! \brief The name of the tuning task. */ Optional<String> task_name; /*! \brief The number of threads to be used. */ int num_threads; /*! \brief The random state. */ TRandState rand_state; /*! \brief The tuning task's logging function. t*/ PackedFunc logger; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("mod", &mod); v->Visit("target", &target); v->Visit("space_generator", &space_generator); v->Visit("search_strategy", &search_strategy); v->Visit("task_name", &task_name); v->Visit("num_threads", &num_threads); v->Visit("rand_state", &rand_state); // `logger` is not visited } /*! * \brief Initialize members that needs initialization with tune context. */ void Initialize(); /*! * \brief Clone the tune context. * \return The cloned tune context. */ TuneContext Clone() const; static constexpr const char* _type_key = "meta_schedule.TuneContext"; TVM_DECLARE_FINAL_OBJECT_INFO(TuneContextNode, Object); }; /*! * \brief Managed reference to TuneContextNode. * \sa TuneContextNode */ class TuneContext : public runtime::ObjectRef { public: using TRandState = support::LinearCongruentialEngine::TRandState; /*! * \brief Constructor. * \param mod The workload to be tuned. * \param target The target to be tuned for. * \param space_generator The design space generator. * \param search_strategy The search strategy. * \param task_name The name of the tuning task. * \param num_threads The number of threads to be used. * \param rand_state The random state. * \param logger The tuning task's logging function. 
 */
  TVM_DLL explicit TuneContext(Optional<IRModule> mod, Optional<Target> target,
                               Optional<SpaceGenerator> space_generator,
                               Optional<SearchStrategy> search_strategy,
                               Optional<String> task_name, int num_threads, TRandState rand_state,
                               PackedFunc logger);
  TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(TuneContext, ObjectRef, TuneContextNode);
};

}  // namespace meta_schedule
}  // namespace tvm

#endif  // TVM_META_SCHEDULE_TUNE_CONTEXT_H_
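A minimal construction sketch, not part of the header: the module, target, space generator, search strategy, and logger are assumed to be produced elsewhere in a real tuning setup, and the task name, thread count, and the -1 random seed are illustrative placeholders.

#include <tvm/meta_schedule/tune_context.h>

namespace demo {
using namespace tvm;
using namespace tvm::meta_schedule;

TuneContext MakeContext(IRModule mod, Target target, SpaceGenerator space_generator,
                        SearchStrategy search_strategy, runtime::PackedFunc logger) {
  // All arguments except num_threads, rand_state, and logger are Optional in the
  // constructor, so concrete values convert implicitly.
  return TuneContext(mod, target, space_generator, search_strategy,
                     /*task_name=*/String("main"),
                     /*num_threads=*/1,
                     /*rand_state=*/-1,
                     logger);
}
}  // namespace demo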
https://github.com/zk-ml/tachikoma
include/tvm/node/attr_registry_map.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/node/attr_registry_map.h * \brief Attribute map used in registry. */ #ifndef TVM_NODE_ATTR_REGISTRY_MAP_H_ #define TVM_NODE_ATTR_REGISTRY_MAP_H_ #include <tvm/runtime/container/string.h> #include <utility> #include <vector> namespace tvm { /*! * \brief Generic attribute map. * \tparam KeyType the type of the key. */ template <typename KeyType> class AttrRegistryMapContainerMap { public: /*! * \brief Check if the map has key. * \param key The key to the map * \return 1 if key is contained in map, 0 otherwise. */ int count(const KeyType& key) const { if (key.defined()) { const uint32_t idx = key->AttrRegistryIndex(); return idx < data_.size() ? (data_[idx].second != 0) : 0; } else { return 0; } } /*! * \brief get the corresponding value element at key. * \param key The key to the map * \return the const reference to the content value. */ const runtime::TVMRetValue& operator[](const KeyType& key) const { ICHECK(key.defined()); const uint32_t idx = key->AttrRegistryIndex(); ICHECK(idx < data_.size() && data_[idx].second != 0) << "Attribute " << attr_name_ << " has not been registered for " << key->name; return data_[idx].first; } /*! * \brief get the corresponding value element at key with default value. * \param key The key to the map * \param def_value The default value when the key does not exist. * \return the const reference to the content value. * \tparam ValueType The content value type. */ template <typename ValueType> ValueType get(const KeyType& key, ValueType def_value) const { ICHECK(key.defined()); const uint32_t idx = key->AttrRegistryIndex(); if (idx < data_.size() && data_[idx].second != 0) { return data_[idx].first; } else { return def_value; } } private: /*! \brief The name of the attr field */ String attr_name_; /*! \brief The internal data. */ std::vector<std::pair<runtime::TVMRetValue, int>> data_; /*! \brief The constructor */ AttrRegistryMapContainerMap() = default; template <typename, typename> friend class AttrRegistry; friend class OpRegEntry; }; /*! * \brief Map<Key, ValueType> used to store meta-data. * \tparam KeyType The type of the key * \tparam ValueType The type of the value stored in map. */ template <typename KeyType, typename ValueType> class AttrRegistryMap { public: /*! * \brief constructor * \param map The internal map. */ explicit AttrRegistryMap(const AttrRegistryMapContainerMap<KeyType>& map) : map_(map) {} /*! * \brief Check if the map has op as key. * \param key The key to the map * \return 1 if op is contained in map, 0 otherwise. */ int count(const KeyType& key) const { return map_.count(key); } /*! * \brief get the corresponding value element at key. * \param key The key to the map * \return the const reference to the content value. 
 */
  ValueType operator[](const KeyType& key) const { return map_[key]; }
  /*!
   * \brief get the corresponding value element at key with default value.
   * \param key The key to the map
   * \param def_value The default value when the key does not exist.
   * \return the const reference to the content value.
   */
  ValueType get(const KeyType& key, ValueType def_value) const { return map_.get(key, def_value); }

 protected:
  /*! \brief The internal map field */
  const AttrRegistryMapContainerMap<KeyType>& map_;
};

}  // namespace tvm

#endif  // TVM_NODE_ATTR_REGISTRY_MAP_H_
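AttrRegistryMap is not usually constructed by hand; it is obtained through higher-level wrappers such as Op::GetAttrMap, which returns an OpAttrMap built on this class. A rough sketch under that assumption follows; the "TOpPattern" integer attribute and the GetOpPattern helper are illustrative.

#include <tvm/ir/op.h>

namespace demo {
using namespace tvm;

// Query the integer "TOpPattern" attribute of an operator via the registry map.
int GetOpPattern(const Op& op) {
  OpAttrMap<int> pattern_map = Op::GetAttrMap<int>("TOpPattern");
  // get() falls back to the supplied default when the attribute was never
  // registered for this operator; operator[] would ICHECK-fail instead.
  return pattern_map.get(op, /*def_value=*/-1);
}
}  // namespace demo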
https://github.com/zk-ml/tachikoma