file_path | content | repo
---|---|---|
python/tvm/autotvm/graph_tuner/dynamic_programming_tuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-error,too-many-locals,too-many-statements,too-many-branches,unused-variable
"""Dynamic programming tuner."""
import sys
import numpy as np
from ._base import MAX_OUTPUT_NODES
from .base_graph_tuner import BaseGraphTuner
from .dynamic_programming_stage import DPStage
from .utils import has_multiple_inputs, is_boundary_node
if sys.version_info[0] == 3:
import queue
else:
import Queue as queue
class DPTuner(BaseGraphTuner):
"""Tuner which uses dynamic programming to solve MDP problem.
Note: currently dynamic programming is used to solve this MDP problem. However,
this problem is intrinsically non-polynomial. DP can't apply for more complicated
models, such as networks with many element-wise sum operators. In this case, switch
to heuristic algorithm such as PBQP tuner.
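Examples
--------
# Illustrative sketch only; ``graph``, ``dshape``, ``records``, ``target_ops``,
# ``target`` and ``opt_sch_file`` are placeholders prepared by the caller.
>>> executor = DPTuner(graph, {"data": dshape}, records, target_ops, target)
>>> executor.benchmark_layout_transform(min_exec_num=2000)
>>> executor.run()
>>> executor.write_opt_sch2record_file(opt_sch_file)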
"""
def __init__(self, *args, **kwargs):
"""Create a dynamic programming tuner."""
super(DPTuner, self).__init__(*args, **kwargs)
self._num_states = self._max_num_states = None
self._stage_dict = {}
self._dep_dict = {}
self._counted_nodes_set = set()
self._global_data_dict = {
"dtype": self._dtype,
"counted_nodes_set": self._counted_nodes_set,
"stage_dict": self._stage_dict,
"in_nodes_dict": self._in_nodes_dict,
"out_nodes_dict": self._out_nodes_dict,
"dep_dict": self._dep_dict,
"node_list": self._node_list,
"input_shapes": self._input_shapes,
"layout_transform_interlayer_cost": self._layout_transform_interlayer_cost,
}
def _check_num_states(self, num_states):
"""Track the number of states."""
self._num_states += num_states
if self._max_num_states is not None:
if self._num_states > self._max_num_states:
raise RuntimeError(
"Too many states detected while running dynamic "
"programming: got %d states but upper limit is %d."
% (self._num_states, self._max_num_states)
)
def _forward(self):
"""Forward pass in DP to generate states for all stages."""
self._logger.info("Start forward pass...")
for node_idx in sorted(self._in_nodes_dict.keys()):
stage = DPStage(idx=node_idx, target_ops=self._target_ops, **self._global_data_dict)
self._check_num_states(stage.full_states.size)
self._stage_dict[node_idx] = stage
self._logger.info("Finished forward pass.")
def _backward(self):
"""Backward pass in DP to generate optimal solution."""
self._logger.info("Start backward pass...")
input_names = self._input_shapes.keys()
optimal_record_dict = {}
# Pick optimal schedule for output nodes
output_idx_list = []
for key, val in self._out_nodes_dict.items():
if not val:
output_idx_list.append(key)
# Restrict number of output nodes to avoid numpy reshape error
if len(output_idx_list) > MAX_OUTPUT_NODES:
msg = (
"The number of outputs in the graph is larger than the upper "
"limit: %s vs %s. Usually this is caused by too many "
"LAYOUT_FIXED_OP nodes in the graph. Switching to greedily selected "
"schedules. No action is required; we will continue to improve the graph tuner."
% (len(output_idx_list), MAX_OUTPUT_NODES)
)
self._logger.warning(msg)
self._optimal_record_dict = {key: 0 for key in self._in_nodes_dict}
return
states_list, aligned_node_list = DPStage.align_states(
output_idx_list, self._stage_dict, self._node_list
)
num_states = states_list[0][3].size
self._check_num_states(num_states * len(output_idx_list))
aligned_node_shape = states_list[0][3].shape
min_time = 0
min_pos = -1
for states in states_list:
min_time += np.amax(states[3])
flatten_states_list = [current_states[3].flatten() for current_states in states_list]
for i in range(num_states):
current_time = 0
for j, current_states in enumerate(states_list):
current_time += flatten_states_list[j][i]
if min_time > current_time:
min_time = current_time
min_pos = i
for i, states in enumerate(states_list):
current_major_axis = states[1]
current_sch_idx = (
min_pos % (states[2] * aligned_node_shape[current_major_axis])
) // states[2]
optimal_record_dict[aligned_node_list[i]] = current_sch_idx
# Pick optimal schedule for dependencies of output nodes
for i in range(len(states_list), len(aligned_node_list)):
multiplier = 1
for j in range(i + 1, len(aligned_node_list)):
multiplier *= aligned_node_shape[j]
optimal_record_dict[aligned_node_list[i]] = (
min_pos // multiplier % aligned_node_shape[i]
)
# Backward pass to get optimal schedules for other nodes
bfs_q = queue.Queue()
visited = set()
for out_idx in output_idx_list:
bfs_q.put(out_idx)
while not bfs_q.empty():
node_idx = bfs_q.get()
visited.add(node_idx)
node = self._node_list[node_idx]
if is_boundary_node(node, input_names):
continue
optimal_sch_idx = optimal_record_dict[node_idx]
full_states = self._stage_dict[node_idx].full_states
if not has_multiple_inputs(self._node_list, node_idx, input_names, self._opt_out_op):
input_idx = self._in_nodes_dict[node_idx][0]
input_node = self._node_list[input_idx]
if is_boundary_node(input_node, input_names):
continue
if input_idx not in visited:
bfs_q.put(input_idx)
if input_idx not in optimal_record_dict:
dep_list = self._stage_dict[node_idx].dep
dep_idx = tuple([optimal_record_dict[item] for item in dep_list])
tmp = np.argmin(full_states, axis=1)
optimal_input_sch_idx = tmp[(optimal_sch_idx,) + dep_idx]
optimal_record_dict[input_idx] = optimal_input_sch_idx
else:
input_idx_list = self._in_nodes_dict[node_idx]
optimal_record_dict[input_idx_list[0]] = optimal_sch_idx
full_states_idx = self._stage_dict[node_idx].full_states_idx
tmp = full_states[optimal_sch_idx]
new_states_idx, new_states_pos = [], []
visited_states_idx, visited_states_pos = [], []
for i in range(1, len(full_states_idx)):
if full_states_idx[i] in optimal_record_dict:
visited_states_idx.append(full_states_idx[i])
visited_states_pos.append(i - 1)
else:
new_states_idx.append(full_states_idx[i])
new_states_pos.append(i - 1)
if visited_states_idx:
tmp = np.transpose(tmp, tuple(visited_states_pos + new_states_pos))
tmp = tmp[tuple([optimal_record_dict[idx] for idx in visited_states_idx])]
min_pos = np.argmin(tmp)
multiplier = 1
for i in range(len(new_states_idx)):
multiplier *= full_states.shape[new_states_pos[i] + 1]
for pos, idx in zip(new_states_pos, new_states_idx):
multiplier //= full_states.shape[pos + 1]
optimal_record_dict[idx] = min_pos // multiplier
min_pos %= multiplier
for input_idx in input_idx_list:
if input_idx not in visited:
bfs_q.put(input_idx)
self._optimal_record_dict = optimal_record_dict
for node_idx, _ in self._in_nodes_dict.items():
if self._node_list[node_idx]["op"] not in self._target_ops:
continue
self._logger.info("Finished backward pass...")
def run(self, **kwargs):
"""Run dynamic programming solver."""
max_num_states = None if "max_num_states" not in kwargs else kwargs["max_num_states"]
self._num_states = 0
self._max_num_states = max_num_states
self._logger.info("Start to run dynamic programming algorithm...")
self._forward()
self._backward()
self._logger.info("Finished DPExecutor run.")
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/graph_tuner/pbqp_tuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-locals
"""Partitioned Boolean Quadratic Programming Tuner"""
from ._base import INVALID_LAYOUT_TIME
from .base_graph_tuner import BaseGraphTuner
from .utils import is_boundary_node, has_multiple_inputs
class PBQPTuner(BaseGraphTuner):
"""An approximation method to deal with intractably
large size of graph tuning problem.
This graph coloring algorithm mainly comes from:
Lang Hames and Bernhard Scholz.
Nearly optimal register allocation with pbqp.JMLC 2006.
LNCS, vol.4228,pp. 346-361, 2016
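Examples
--------
# Illustrative sketch only; the placeholders are the same as in the DPTuner example.
>>> executor = PBQPTuner(graph, {"data": dshape}, records, target_ops, target)
>>> executor.benchmark_layout_transform(min_exec_num=2000)
>>> executor.run()
>>> executor.write_opt_sch2record_file(opt_sch_file)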
"""
def __init__(self, *args, **kwargs):
"""Create a partitioned boolean quadratic programming tuner."""
super(PBQPTuner, self).__init__(*args, **kwargs)
# Remove input and ruled_out nodes
input_names = self._input_shapes.keys()
for node_idx in self._out_nodes_dict:
node = self._node_list[node_idx]
if is_boundary_node(node, input_names):
for out_node_idx in self._out_nodes_dict[node_idx]:
self._in_nodes_dict[out_node_idx].remove(node_idx)
self._adj_dict = {}
for node_idx in self._in_nodes_dict:
self._adj_dict[node_idx] = list(self._in_nodes_dict[node_idx]) + list(
self._out_nodes_dict[node_idx]
)
self._record_cost_dict = {}
for key in self._in_nodes_dict:
self._record_cost_dict[key] = []
for record in self._node_list[key]["record_candidates"]:
self._record_cost_dict[key].append(record[1].costs[0])
self._max_degree = -1
self._node_degree_dict = {}
for node_idx in self._in_nodes_dict:
node_degree = self._get_degree(node_idx)
self._node_degree_dict[node_idx] = node_degree
self._max_degree = max(self._max_degree, node_degree)
self._stack = []
self._buckets = [[] for _ in range(self._max_degree + 2)]
for node_idx in sorted(self._in_nodes_dict):
node_degree = self._get_degree(node_idx)
self._buckets[node_degree].append(node_idx)
self._is_optimal = True
def _get_degree(self, node_idx):
"""Get node degree."""
return len(self._adj_dict[node_idx])
def _reorder_adj_nodes(self, node_idx):
"""Update buckets list with current adjacency list."""
for adj_node in self._adj_dict[node_idx]:
current_degree = self._get_degree(adj_node)
prev_degree = self._node_degree_dict[adj_node]
if prev_degree != current_degree:
self._buckets[prev_degree].remove(adj_node)
self._buckets[current_degree].insert(0, adj_node)
self._node_degree_dict[adj_node] = current_degree
def _remove_node(self, node_idx):
"""Remove node from graph. Update adjacency list accordingly."""
node_degree = self._get_degree(node_idx)
self._buckets[node_degree].remove(node_idx)
for adj_node in self._adj_dict[node_idx]:
self._adj_dict[adj_node].remove(node_idx)
def _insert_edge(self, node_x, node_y, adj_cost_matrix):
"""Insert an edge between two nodes."""
self._layout_transform_interlayer_cost[(node_x, node_y)] = adj_cost_matrix
self._layout_transform_interlayer_cost[(node_y, node_x)] = []
for i in range(len(adj_cost_matrix[0])):
self._layout_transform_interlayer_cost[(node_y, node_x)].append([])
for cost_vec in adj_cost_matrix:
self._layout_transform_interlayer_cost[(node_y, node_x)][i].append(cost_vec[i])
self._adj_dict[node_x].append(node_y)
self._adj_dict[node_y].append(node_x)
def _backward_insert_node(self, node_idx):
"""Reinsert node in backward pass."""
for adj_node in self._adj_dict[node_idx]:
self._adj_dict[adj_node].append(node_idx)
def _RI_reduction(self, node_idx):
"""Reduce nodes with degree 1."""
adj_node = self._adj_dict[node_idx][0]
ltf_matrix = self._layout_transform_interlayer_cost[(adj_node, node_idx)]
for i, cost_vec in enumerate(ltf_matrix):
min_cost = INVALID_LAYOUT_TIME
for j, cost in enumerate(cost_vec):
min_cost = min(min_cost, cost + self._record_cost_dict[node_idx][j])
self._record_cost_dict[adj_node][i] += min_cost
self._remove_node(node_idx)
self._reorder_adj_nodes(node_idx)
self._stack.append(node_idx)
def _RII_reduction(self, node_idx):
"""Reduce nodes with degree 2."""
adj_node_x, adj_node_y = self._adj_dict[node_idx]
ltf_matrix_x = self._layout_transform_interlayer_cost[(adj_node_x, node_idx)]
ltf_matrix_y = self._layout_transform_interlayer_cost[(adj_node_y, node_idx)]
delta_matrix = [[] for _ in range(len(ltf_matrix_x))]
for i, cost_vec_x in enumerate(ltf_matrix_x):
for j, cost_vec_y in enumerate(ltf_matrix_y):
min_cost = INVALID_LAYOUT_TIME
for k in range(len(self._record_cost_dict[node_idx])):
min_cost = min(
min_cost,
cost_vec_x[k] + cost_vec_y[k] + self._record_cost_dict[node_idx][k],
)
delta_matrix[i].append(min_cost)
if adj_node_x == adj_node_y:
for i, delta_row in enumerate(delta_matrix):
self._record_cost_dict[adj_node_x][i] += delta_row[i]
elif adj_node_x in self._adj_dict[adj_node_y]:
for i, _ in enumerate(delta_matrix):
for j, delta in enumerate(delta_matrix[i]):
self._layout_transform_interlayer_cost[(adj_node_x, adj_node_y)][i][j] += delta
self._layout_transform_interlayer_cost[(adj_node_y, adj_node_x)][j][i] += delta
else:
self._insert_edge(adj_node_x, adj_node_y, delta_matrix)
self._remove_node(node_idx)
self._reorder_adj_nodes(node_idx)
self._stack.append(node_idx)
def _RN_reduction(self, node_idx):
"""Reduce nodes with degree greater than 2."""
min_cost = INVALID_LAYOUT_TIME
record_idx = -1
for i, record_cost in enumerate(self._record_cost_dict[node_idx]):
current_cost = record_cost
for adj_node in self._adj_dict[node_idx]:
ltf_matrix = self._layout_transform_interlayer_cost[(node_idx, adj_node)]
adj_record_cost = list(self._record_cost_dict[adj_node])
for j, ltf_cost in enumerate(ltf_matrix[i]):
adj_record_cost[j] += ltf_cost
current_cost += min(adj_record_cost)
if current_cost < min_cost:
min_cost = current_cost
record_idx = i
if record_idx < 0:
raise RuntimeError(
"Can't find a soltuion for node %d when " "applying RN reduction" % node_idx
)
self._optimal_record_dict[node_idx] = record_idx
self._is_optimal = False
for adj_node in self._adj_dict[node_idx]:
ltf_matrix = self._layout_transform_interlayer_cost[(node_idx, adj_node)]
for i, ltf_cost in enumerate(ltf_matrix[record_idx]):
self._record_cost_dict[adj_node][i] += ltf_cost
self._remove_node(node_idx)
self._reorder_adj_nodes(node_idx)
self._stack.append(node_idx)
def _forward(self):
"""Forward pass in PBQP to reduce nodes."""
while True:
if self._buckets[1]:
node_idx = self._buckets[1][0]
self._RI_reduction(node_idx)
elif self._max_degree >= 2 and self._buckets[2]:
node_idx = self._buckets[2][0]
self._RII_reduction(node_idx)
elif self._max_degree >= 3:
max_degree_node = -1
for i in range(self._max_degree, 2, -1):
if self._buckets[i]:
max_degree_node = self._buckets[i][0]
self._RN_reduction(max_degree_node)
break
if max_degree_node < 0:
break
else:
break
def _backward(self):
"""Backward pass in PBQP to generate optimal solution."""
# Solve nodes left in the forward graph
for node_idx in self._buckets[0]:
record_costs = self._record_cost_dict[node_idx]
min_cost = min(record_costs)
self._optimal_record_dict[node_idx] = record_costs.index(min_cost)
# Solve nodes with one or two degrees
for node_idx in reversed(self._stack):
self._backward_insert_node(node_idx)
if node_idx not in self._optimal_record_dict:
record_costs = list(self._record_cost_dict[node_idx])
for adj_node in self._adj_dict[node_idx]:
adj_optimal_idx = self._optimal_record_dict[adj_node]
for i, _ in enumerate(record_costs):
record_costs[i] += self._layout_transform_interlayer_cost[
(node_idx, adj_node)
][i][adj_optimal_idx]
min_cost = min(record_costs)
self._optimal_record_dict[node_idx] = record_costs.index(min_cost)
def run(self, **kwargs):
"""Run partitioned boolean quadratic programming tuner."""
self._logger.info("Start to run PBQP algorithm...")
# Define virtual record lists and layout transformaton matrices
# for multi-input nodes.
input_names = self._input_shapes.keys()
temp = {}
for key, val in self._in_nodes_dict.items():
target_input_idx = -1
target_input_pos = -1
if has_multiple_inputs(self._node_list, key, input_names, self._opt_out_op):
for i, item in enumerate(val):
node = self._node_list[item]
if not is_boundary_node(node, input_names):
target_input_idx = item
target_input_pos = i
break
# Skip boundary operator
if target_input_idx < 0:
continue
temp[(target_input_idx, key)] = []
record_candidates = self._node_list[target_input_idx]["record_candidates"]
for j in range(len(record_candidates)):
temp[(target_input_idx, key)].append([])
for k in range(len(record_candidates)):
temp[(target_input_idx, key)][j].append(
0 if j == k else INVALID_LAYOUT_TIME
)
for j in range(target_input_pos + 1, len(val)):
input_idx = val[j]
input_node = self._node_list[input_idx]
if is_boundary_node(input_node, input_names):
continue
temp[(input_idx, key)] = self._layout_transform_interlayer_cost[
(input_idx, target_input_idx)
]
self._layout_transform_interlayer_cost.update(temp)
# Create reverse layout transformation matrices
temp = {}
for idx_pair, ltf_matrix in self._layout_transform_interlayer_cost.items():
reverse_key = (idx_pair[1], idx_pair[0])
reverse_matrix = [[] for _ in range(len(ltf_matrix[0]))]
for i, _ in enumerate(ltf_matrix):
for j, ltf in enumerate(ltf_matrix[i]):
reverse_matrix[j].append(ltf)
temp[reverse_key] = reverse_matrix
self._layout_transform_interlayer_cost.update(temp)
self._forward()
self._backward()
is_optimal = "optimal" if self._is_optimal else "sub-optimal"
msg = "Finished PBQPExecutor run. Got %s solution." % is_optimal
self._logger.info(msg)
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/graph_tuner/utils/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Graph tuner utility functions"""
from __future__ import absolute_import
from . import traverse_graph
from . import utils
from .traverse_graph import expr2graph, get_direct_ancestor, get_in_nodes, get_out_nodes
from .utils import has_multiple_inputs, is_boundary_node, bind_inputs
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/graph_tuner/utils/traverse_graph.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-locals,too-many-statements,too-many-branches,protected-access
"""API for graph traversing."""
import threading
import re
import tvm
from tvm import relay, autotvm
from tvm.relay import transform
from tvm.relay.expr import Call, TupleGetItem, Var, Constant, Tuple
from tvm.relay.function import Function
from tvm.relay.ty import TupleType, TensorType
from tvm.autotvm.task import TaskExtractEnv
from .utils import has_multiple_inputs, is_boundary_node, is_skipped_node
from .._base import OPT_OUT_OP
def expr2graph(expr, target_ops, node_dict, node_list, tvm_target):
"""Convert relay expr to graph data structure
and fetch workloads of target operators.
Parameters
----------
expr : tvm.relay.Expr.Function
Input relay function expression.
target_ops: List of tvm.ir.Op
List of target relay ops
node_dict : dictionary from tvm.relay.Expr to int
Dictionary to record node index
node_list : list of dictionary
List of nodes which contains all expr in the input relay function.
Each node will be stored as a dictionary in the format of
{"op": str, "node": tvm.relay.expr, "inputs": [int], "types": [tvm.relay.Type],
"name": str, "workloads": [tuple], "topi_op": [function]}
tvm_target : tvm.target
The TVM target object.
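Examples
--------
# Illustrative sketch only; ``func`` is assumed to be a bound relay Function.
>>> node_dict, node_list = {}, []
>>> target_ops = [relay.op.get("nn.conv2d")]
>>> expr2graph(func, target_ops, node_dict, node_list, tvm.target.Target("llvm"))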
"""
# TODO(@kevinthesun, @icemelon9): Currently graph tuning pass relies on the fact
# that # autotvm tasks == # ops. But this won't be true after having relay op
# strategy. We need to find a solution to fix this.
env = TaskExtractEnv.get(allow_duplicate=True)
env.reset(target_ops)
# pylint: disable=not-context-manager
with env:
_expr2graph_impl(expr, target_ops, node_dict, node_list, tvm_target)
task_pos = 0
for node_entry in node_list:
if node_entry["op"] in target_ops:
task_name, args = env.task_collection[task_pos]
task = autotvm.task.create(task_name, args, target=tvm_target)
node_entry["workloads"] = [task.workload]
node_entry["topi_op"] = [task_name]
task_pos += 1
def _infer_type(node):
"""A method to infer the type of a relay expression."""
mod = tvm.IRModule.from_expr(node)
mod = transform.InferType()(mod)
entry = mod["main"]
return entry if isinstance(node, relay.Function) else entry.body
def _replace_device_with_tracing(target):
"""This is to replace -device=XXX with -device=tracing in the tvm_target string.
It is a stand-along function for testability.
We need to have device=tracing in order to fetch the workloads, it is not used
for anything beyond that so it is safe to override the device here only."""
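# Illustrative behavior of this function (not exhaustive):
#   "llvm -device=arm_cpu" -> "llvm -device=tracing"
#   "llvm"                 -> "llvm -device=tracing"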
target = str(target)
if "-device" in target:
return re.sub("-device=[^\\-$]+", "-device=tracing ", target).strip(" ")
return target + " -device=tracing"
def _expr2graph_impl(expr, target_ops, node_dict, node_list, tvm_target):
"""Implementation to convert relay expr to graph data structure"""
def _traverse_expr(node):
if node in node_dict:
return
node_index = len(node_list)
node_entry = {"node": node, "inputs": [], "types": [], "op": None, "name": None}
if isinstance(node, Call):
op = node.op
node_entry["op"] = node.op
for arg in node.args:
in_node_idx = node_dict[arg]
if isinstance(arg, (Tuple, TupleGetItem)):
node_entry["inputs"] += node_list[in_node_idx]["inputs"]
else:
node_entry["inputs"].append([in_node_idx, 0, 0])
infer_out = _infer_type(node)
out_type = infer_out._checked_type_
if isinstance(out_type, TensorType):
node_entry["types"].append(out_type)
elif isinstance(out_type, TupleType):
for tuple_type in out_type.fields:
node_entry["types"].append(tuple_type)
else:
raise RuntimeError(
"Unsupported output type %s in operator %s" % (type(out_type), op.name)
)
# Utilize tracing target to fetch workload with topo-order.
# Since we only need workload, dummy target can be used to
# create task.
if op in target_ops:
params = []
for i, input_idx in enumerate(node_entry["inputs"]):
input_node_entry = node_list[input_idx[0]]
input_type = input_node_entry["types"][input_idx[1]]
if not isinstance(input_node_entry["node"], (Var, Constant, Call)):
raise RuntimeError(
"Graph tuner can only tune target "
"operators with input node of type "
"relay.expr.Var/Constant/Call. Now "
"find a target op %s with input type %s"
% (op, str(type(input_node_entry["node"])))
)
free_var = relay.Var("var_%d" % i, input_type)
params.append(free_var)
call = relay.Call(node.op, params, node.attrs)
mod = tvm.IRModule.from_expr(relay.Function(params, call))
relay.backend.te_compiler.get().clear()
tracing_target = _replace_device_with_tracing(tvm_target)
build_thread = threading.Thread(target=relay.build, args=(mod, tracing_target))
build_thread.start()
build_thread.join()
elif isinstance(node, Var):
node_entry["name"] = node.name_hint
node_entry["types"] = [node.type_annotation]
elif isinstance(node, Function):
# Ignore the root node since it equals the input function expression
if node != expr:
_expr2graph_impl(node, target_ops, node_dict, node_list, tvm_target)
return
elif isinstance(node, TupleGetItem):
in_node_idx = node_dict[node.tuple_value]
node_entry["inputs"].append([in_node_idx, node.index, 0])
elif isinstance(node, Tuple):
for tuple_item in node:
in_node_idx = node_dict[tuple_item]
if isinstance(tuple_item, TupleGetItem):
node_entry["inputs"] += node_list[in_node_idx]["inputs"]
elif isinstance(tuple_item, Tuple):
raise RuntimeError("Graph tuner doesn't support nested tuple.")
else:
node_entry["inputs"].append([in_node_idx, 0, 0])
elif isinstance(node, Constant):
node_entry["name"] = "Constant_" + str(node_index)
node_entry["types"] = [node.checked_type]
elif isinstance(node, tvm.ir.Op):
return
else:
raise RuntimeError(
"Not supported relay node type in graph tuning: %s" % str(type(node))
)
node_dict[node] = node_index
node_list.append(node_entry)
relay.analysis.post_order_visit(expr, _traverse_expr)
def get_direct_ancestor(node_list, visited_dict, target_ops, node_idx, input_names):
"""Given a node_list in relay function and a node index, return the
closest ancestor which has op_name as operator name or is multi_input operator.
If node has multiple inputs, multiple ancestor nodes will be returned.
Parameters
----------
node_list : list of dict of str to object
List of all nodes in a graph.
visited_dict : dict of int to int
Nodes and corresponding ancestors which have been visited.
target_ops: List of str
List of target relay base op name
node_idx : int
Input node index.
input_names : list of str
Names of graph input nodes.
Returns
-------
out : list of int
List of ancestor node index.
"""
if node_idx in visited_dict:
return visited_dict[node_idx]
node = node_list[node_idx]
if is_boundary_node(node, input_names):
return [node_idx]
node_direct_ancestor = []
for item_idx in node["inputs"]:
item = node_list[item_idx[0]]
is_multiple_inputs = has_multiple_inputs(node_list, item_idx[0], input_names, OPT_OUT_OP)
if item["op"] in target_ops or is_multiple_inputs:
node_direct_ancestor.append(item_idx[0])
else:
tmp = get_direct_ancestor(node_list, visited_dict, target_ops, item_idx[0], input_names)
for tmp_item in tmp:
if tmp_item not in node_direct_ancestor:
node_direct_ancestor.append(tmp_item)
visited_dict[node_idx] = node_direct_ancestor
return node_direct_ancestor
def get_in_nodes(node_list, target_ops, input_names):
"""Create a dictionary mapping from op_name nodes or multi-input
nodes to closest input ancestors.
Parameters
----------
node_list : list of dict of str to object
List of all nodes in a graph.
target_ops: List of str
List of target relay op
input_names : list of str
Names of graph input nodes.
Returns
-------
out : dict of int to list of int
Dictionary maps node index to closest input ancestors.
"""
visited_dict = {}
in_node_dict = {}
for i, node in enumerate(node_list):
if is_boundary_node(node, input_names) or is_skipped_node(node):
continue
get_direct_ancestor(node_list, visited_dict, target_ops, i, input_names)
for key, val in visited_dict.items():
node = node_list[key]
is_multiple_inputs = has_multiple_inputs(node_list, key, input_names, OPT_OUT_OP)
if node["op"] in target_ops or is_multiple_inputs:
in_node_dict[key] = val
# Reduce boundary nodes
out_node_dict = get_out_nodes(in_node_dict)
has_reduced_node = True
while has_reduced_node:
boundary_nodes = []
for key, val in in_node_dict.items():
node = node_list[key]
is_boundary = True
# Target ops can't be boundary nodes
if node["op"] not in target_ops:
for input_idx in val:
in_node = node_list[input_idx]
if not is_boundary_node(in_node, input_names) and input_idx in in_node_dict:
is_boundary = False
else:
val.remove(input_idx)
if is_boundary:
boundary_nodes.append(key)
if boundary_nodes:
for idx in boundary_nodes:
if idx in in_node_dict:
del in_node_dict[idx]
else:
has_reduced_node = False
# Remove empty nodes to ignore pre-computed sub-graph
has_empty_node = True
while has_empty_node:
empty_nodes = []
for key, val in in_node_dict.items():
if not val:
empty_nodes.append(key)
if empty_nodes:
has_empty_node = True
for node in empty_nodes:
del in_node_dict[node]
if node in out_node_dict:
for out_node in out_node_dict[node]:
in_node_dict[out_node].remove(node)
else:
has_empty_node = False
return in_node_dict
def get_out_nodes(in_node_dict):
"""Create output dictionary from input dictionary.
Parameters
----------
in_node_dict : dict of int to list of int
Dictionary maps node index to closest input ancestors.
It can be created with get_in_nodes.
Returns
-------
out : dict of int to list of int
Dictionary maps node index to closest output nodes.
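Examples
--------
# Illustrative: node 0 feeds node 3, and node 3 feeds node 4.
>>> get_out_nodes({3: [0], 4: [3]})
{3: [4], 4: [], 0: [3]}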
"""
out_node_dict = {}
for key in in_node_dict:
out_node_dict[key] = []
for key, val in in_node_dict.items():
for item in val:
if item in out_node_dict:
out_node_dict[item].append(key)
else:
out_node_dict[item] = [key]
return out_node_dict
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/graph_tuner/utils/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=eval-used,invalid-name,too-many-arguments
"""Utility functions"""
import tvm
from tvm import relay
from tvm.relay import transform
def has_multiple_inputs(node_list, node_idx, input_names, opt_out_op):
"""Check whether a node has multiple input nodes
except variable nodes.
Parameters
----------
node_list : list of dict of str to object
List of all nodes in a graph.
node_idx : int
Node index to be checked.
input_names : list of str
List of input names of graph.
opt_out_op : container of str
Names of operators that are skipped when counting inputs; the inputs
of such an operator are examined instead.
Returns
-------
out : bool
Whether the specified node has multiple input nodes
"""
num_inputs = 0
node = node_list[node_idx]
for in_idx in node["inputs"]:
in_idx = in_idx[0]
in_node = node_list[in_idx]
# Exclude parameter nodes
if in_node["op"] is not None and in_node["op"].name in opt_out_op:
increase = False
for t_idx in in_node["inputs"]:
increase = has_multiple_inputs(node_list, t_idx[0], input_names, opt_out_op)
if increase:
num_inputs += 1
elif in_node["op"] is not None or ("name" in in_node and in_node["name"] in input_names):
num_inputs += 1
return num_inputs > 1
def is_boundary_node(node_entry, input_names):
"""Whether a node is a boundary node.
Currently input node and nodes in LAYOUT_FIXED_OP are
counted as boundary.
Parameters
----------
node_entry : dict
Node entry.
input_names : list of str
List of input names of graph.
Returns
-------
out : bool
whether node is a boundary node.
"""
# Operators dependent on original layouts.
_LAYOUT_FIXED_OP = [
relay.op.get(name)
for name in (
"nn.batch_flatten",
"transpose",
"reshape",
"vision.multibox_prior",
"vision.multibox_transform_loc",
"where",
"vision.non_max_suppression",
"strided_slice",
)
]
out = node_entry["op"] in _LAYOUT_FIXED_OP or (
"name" in node_entry and node_entry["name"] in input_names
)
return out
def is_skipped_node(node_entry):
"""Whether a node is not counted.
Parameters
----------
node_entry : dict
Node entry.
Returns
-------
out : bool
whether node is skipped.
"""
# Operators not counted in graph tuner.
return isinstance(node_entry["node"], relay.Tuple)
def bind_inputs(expr, input_shapes=None, input_dtypes="float32"):
"""Bind input variables of a relay function expression
to new shapes and/or dtypes.
Parameters
----------
expr : tvm.relay.Expr.Function
Input relay function expression.
input_shapes : dict of str to tuple of int, optional
Input shapes.
input_dtypes : str or dict of str to str, optional
Input dtypes.
Returns
-------
out : tvm.relay.Expr.Function
Bound relay function expression.
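Examples
--------
# Illustrative sketch only; ``func`` is assumed to be a relay Function with
# an input named "data".
>>> func = bind_inputs(func, input_shapes={"data": (1, 3, 224, 224)}, input_dtypes="float32")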
"""
if input_shapes is None:
return expr
if isinstance(input_dtypes, str):
input_dtypes = {key: input_dtypes for key in input_shapes.keys()}
updated_input_dict = {}
for input_name in input_shapes.keys():
updated_input = relay.var(
input_name, shape=input_shapes[input_name], dtype=input_dtypes[input_name]
)
updated_input_dict[input_name] = updated_input
rebind_dict = {}
for var in expr.params:
if var.name_hint in updated_input_dict:
rebind_dict[var] = updated_input_dict[var.name_hint]
updated_expr = relay.expr.bind(expr, rebind_dict)
mod = tvm.IRModule.from_expr(updated_expr)
mod = transform.InferType()(mod)
entry = mod["main"]
return entry if isinstance(updated_expr, relay.Function) else entry.body
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/measure/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Distributed executor infrastructure to scale up the tuning"""
from .measure import (
MeasureInput,
MeasureResult,
MeasureErrorNo,
measure_option,
create_measure_batch,
)
from .measure_methods import (
LocalBuilder,
LocalRunner,
RPCRunner,
default_module_loader,
request_remote,
)
from .executor import Executor
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/measure/executor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Abstraction for asynchronous job execution """
class Executor(object):
"""
Base abstract executor interface for asynchronous job submission.
Allows submitting asynchronous jobs and returns a Future object for each.
"""
# timeout for jobs that may hang
DEFAULT_TIMEOUT = 120
def submit(self, func, *args, **kwargs):
"""
Pass task (function, arguments) to the Executor.
Parameters
----------
func : callable
function to be run by a worker
args : list or tuple, optional
arguments passed to the function
kwargs : dict, optional
The keyword arguments
Returns
-------
future : Future
Future object wrapping the task which can be used to
collect the task's result.
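Examples
--------
# Illustrative usage against a concrete Executor implementation;
# ``executor``, ``some_function`` and its arguments are placeholders.
>>> future = executor.submit(some_function, arg1, arg2)
>>> result = future.get(timeout=10)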
"""
raise NotImplementedError()
class Future(object):
"""
Base class of the future object.
Implementations may return an object of a subclass of this.
This object encapsulates the asynchronous execution of a task
submitted to another thread, or another worker, for execution.
Future objects store the state of the task; they can be polled for the
result, or a blocking call can be used to retrieve it.
"""
def done(self):
"""
Return True if job was successfully cancelled or finished running.
"""
raise NotImplementedError()
def get(self, timeout=None):
"""
Get the result. This will block until the result is available.
Parameters
----------
timeout : int or float, optional
Maximum number of seconds to wait before it times out.
If not specified, it means we block until the result is available.
Returns
-------
result : Any
The result returned by the submitted function.
Raises
------
TimeoutError : if the result call times out.
"""
raise NotImplementedError()
class FutureError(RuntimeError):
"""Base error class of all future events"""
# pylint:disable=redefined-builtin
class TimeoutError(FutureError):
"""Error raised when a task is timeout."""
class ExecutionError(FutureError):
"""
Error raised when future execution crashes or failed.
"""
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/measure/measure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=pointless-string-statement,consider-using-enumerate,invalid-name
"""User facing API for specifying how to measure the generated code"""
import enum
import logging
import multiprocessing
from collections import namedtuple
class MeasureInput(namedtuple("MeasureInput", ["target", "task", "config"])):
"""
Stores all the necessary inputs for a measurement.
Parameters
----------
target : tvm.target.Target
The target device
task : task.Task
Task function
config : ConfigEntity
Specific configuration.
"""
class MeasureResult(namedtuple("MeasureResult", ["costs", "error_no", "all_cost", "timestamp"])):
"""
Stores all the results of a measurement
Parameters
----------
costs: Array of float or Array of Exception
If no error occurs during measurement, it is an array of measured running times.
If an error occurs during measurement, it is an array of the exception objects.
error_no: int
Denote error type, defined by MeasureErrorNo
all_cost: float
Total cost of this measurement, including RPC, compilation and test runs
timestamp: float
The absolute time stamp when we finish measurement.
"""
def __repr__(self):
error_no_str = (
str(MeasureErrorNo(self.error_no))
if isinstance(self.error_no, (MeasureErrorNo, int))
else str(self.error_no)
)
return (
f"{self.__class__.__name__}(costs={self.costs!r}, error_no={error_no_str}, "
f"all_cost={self.all_cost}, timestamp={self.timestamp!r})"
)
class MeasureErrorNo(enum.IntEnum):
"""Error type for MeasureResult"""
NO_ERROR = 0 # no error
INSTANTIATION_ERROR = 1 # actively detected error in instantiating a template with a config
COMPILE_HOST = 2 # error when compiling code on host (e.g. tvm.build)
COMPILE_DEVICE = 3 # error when compiling code on device (e.g. OpenCL JIT on the device)
RUNTIME_DEVICE = 4 # error when run program on device
WRONG_ANSWER = 5 # answer is wrong when compared to a golden output
BUILD_TIMEOUT = 6 # timeout during compilation
RUN_TIMEOUT = 7 # timeout during run
UNKNOWN_ERROR = 8 # unknown error
class Builder(object):
"""Builder that builds programs in tuning
Parameters
----------
timeout: float, optional
The timeout of a build task
n_parallel: int, optional
The number of tasks submitted in parallel
By default it will use all cpu cores
build_kwargs: dict, optional
Keyword args given to the build function.
"""
def __init__(self, timeout=10, n_parallel=None, build_kwargs=None):
self.timeout = timeout
self.n_parallel = n_parallel or multiprocessing.cpu_count()
self.user_build_kwargs = build_kwargs if build_kwargs is not None else {}
self.runner_build_kwargs = None
self.task = None
def set_task(self, task, build_kwargs=None):
"""
Initialize for a new tuning task
Parameters
----------
task: Task
The tuning task
build_kwargs: dict, optional
The additional kwargs for build function
"""
self.task = task
self.build_kwargs = dict(build_kwargs.items()) if build_kwargs is not None else {}
if any(k in self.build_kwargs for k in self.user_build_kwargs):
logging.warning(
"Overriding these runner-supplied kwargs with user-supplied:\n%s",
"\n".join(
f" * {k}: from {build_kwargs[k]!r} to {self.user_build_kwargs[k]!r}"
for k in sorted([k for k in build_kwargs if k in self.user_build_kwargs])
),
)
for k, v in self.user_build_kwargs.items():
self.build_kwargs[k] = v
def build(self, measure_inputs):
"""Build programs
Parameters
----------
measure_inputs: List of MeasureInput
The measure input
Returns
-------
build_results: List of BuildResult
The build result.
"""
raise NotImplementedError()
class Runner(object):
"""Runner that runs and measures the time cost of a generated program in tuning
Parameters
----------
timeout: float, optional
The timeout of a build task
n_parallel: int, optional
The number of tasks submitted in parallel
By default it will use all cpu cores
"""
def __init__(self, timeout=5, n_parallel=None):
self.timeout = timeout
self.n_parallel = n_parallel or multiprocessing.cpu_count()
self.task = None
def set_task(self, task):
"""
Initialize for a new tuning task
Parameters
----------
task: Task
The tuning task
"""
self.task = task
def get_build_kwargs(self):
"""
Get device specific build arguments (e.g. maximum shared memory size)
Returns
----------
kwargs: dict
The additional keyword arguments
"""
raise NotImplementedError()
def run(self, measure_inputs, build_results):
"""Run amd measure built programs
Parameters
----------
measure_inputs: List of MeasureInput
The raw measure input
build_results: List of BuildResults
The build results
Returns
-------
measure_results: List of MeasureResult
The final results of measurement
"""
raise NotImplementedError()
def measure_option(builder, runner):
"""
Set options for measure. To measure a config, we will build it and run it.
So we have to set options for these two steps.
They have their own options on timeout, parallel, etc.
Parameters
----------
builder: Builder
Specify how to build programs
runner: Runner
Specify how to run programs
Examples
--------
# example setting for using local devices
>>> measure_option = autotvm.measure_option(
>>> builder=autotvm.LocalBuilder(), # use all local cpu cores for compilation
>>> runner=autotvm.LocalRunner( # measure them sequentially
>>> number=10,
>>> timeout=5)
>>> )
# example setting for using remote devices
>>> measure_option = autotvm.measure_option(
>>> builder=autotvm.LocalBuilder(), # use all local cpu cores for compilation
>>> runner=autotvm.RPCRunner(
>>> 'rasp3b', 'localhost', 9190, # device key, host and port of the rpc tracker
>>> number=4,
>>> timeout=4) # timeout of a run on the device. RPC request waiting time is excluded.
>>>)
Note
----
To make measurement results accurate, you should pick correct values for the
arguments `number` and `repeat` in Runner(). Some devices need a certain minimum
running time to "warm up," such as GPUs that need time to reach a performance power state.
Using `min_repeat_ms` dynamically adjusts `number`, so it is recommended.
The typical value for an NVIDIA GPU is 150 ms.
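# For example, a runner configured per this note (using the LocalRunner
# parameters documented above) might look like:
>>> runner = autotvm.LocalRunner(number=1, repeat=10, min_repeat_ms=150)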
"""
# pylint: disable=import-outside-toplevel
from .measure_methods import LocalBuilder, LocalRunner
if isinstance(builder, str):
if builder == "local":
builder = LocalBuilder()
else:
raise ValueError("Invalid builder: " + builder)
if isinstance(runner, str):
if runner == "local":
runner = LocalRunner()
else:
raise ValueError("Invalid runner: " + runner)
opt = {
"builder": builder,
"runner": runner,
}
return opt
def create_measure_batch(task, option):
"""Get a standard measure_batch function.
Parameters
----------
task: tvm.autotvm.task.Task
The tuning task
option: dict
The option for measuring generated code.
You should use the return value of function :any:`measure_option` for this argument.
Returns
-------
measure_batch: callable
a callback function to measure a batch of configs
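Examples
--------
# Illustrative sketch only; ``task`` and ``inputs`` (a list of MeasureInput)
# are assumed to already exist.
>>> opt = measure_option(builder="local", runner="local")
>>> measure_batch = create_measure_batch(task, opt)
>>> results = measure_batch(inputs)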
"""
builder = option["builder"]
runner = option["runner"]
attach_objects = runner.set_task(task)
# feed device related information from runner to builder
# (e.g. max shared memory for validity checking)
build_kwargs = runner.get_build_kwargs()
builder.set_task(task, build_kwargs)
def measure_batch(measure_inputs):
build_results = builder.build(measure_inputs)
results = runner.run(measure_inputs, build_results)
return results
measure_batch.n_parallel = builder.n_parallel
measure_batch.attach_objects = attach_objects
return measure_batch
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/measure/measure_methods.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-function-args,too-many-nested-blocks
"""
Functions that run on executor for measurement.
These functions are responsible for building the tvm module, uploading it to
remote devices, recording the running time costs, and checking the correctness of the output.
"""
import contextlib
import logging
import os
import shutil
import tempfile
import threading
import time
import traceback
import typing
import warnings
from collections import namedtuple
from random import getrandbits
import tvm._ffi
import tvm.ir.transform
from tvm import nd
from tvm import rpc as _rpc
from tvm.autotvm.env import AutotvmGlobalScope, reset_global_scope
from tvm.contrib import ndk, stackvm, tar
from tvm.contrib.popen_pool import PopenPoolExecutor
from tvm.driver import build
from tvm.error import TVMError
from tvm.target import Target
from ..env import AutotvmGlobalScope
from ..task.space import InstantiationError
from ..utils import get_const_tuple
from .measure import Builder, MeasureErrorNo, MeasureResult, Runner
logger = logging.getLogger("autotvm")
class BuildResult(namedtuple("BuildResult", ("filename", "arg_info", "error", "time_cost"))):
"""
Stores the result of building a program for measurement.
Parameters
----------
filename : str
The filename of generated library
arg_info : Tuple
The shape and dtype information of tvm tensor arguments
error : Exception
The error happens during compilation.
time_cost : float
The time cost of building
"""
class LocalBuilder(Builder):
"""Run compilation on local machine
Parameters
----------
timeout: float
The timeout of a compilation
n_parallel: int
The number of tasks run in parallel. "None" will use all cpu cores
build_kwargs: dict
If supplied, additional kwargs passed to build_func. Overrides any build_kwargs supplied
by the Runner.
build_func: callable or str
If 'default', use the default build function
If 'ndk', use the function for Android NDK
If 'stackvm', use the function for stackvm
If callable, use it as a custom build function; expects a lib_format field.
do_fork: bool
If False, do not fork when building. Requires n_parallel=1.
runtime: Optional[Runtime]
Specify the runtime to generate artifacts for
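Examples
--------
# Illustrative: a default local build versus an Android NDK cross-compilation build.
>>> builder = LocalBuilder()
>>> ndk_builder = LocalBuilder(build_func="ndk", timeout=20)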
"""
def __init__(
self,
timeout=10,
n_parallel=None,
build_kwargs=None,
build_func="default",
do_fork=False,
runtime=None,
):
super(LocalBuilder, self).__init__(timeout, n_parallel, build_kwargs)
if isinstance(build_func, str):
if build_func == "default":
build_func = tar.tar
elif build_func == "ndk":
build_func = ndk.create_shared
elif build_func == "stackvm":
build_func = stackvm.build
else:
raise ValueError("Invalid build_func" + build_func)
self.build_func = _WrappedBuildFunc(build_func, runtime)
if not do_fork:
assert n_parallel in (
None,
1,
), f"if do_fork=False, need n_parallel=None or 1; got {n_parallel}"
self.executor = PopenPoolExecutor(
timeout=timeout, initializer=reset_global_scope, initargs=(AutotvmGlobalScope.current,)
)
self.tmp_dir = tempfile.mkdtemp()
def build(self, measure_inputs):
results = []
shutil.rmtree(self.tmp_dir, ignore_errors=True)
self.tmp_dir = tempfile.mkdtemp()
for i in range(0, len(measure_inputs), self.n_parallel):
futures = []
for inp in measure_inputs[i : i + self.n_parallel]:
ret = self.executor.submit(self.build_func, inp, self.tmp_dir, **self.build_kwargs)
futures.append(ret)
for future in futures:
try:
res = future.result()
if res.error is not None:
assert len(res.error) == 2, (
f"BuildResult errors should be a 2-tuple, but it is a {len(res.error)}"
"-tuple. This should not happen!"
)
tb, exception = res.error
# instantiation error
if isinstance(exception, InstantiationError):
res = MeasureResult(
(
tb,
exception,
),
MeasureErrorNo.INSTANTIATION_ERROR,
res.time_cost,
time.time(),
)
else:
if "InstantiationError" in str(exception):
msg = str(exception)
try:
msg = msg.split("\n")[-2].split(": ")[1]
except Exception: # pylint: disable=broad-except
pass
res = MeasureResult(
(
tb,
InstantiationError(msg),
),
MeasureErrorNo.INSTANTIATION_ERROR,
res.time_cost,
time.time(),
)
else: # tvm error
res = MeasureResult(
(
tb,
res.error,
),
MeasureErrorNo.COMPILE_HOST,
res.time_cost,
time.time(),
)
except TimeoutError as ex:
tb = traceback.format_exc()
res = MeasureResult(
(
tb,
ex,
),
MeasureErrorNo.BUILD_TIMEOUT,
self.timeout,
time.time(),
)
except ChildProcessError as ex:
tb = traceback.format_exc()
res = MeasureResult(
(
tb,
ex,
),
MeasureErrorNo.RUNTIME_DEVICE,
self.timeout,
time.time(),
)
results.append(res)
return results
class RPCRunner(Runner):
"""Run generated code on remove devices.
This function will ask a RPC Tracker to get device for measurement.
Parameters
----------
timeout: float
The timeout of a RPCRunner measurement task
n_parallel: int
The number of tasks run in parallel. "None" will use all cpu cores
key: str
The key of the device registered in the tracker
host: str
The host address of RPC Tracker
port: int
The port of RPC Tracker
number: int
The number of times to run the generated code for taking average.
We call these runs one `repeat` of measurement.
repeat : int, optional
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first "1" is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms: int, optional
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
the parameters `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., When the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval: float, optional
The cool down interval between two measurements.
enable_cpu_cache_flush: bool
Whether to flush cache on CPU between repeated measurements.
Flushing cache can make the measured latency of one operator closer to
its actual latency during end-to-end inference.
To make this option effective, the argument `number` should also be set to 1.
This only has an effect on CPU tasks.
module_loader : ModuleLoader
If given, a context manager that loads the module to be timed into the remote runtime.
If not given, default_module_loader is used.
"""
def __init__(
self,
key,
host,
port,
priority=1,
timeout=10,
n_parallel=None,
number=4,
repeat=3,
min_repeat_ms=0,
cooldown_interval=0.1,
enable_cpu_cache_flush=False,
module_loader=None,
):
super(RPCRunner, self).__init__(timeout, n_parallel)
self.key = key
self.host = host
self.port = port
self.priority = priority
self.timeout = timeout
self.number = number
self.repeat = repeat
self.min_repeat_ms = min_repeat_ms
self._ref_input = None
self.enable_cpu_cache_flush = enable_cpu_cache_flush
self.cooldown_interval = cooldown_interval
self.module_loader = module_loader
self.executor = PopenPoolExecutor(
timeout=timeout * (self.n_parallel + 1),
initializer=reset_global_scope,
initargs=(AutotvmGlobalScope.current,),
)
@property
def ref_input(self):
"""
Fixed input for tuning special operators, e.g., sparse operators
requiring indices as input.
"""
return self._ref_input
@ref_input.setter
def ref_input(self, val):
if val is not None:
warnings.warn(
"You are specifying fixed input for tuning the operator. "
"Be sure your input always fits the operator. Some "
"operators may conduct layout transformation during tuning, "
"thus can lead to unexpected behaviors. ",
RuntimeWarning,
)
self._ref_input = val
def set_task(self, task):
self.task = task
if check_remote(task.target, self.key, self.host, self.port):
logger.info("Get devices for measurement successfully!")
else:
raise RuntimeError(
"Cannot get remote devices from the tracker. "
"Please check the status of tracker by "
"'python -m tvm.exec.query_rpc_tracker --port [THE PORT YOU USE]' "
"and make sure you have free devices on the queue status."
)
def get_build_kwargs(self):
kwargs = {}
if (
"cuda" in self.task.target.keys
or "opencl" in self.task.target.keys
or "rocm" in self.task.target.keys
or "vulkan" in self.task.target.keys
):
remote = request_remote(self.key, self.host, self.port)
dev = remote.device(str(self.task.target), 0)
max_dims = dev.max_thread_dimensions
kwargs["check_gpu"] = {
"max_shared_memory_per_block": dev.max_shared_memory_per_block,
"max_threads_per_block": dev.max_threads_per_block,
"max_thread_x": max_dims[0],
"max_thread_y": max_dims[1],
"max_thread_z": max_dims[2],
}
return kwargs
def run(self, measure_inputs, build_results):
results = []
remote_kwargs = dict(
device_key=self.key,
host=self.host,
port=self.port,
priority=self.priority,
timeout=self.timeout,
)
for i in range(0, len(measure_inputs), self.n_parallel):
futures = []
for measure_inp, build_res in zip(
measure_inputs[i : i + self.n_parallel], build_results[i : i + self.n_parallel]
):
module_loader = (
self.module_loader
if self.module_loader is not None
else default_module_loader()
)
ret = self.executor.submit(
run_through_rpc,
measure_inp,
build_res,
self.number,
self.repeat,
self.min_repeat_ms,
self.cooldown_interval,
remote_kwargs,
self.ref_input,
self.enable_cpu_cache_flush,
module_loader,
)
futures.append(ret)
for future in futures:
try:
res = future.result()
results.append(res)
except Exception as ex: # pylint: disable=broad-except
tb = traceback.format_exc()
results.append(
MeasureResult(
(
tb,
ex,
),
MeasureErrorNo.RUN_TIMEOUT,
self.timeout,
time.time(),
)
)
return results
class LocalRunner(RPCRunner):
"""Run generated code on local devices.
Parameters
----------
timeout: float
The timeout of a compilation
number: int
The number of times to run the generated code for taking average.
We call these runs one `repeat` of measurement.
repeat : int, optional
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first one is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms: int, optional
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
the parameters `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., When the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval: float, optional
The cool down interval between two measurements.
enable_cpu_cache_flush: bool
Whether to flush cache on CPU between repeated measurements.
Flushing cache can make the measured latency of one operator closer to
its actual latency during end-to-end inference.
To make this option effective, the argument `number` should also be set to 1.
This only has an effect on CPU tasks.
Note
----
This is a "fake" local mode. We start a silent rpc tracker and rpc server
for the user. In this way we reuse timeout/isolation mechanism in RPC infrastructure.
"""
def __init__(
self,
timeout=10,
number=4,
repeat=3,
min_repeat_ms=0,
cooldown_interval=0.1,
enable_cpu_cache_flush=False,
module_loader=None,
):
super(LocalRunner, self).__init__(
"",
None,
None,
0,
timeout=timeout,
n_parallel=1,
number=number,
repeat=repeat,
min_repeat_ms=min_repeat_ms,
cooldown_interval=cooldown_interval,
enable_cpu_cache_flush=enable_cpu_cache_flush,
module_loader=module_loader,
)
self.tracker = None
self.server = None
def set_task(self, task):
# pylint: disable=import-outside-toplevel
from ...rpc.server import Server
from ...rpc.tracker import Tracker
self.task = task
tracker = Tracker(port=9000, port_end=10000, silent=True)
device_key = "$local$device$%d" % tracker.port
server = Server(
port=9000,
port_end=10000,
key=device_key,
silent=True,
tracker_addr=("127.0.0.1", tracker.port),
)
self.key = device_key
self.host = "127.0.0.1"
self.port = tracker.port
super(LocalRunner, self).set_task(task)
return server, tracker
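def _example_local_runner_setup():
    """Illustrative sketch (not part of the original file): LocalRunner needs no tracker
    information because set_task() silently starts its own tracker and server; only
    measurement parameters are supplied. Values below follow the CPU-tuning advice in
    the docstring (number=1 so cache flushing is effective)."""
    from tvm import autotvm

    return autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=autotvm.LocalRunner(
            number=1, repeat=10, min_repeat_ms=0, enable_cpu_cache_flush=True
        ),
    )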
def _build_func_common(measure_input, runtime=None, check_gpu=None, build_option=None):
"""Common part for building a configuration"""
target, task, config = measure_input
target, task.target_host = Target.canon_target_and_host(target, task.target_host)
with target:
s, args = task.instantiate(config)
# check template validity and code hash consistency
if not config.valid():
raise InstantiationError(config.errors)
# if target is vta, we need to use vta build
if (
hasattr(measure_input.target, "device_name")
and measure_input.target.device_name == "vta"
):
# pylint: disable=import-outside-toplevel
import vta
func = vta.build(s, args, target_host=task.target_host)
else:
current_pass_context: tvm.ir.transform.PassContext = (
tvm.ir.transform.PassContext.current()
)
current_config = dict(current_pass_context.config)
if build_option is not None:
current_config.update(build_option)
if "tir.add_lower_pass" in current_config:
current_add_lower_pass = list(current_config["tir.add_lower_pass"])
else:
current_add_lower_pass = []
if check_gpu:
current_add_lower_pass.append((2, gpu_verify_pass(**check_gpu)))
current_config["tir.add_lower_pass"] = current_add_lower_pass
with tvm.ir.transform.PassContext(
opt_level=current_pass_context.opt_level,
required_pass=current_pass_context.required_pass,
disabled_pass=current_pass_context.disabled_pass,
instruments=current_pass_context.instruments,
config=current_config,
):
func = build(s, args, target_host=task.target_host, runtime=runtime)
return func, tuple((get_const_tuple(x.shape), x.dtype) for x in args)
class _WrappedBuildFunc:
"""
Wrap build_func to a function that can be used in measure.
Note: this is a class instead of a closure so that it can be pickled when
using multiprocessing.
Parameters
----------
build_func : The compilation function
We expect fcompile to contain an attr "output_format".
runtime : Optional[Runtime]
The runtime to generate artifacts for
Returns
-------
wrapped_build_func : callable
The wrapped build function
"""
def __init__(self, build_func, runtime=None):
if not hasattr(build_func, "output_format"):
raise AttributeError("Expect build_func to have the attribute output_format.")
self.build_func = build_func
self.runtime = runtime
def __call__(self, measure_input, tmp_dir, **kwargs):
"""
Wrapped build func.
Parameters
----------
measure_input: MeasureInput
The input of measurement
tmp_dir: str
The path of temporary directory to export generated library
"""
tic = time.time()
try:
filename = os.path.join(
tmp_dir, "tmp_func_%0x.%s" % (getrandbits(64), self.build_func.output_format)
)
# TODO(tvm-team) consider inlining _build_func_common
func, arg_info = _build_func_common(measure_input, self.runtime, **kwargs)
if self.build_func.output_format == ".model-library-format":
# Late import to preserve autoTVM with USE_MICRO OFF
try:
from tvm import micro # pylint: disable=import-outside-toplevel
except ImportError:
raise ImportError("Requires USE_MICRO")
micro.export_model_library_format(func, filename)
else:
func.export_library(filename, self.build_func)
except Exception as e: # pylint: disable=broad-except
tb = traceback.format_exc()
return BuildResult(None, None, (tb, e), time.time() - tic)
return BuildResult(filename, arg_info, None, time.time() - tic)
ModuleLoader = typing.Callable[
[dict, dict], typing.ContextManager[typing.Tuple[tvm.rpc.RPCSession, tvm.runtime.Module]]
]
def run_through_rpc(
measure_input,
build_result,
number,
repeat,
min_repeat_ms,
cooldown_interval,
remote_kwargs,
ref_input,
enable_cpu_cache_flush=False,
module_loader=None,
):
"""Run a generated library through rpc
Parameters
----------
measure_input: MeasureInput
The raw measure input
build_result: BuildResult
The result returned from Builder. This contains the path to the generated library.
number: int
The number of times to run the generated code for taking average.
We call these runs one `repeat` of measurement.
repeat : int, optional
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first one is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms: int, optional
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
the parameter `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., when the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval: float
The cool down interval between two measurements
remote_kwargs: dict
Passed to module_loader(). Ultimately, keyword args to request_remote().
ref_input: List of np.ndarray
The reference input used for tuning. Empty for randomly filled input.
enable_cpu_cache_flush: bool
Whether to flush cache on CPU between repeated measurements.
Flushing cache can make the measured latency of one operator closer to
its actual latency during end-to-end inference.
To make this option effective, the argument `number` should also be set to 1.
This only has an effect on CPU tasks.
module_loader: ModuleLoader
A function that returns a ContextManager used to establish and teardown the remote session.
"""
if isinstance(build_result, MeasureResult):
return build_result
tic = time.time()
errno = MeasureErrorNo.NO_ERROR
try:
# upload built module
with module_loader(remote_kwargs, build_result) as (remote, mod):
dev = remote.device(str(measure_input.target), 0)
# Limitation:
# We cannot get the PackedFunc directly in remote mode because it is wrapped
# in a std::function. We could lift this restriction later once we fold
# the PackedFunc into an object. Currently, we pass the function name to work
# around it.
f_prepare = "cache_flush_cpu_non_first_arg" if enable_cpu_cache_flush else ""
time_f = mod.time_evaluator(
mod.entry_name,
dev,
number=number,
repeat=repeat,
min_repeat_ms=min_repeat_ms,
f_preproc=f_prepare,
)
if ref_input:
args = [nd.array(x, device=dev) for x in ref_input]
else:
try:
random_fill = remote.get_function("tvm.contrib.random.random_fill")
except AttributeError:
raise AttributeError(
"Please make sure USE_RANDOM is ON in the config.cmake "
"on the remote devices"
)
args = [nd.empty(x[0], x[1], dev) for x in build_result.arg_info]
if "scatter" not in measure_input.task.name:
# the index tensor of scatter op cannot be randomly initialized
for arg in args:
random_fill(arg)
dev.sync()
costs = time_f(*args).results
if len(costs) > 2: # remove largest and smallest value to reduce variance
costs = list(costs)
costs.sort()
costs = tuple(costs[1:-1])
except TVMError as exc:
msg = str(exc)
if "Stack trace returned" in msg:
msg = msg[: msg.index("Stack trace returned")]
if "CUDA Source" in msg:
msg = msg[: msg.index("CUDA Source")]
costs = (
traceback.format_exc(),
RuntimeError(msg[:1024]),
)
errno = MeasureErrorNo.RUNTIME_DEVICE
tstamp = time.time()
time.sleep(cooldown_interval)
return MeasureResult(costs, errno, tstamp - tic + build_result.time_cost, tstamp)
class DefaultModuleLoader:
"""See default_module_loader(). A pickleable emulation of the original function closure."""
def __init__(self, pre_load_function=None) -> None:
self.pre_load_function = pre_load_function
@contextlib.contextmanager
def __call__(self, remote_kwargs, build_result):
remote = request_remote(**remote_kwargs)
if self.pre_load_function is not None:
self.pre_load_function(remote, build_result)
remote.upload(build_result.filename)
try:
yield remote, remote.load_module(os.path.split(build_result.filename)[1])
finally:
# clean up remote files
remote.remove(build_result.filename)
remote.remove(os.path.splitext(build_result.filename)[0] + ".so")
remote.remove("")
def default_module_loader(pre_load_function=None):
"""Returns a default function that can be passed as module_loader to run_through_rpc.
Parameters
----------
pre_load_function : Optional[Function[tvm.rpc.Session, tvm.runtime.Module]]
Invoked after a session is established and before the default code-loading RPC calls are
issued. Allows performing pre-upload actions, e.g. resetting the remote runtime environment.
Returns
-------
DefaultModuleLoader :
A callable that can be passed as module_loader to run_through_rpc.
"""
# This was a function with a closure before but that couldn't be pickled!
# We need pickle to work for using python's multiprocessing on some platforms.
return DefaultModuleLoader(pre_load_function)
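def _example_custom_module_loader():
    """Illustrative sketch (not part of the original file): attach a pre-load hook that
    runs on the remote session before the built library is uploaded. The hook body is a
    placeholder; DefaultModuleLoader calls it as pre_load_function(remote, build_result)."""

    def _pre_load(remote, build_result):  # pylint: disable=unused-argument
        # e.g. inspect or reset remote state here, before remote.upload() is issued
        pass

    return default_module_loader(pre_load_function=_pre_load)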
def request_remote(device_key, host=None, port=None, priority=1, timeout=60):
"""Request a remote session
Parameters
----------
device_key: string
The device key of registered device in tracker
host: str, optional
The host address of the rpc tracker.
If None, the environment variable "TVM_TRACKER_HOST" will be used.
port: int, optional
The port of the rpc tracker.
If None, the environment variable "TVM_TRACKER_PORT" will be used.
priority: int, optional
The priority of this request; a larger value means higher priority.
timeout: float, optional
The timeout of this session (units: second)
Returns
-------
session: RPCSession
"""
# connect to the tracker
host = host or os.environ["TVM_TRACKER_HOST"]
port = port or int(os.environ["TVM_TRACKER_PORT"])
tracker = _rpc.connect_tracker(host, port)
remote = tracker.request(device_key, priority=priority, session_timeout=timeout)
return remote
def check_remote(target, device_key, host=None, port=None, priority=100, timeout=10):
"""
Check the availability of a remote device
Parameters
----------
target: Target
The wanted compilation target
device_key: string
device key of registered device in tracker
host: str, optional
The host address of the rpc tracker.
If None, the environment variable "TVM_TRACKER_HOST" will be used.
port: int, optional
The port of the rpc tracker.
If None, the environment variable "TVM_TRACKER_PORT" will be used.
priority: int, optional
The priority of this request; a larger value means higher priority.
timeout: float, optional
The timeout of this check (units: seconds).
Returns
-------
available: bool
True if can find available device
"""
def _check():
logger.debug("waiting for device...")
remote = request_remote(device_key, host, port, priority)
dev = remote.device(str(target))
while not dev.exist: # wait until we get an available device
pass
logger.debug("device available")
t = threading.Thread(
target=_check,
)
t.start()
t.join(timeout)
remote = request_remote(device_key, host, port, priority)
dev = remote.device(str(target))
return dev.exist
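def _example_query_remote(target, device_key, host="127.0.0.1", port=9190):
    """Illustrative sketch (not part of the original file): verify that a device is
    reachable through the tracker, then request a session and check its device. The
    tracker address used here is a placeholder assumption."""
    if not check_remote(target, device_key, host, port):
        raise RuntimeError("No free device '%s' registered on the tracker" % device_key)
    remote = request_remote(device_key, host, port, priority=1, timeout=60)
    dev = remote.device(str(target), 0)
    return dev.exist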
def set_cuda_target_arch(arch):
"""THIS API IS DEPRECATED.
set target architecture of nvcc compiler
Parameters
----------
arch: str or list
The argument of nvcc -arch. (e.g. "sm_51", "sm_62")
it can also be a list of gencode arguments passed to the nvcc command line,
e.g., ["-gencode", "arch=compute_52,code=sm_52", "-gencode", "arch=compute_70,code=sm_70"]
"""
raise ValueError(
"The API 'autotvm.measure.set_cuda_target_arch' is deprecated."
"Try specifying it by adding '-arch=sm_xx' to your target, such as 'cuda -arch=sm_86'."
"See https://github.com/apache/tvm/pull/9544 for the upgrade guide."
)
def gpu_verify_pass(**kwargs):
"""Verify the validity of a gpu kernel.
This pass will check memory usage and number of threads per block.
"""
def verify_pass(f, *_):
valid = tvm.tir.analysis.verify_gpu_code(f, kwargs)
if not valid:
raise InstantiationError("Skipped because of invalid gpu kernel")
return f
return tvm.tir.transform.prim_func_pass(verify_pass, opt_level=0)
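def _example_gpu_verify_build(sched, args, target="cuda"):
    """Illustrative sketch (not part of the original file): register gpu_verify_pass as an
    extra lowering pass so kernels that exceed the given limits raise InstantiationError
    during build. The limits below are placeholder values; real ones come from the device,
    as in get_build_kwargs() above."""
    check = gpu_verify_pass(
        max_shared_memory_per_block=48 * 1024,
        max_threads_per_block=1024,
        max_thread_x=1024,
        max_thread_y=1024,
        max_thread_z=64,
    )
    with tvm.ir.transform.PassContext(config={"tir.add_lower_pass": [(2, check)]}):
        return tvm.build(sched, args, target=target)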
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/record.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=superfluous-parens, redefined-outer-name, redefined-outer-name,pointless-string-statement
# pylint: disable=consider-using-enumerate,invalid-name
"""Tuning record and serialization format"""
import argparse
import base64
from io import TextIOBase
import logging
import pickle
import json
import time
from typing import Union
import os
import itertools
from collections import OrderedDict
import numpy as np
from .. import build, lower
from ..target import Target
from ..contrib import popen_pool
from .. import __version__
from . import task
from .task import ConfigEntity, ApplyHistoryBest
from .measure import MeasureInput, MeasureResult
AUTOTVM_LOG_VERSION = 0.2
_old_version_warning = True
logger = logging.getLogger("autotvm")
try: # convert unicode to str for python2
_unicode = unicode
except NameError:
_unicode = ()
try:
_long = long
except NameError:
_long = int
def measure_str_key(inp, include_config=True):
"""get unique str key for MeasureInput
Parameters
----------
inp: autotvm.measure.MeasureInput
input for the measure
include_config: bool, optional
whether includes config in the str key
Returns
-------
key: str
The str representation of key
"""
config_str = str(inp.config) if include_config else ""
return "".join(
[str(inp.target), inp.task.name, str(inp.task.args), str(inp.task.kwargs), config_str]
)
def encode(inp, result, protocol="json"):
"""encode (MeasureInput, MeasureResult) pair to a string
Parameters
----------
inp: autotvm.measure.MeasureInput
result: autotvm.measure.MeasureResult
pair of input/result
protocol: str
log protocol, json or pickle
Returns
-------
row: str
a row in the logger file
"""
if protocol == "json":
json_dict = {
"input": (str(inp.target), inp.task.name, inp.task.args, inp.task.kwargs),
"config": inp.config.to_json_dict(),
"result": (
result.costs if result.error_no == 0 else (1e9,),
result.error_no,
result.all_cost,
result.timestamp,
),
"version": AUTOTVM_LOG_VERSION,
"tvm_version": __version__,
}
return json.dumps(json_dict)
if protocol == "pickle":
row = (
str(inp.target),
str(
base64.b64encode(
pickle.dumps([inp.task.name, inp.task.args, inp.task.kwargs])
).decode()
),
str(base64.b64encode(pickle.dumps(inp.config)).decode()),
str(base64.b64encode(pickle.dumps(tuple(result))).decode()),
str(AUTOTVM_LOG_VERSION),
str(__version__),
)
return "\t".join(row)
raise RuntimeError("Invalid log protocol: " + protocol)
def decode(row, protocol="json"):
"""Decode encoded record string to python object
Parameters
----------
row : str
a row in the logger file
protocol : str
log protocol, json or pickle
Returns
-------
ret : tuple(autotvm.measure.MeasureInput, autotvm.measure.MeasureResult), or None
The tuple of input and result, or None if input uses old version log format.
"""
# pylint: disable=unused-variable
global _old_version_warning
if protocol == "json":
row = json.loads(row)
if "v" in row and row["v"] == 0.1:
if _old_version_warning:
logger.warning("AutoTVM log version 0.1 is no longer supported.")
_old_version_warning = False
return None
tgt, task_name, task_args, task_kwargs = row["input"]
tgt = str(tgt)
if "-target" in tgt:
logger.warning('"-target" is deprecated, use "-mtriple" instead.')
tgt = tgt.replace("-target", "-mtriple")
tgt = Target(str(tgt))
def clean_json_to_python(x):
"""1. Convert all list in x to tuple (hashable)
2. Convert unicode to str for python2
"""
if isinstance(x, list):
return tuple([clean_json_to_python(a) for a in x])
if isinstance(x, _unicode):
return str(x)
if isinstance(x, (_long, int)):
return int(x)
return x
tsk = task.Task(clean_json_to_python(task_name), clean_json_to_python(task_args))
config = ConfigEntity.from_json_dict(row["config"])
inp = MeasureInput(tgt, tsk, config)
result = MeasureResult(*[tuple(x) if isinstance(x, list) else x for x in row["result"]])
config.cost = np.mean(result.costs)
return inp, result
if protocol == "pickle":
items = row.split("\t")
if len(items) == 4:
if _old_version_warning:
logger.warning("AutoTVM log version 0.1 is no longer supported.")
_old_version_warning = False
return None
tgt = Target(items[0])
task_tuple = pickle.loads(base64.b64decode(items[1].encode()))
config = pickle.loads(base64.b64decode(items[2].encode()))
result = MeasureResult(*pickle.loads(base64.b64decode(items[3].encode())))
config.cost = np.mean(result.costs)
tsk = task.Task(task_tuple[0], task_tuple[1])
return MeasureInput(tgt, tsk, config), result
raise RuntimeError("Invalid log protocol: " + protocol)
def load_from_buffer(file: TextIOBase):
"""Generator: load records from buffer.
This is a generator that yields the records.
Parameters
----------
file: io.TextIOBase
Yields
------
input: autotvm.measure.MeasureInput
result: autotvm.measure.MeasureResult
"""
for row in file:
if row and not row.startswith("#"):
ret = decode(row)
if ret is None:
continue
yield ret
def load_from_file(filepath: Union[str, bytes, os.PathLike]):
"""Generator: load records from path.
This is a generator that yields the records.
Parameters
----------
filepath: str, bytes, or os.PathLike
Yields
------
input: autotvm.measure.MeasureInput
result: autotvm.measure.MeasureResult
"""
with open(filepath) as f:
for row in f:
if row and not row.startswith("#"):
ret = decode(row)
if ret is None:
continue
yield ret
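def _example_best_from_log(filepath):
    """Illustrative sketch (not part of the original file): scan a log file and return the
    (MeasureInput, MeasureResult) pair with the lowest mean cost among successful runs."""
    best = None
    for inp, res in load_from_file(filepath):
        if res.error_no != 0:
            continue
        if best is None or np.mean(res.costs) < np.mean(best[1].costs):
            best = (inp, res)
    return best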
def split_workload(in_file, clean=True):
"""Split a log file into separate files, each of which contains only a single workload
This function can also delete duplicated records in log file
Parameters
----------
in_file: str
input filename
clean: bool
whether delete duplicated items
"""
tic = time.time()
lines = list(open(in_file).readlines())
logger.info("start converting...")
pool = popen_pool.PopenPoolExecutor()
lines = [rec for rec in pool.map(decode, lines) if rec is not None]
logger.info("map done %.2f", time.time() - tic)
wkl_dict = OrderedDict()
for inp, res in lines:
wkl = measure_str_key(inp, False)
if wkl not in wkl_dict:
wkl_dict[wkl] = []
wkl_dict[wkl].append([inp, res])
if clean:
for i, (k, v) in enumerate(wkl_dict.items()):
# clean duplicated items
added = set()
cleaned = []
for inp, res in v:
str_key = measure_str_key(inp)
if str_key in added:
continue
added.add(str_key)
cleaned.append([inp, res])
# write to file
logger.info("Key: %s\tValid: %d\tDup: %d\t", k, len(cleaned), len(v) - len(cleaned))
with open(args.i + ".%03d.wkl" % i, "w") as fout:
for inp, res in cleaned:
fout.write(encode(inp, res) + "\n")
else:
for i, (k, v) in enumerate(wkl_dict.items()):
logger.info("Key: %s\tNum: %d", k, len(v))
with open(args.i + ".%03d.wkl" % i, "w") as fout:
for inp, res in v:
fout.write(encode(inp, res) + "\n")
def pick_best(in_file, out_file):
"""
Pick the best entries from a file and store them to another file.
This function distills the useful log entries from a large log file.
If out_file already exists, the best entries from both
in_file and out_file will be saved.
Parameters
----------
in_file: str
The filename of input
out_file: str or file
The filename of output
"""
context = load_from_file(in_file)
if os.path.isfile(out_file):
out_context = load_from_file(out_file)
context = itertools.chain(context, out_context)
context, context_clone = itertools.tee(context)
best_context = ApplyHistoryBest(context)
best_set = set()
for v in best_context.best_by_model.values():
best_set.add(measure_str_key(v[0]))
for v in best_context.best_by_targetkey.values():
best_set.add(measure_str_key(v[0]))
logger.info("Extract %d best records from the %s", len(best_set), in_file)
fout = open(out_file, "w") if isinstance(out_file, str) else out_file
for inp, res in context_clone:
if measure_str_key(inp) in best_set:
fout.write(encode(inp, res) + "\n")
best_set.remove(measure_str_key(inp))
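def _example_distill_and_apply(full_log="tune.log", best_log="tune_best.log"):
    """Illustrative sketch (not part of the original file): distill the best entries of a
    large log into a small one. The file names are placeholder assumptions; the distilled
    log would then typically be consumed through ApplyHistoryBest, e.g.
    `with tvm.autotvm.apply_history_best(best_log): lib = tvm.relay.build(...)`."""
    pick_best(full_log, best_log)
    return best_log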
"""
Usage:
This record executable module has three modes.
* Print log file in readable format
e.g. python -m tvm.autotvm.record --mode read --i collect_conv.log --begin 0 --end 5 --ir --code
* Extract history best from a large log file
e.g. python -m tvm.autotvm.record --mode pick --i collect.log
* Split a log file into separate files, each of which contains only a single wkl
e.g. python -m tvm.autotvm.record --mode split --i collect.log
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--mode", choices=["read", "pick", "split"], default="read")
parser.add_argument("--i", type=str, help="input file")
parser.add_argument("--o", type=str, default=None, help="output file")
parser.add_argument("--begin", type=int, default=0)
parser.add_argument("--end", type=int, default=5)
parser.add_argument("--ir", action="store_true")
parser.add_argument("--code", action="store_true")
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
if args.mode == "pick":
args.o = args.o or args.i + ".best.log"
pick_best(args.i, args.o)
elif args.mode == "read":
for i, (inp, result) in enumerate(load_from_file(args.i)):
if args.begin <= i < args.end:
with inp.target:
s, arg_bufs = inp.task.instantiate(inp.config)
print("")
print(inp.target, inp.task, inp.config)
print(result)
if args.ir:
with inp.target:
print(lower(s, arg_bufs, simple_mode=True))
if args.code:
with inp.target:
func = build(s, arg_bufs)
print(func.imported_modules[0].get_source())
elif args.mode == "split":
split_workload(args.i)
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/task/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Task is a tunable composition of template functions.
Tuner takes a tunable task and optimizes the joint configuration
space of all the template functions in the task.
This module defines the task data structure, as well as a collection(zoo)
of typical tasks of interest.
"""
from .task import (
Task,
create,
get_config,
args_to_workload,
template,
serialize_args,
deserialize_args,
)
from .space import ConfigSpace, ConfigEntity
from .code_hash import attach_code_hash, attach_code_hash_to_arg
from .dispatcher import (
DispatchContext,
ApplyConfig,
ApplyFixedConfig,
ApplyHistoryBest,
FallbackContext,
clear_fallback_cache,
ApplyGraphBest,
)
from .topi_integration import (
register_topi_compute,
register_topi_schedule,
TaskExtractEnv,
get_workload,
)
from .relay_integration import extract_from_program, extract_from_multiple_program
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/task/code_hash.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Decorator functions for hashing schedule code
Code hashing is used to check the consistency of schedule code and the parameters loaded from the log.
"""
import functools
import inspect
import zlib
from tvm.te import schedule
def attach_code_hash(s):
"""Decorator for attaching a code hash to a schedule
Parameters
----------
s: Schedule
tvm.te.schedule.Schedule to attach the hash to
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
func(*args, **kwargs)
raw_hash = zlib.crc32("".join(inspect.getsourcelines(func)[0]).encode())
s.code_hash = hex(raw_hash)[2:]
return wrapper
return decorator
def attach_code_hash_to_arg(arg_idx=1):
"""Decorator for attaching a code hash to a schedule
Parameters
----------
arg_idx: int
index of the argument (expected to be a Schedule) to attach the code
hash to
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
func(*args, **kwargs)
assert isinstance(args[arg_idx], schedule.Schedule)
raw_hash = zlib.crc32("".join(inspect.getsourcelines(func)[0]).encode())
args[arg_idx].code_hash = hex(raw_hash)[2:]
return wrapper
return decorator
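def _example_attach_hash(sch, tensor):
    """Illustrative sketch (not part of the original file, names are hypothetical): record a
    hash of the schedule-building code on the Schedule so that parameters loaded from a log
    can be checked against the code that produced them."""

    @attach_code_hash(sch)
    def _apply():
        sch[tensor].unroll(sch[tensor].op.axis[-1])

    _apply()
    return sch.code_hash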
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/task/dispatcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Template dispatcher module.
A dispatcher is a function that can contain multiple behaviors.
Its specific behavior is controlled by DispatchContext.
DispatchContext is used in two ways, usually via different implementations
of the DispatchContext base class.
- During search, we can use it to pass the current proposal from the tuner.
- During evaluation, we can use it to pick the best policy.
"""
# pylint: disable=invalid-name
from __future__ import absolute_import as _abs
from io import TextIOBase
import logging
from os import PathLike
from pathlib import Path
from typing import List, Iterable, Tuple, Union
import numpy as np
from .space import FallbackConfigEntity
from .. import env as _env
from ..measure import MeasureInput, MeasureResult
logger = logging.getLogger("autotvm")
Records = Union[
Union[str, bytes, Path], # Path-like objects
TextIOBase, # File-like objects
Iterable[Tuple[MeasureInput, MeasureResult]],
]
class DispatchContext(object):
"""
Base class of dispatch context.
DispatchContext enables the target and workload
specific dispatch mechanism for templates.
"""
current = None
# a set to prevent printing duplicated messages
warning_messages = set()
def __init__(self):
self._old_ctx = DispatchContext.current
def query(self, target, workload):
"""
Query the context to get the specific config for a template.
If the result cannot be found inside this context, this function will query it
from the upper contexts.
Parameters
----------
target: Target
The current target
workload : Workload
The current workload.
Returns
-------
cfg : ConfigSpace
The specific configuration.
"""
ret = self._query_inside(target, workload)
if ret is None:
ret = self._old_ctx.query(target, workload)
return ret
def update(self, target, workload, cfg):
"""
Update context with a specific config.
Parameters
----------
target: Target
The current target
workload : Workload
The current workload.
cfg : ConfigSpace
The specific configuration.
Note
----
This interface is for cases when TVM decides to replace an operator in the graph.
For example, the `AlterOpLayout` pass (enabled when `opt_level = 3`) replaces `NCHW`
convolution with `NCHW[x]c` implementation on x86 CPUs.
Thus in TOPI, we first query schedule using original `NCHW` workload,
then update the dispatcher with the new `NCHW[x]c` workload.
So that later on, `NCHW[x]c` convolution can get schedule from the dispatcher using
its own workload directly.
.. code-block:: python
@conv2d_alter_layout.register("cpu")
def _alter_conv2d_layout(attrs, inputs, tinfo):
workload = get_conv2d_workload(...)
dispatch_ctx = autotvm.task.DispatchContext.current
target = tvm.target.Target.current()
config = dispatch_ctx.query(target, workload)
# Get conv2d_NCHWc workload from config
# new_workload = ...
# new_inputs = ...
# new_attrs = ...
# Store altered operator's config
dispatch_ctx.update(target, new_workload, config)
return sym.contrib.conv2d_NCHWc(*new_inputs, **new_attrs)
We directly store `config` back because `conv2d_NCHW` and `conv2d_NCHWc`
share the same schedule parameters.
One can construct a new `ConfigEntity` if this is not the case.
"""
raise NotImplementedError()
def _query_inside(self, target, workload):
"""
Query the context to get the specific config for a template.
This function only queries the config inside this context.
Parameters
----------
target: Target
The current target
workload : Workload
The current workload.
Returns
-------
cfg : ConfigSpace
The specific configuration.
"""
raise NotImplementedError()
def __enter__(self):
self._old_ctx = DispatchContext.current
DispatchContext.current = self
return self
def __exit__(self, ptype, value, trace):
DispatchContext.current = self._old_ctx
class ApplyConfig(DispatchContext):
"""Apply a deterministic config entity for all queries.
Parameters
----------
config : ConfigSpace or ConfigEntity
The specific configuration we care about.
"""
def __init__(self, config):
super(ApplyConfig, self).__init__()
self._config = config
self.workload = None
def _query_inside(self, target, workload):
"""Override query"""
self.workload = workload
return self._config
def update(self, target, workload, cfg):
"""Override update"""
self.workload = workload
self._config = cfg
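def _example_apply_config(tsk):
    """Illustrative sketch (not part of the original file): instantiate a task with one
    fixed point of its config space, e.g. to debug a single schedule without running the
    tuner. The target string is a placeholder assumption."""
    # pylint: disable=import-outside-toplevel
    import tvm

    cfg = tsk.config_space.get(0)
    with ApplyConfig(cfg):
        with tvm.target.Target("llvm"):
            sch, args = tsk.instantiate(cfg)
    return sch, args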
class ApplyFixedConfig(DispatchContext):
"""Apply a config of a deterministic schedule.
This is used for building a single Relay operator with deterministic schedule
for testing schedules at Relay level.
Parameters
----------
tasks : list[tvm.autotvm.task.task.Task]
List of autoTVM tasks.
schedule_names : str, List[str]
Name of schedules to use.
"""
def __init__(self, tasks, schedule_names: Union[str, List[str]]):
super(ApplyFixedConfig, self).__init__()
if isinstance(schedule_names, str):
self._schedule_names = [schedule_names]
elif isinstance(schedule_names, list):
self._schedule_names = schedule_names
else:
raise RuntimeError("Incorrect type: " + schedule_names)
self._tasks = tasks
self.workload = None
def _query_inside(self, target, workload):
"""Override query"""
self.workload = workload
# Create a config from the correct task
config = None
for task in self._tasks:
if task.name == workload[0]:
config = task.config_space.get(0)
break
if not config:
raise RuntimeError(
"workload: %s does not exist in %s" % (str(workload), str(self._tasks))
)
# Add low cost to the target schedule and high cost to others.
if workload[0] in self._schedule_names:
config.cost = 1e-6
else:
config.cost = 100000
return config
def update(self, target, workload, cfg):
"""Override update"""
self.workload = workload
self._config = cfg
class ApplyHistoryBest(DispatchContext):
"""
Apply the history best config
Parameters
----------
records : None, Records, or iterator of Records objects, where a
Records object is a path-like object, a file-like object,
or an iterator of (MeasureInput, MeasureResult).
Collection of tuning records. If multiple Records objects are passed, their
contents will be merged.
"""
def __init__(self, records: Union[None, Records, Iterable[Records]]):
super(ApplyHistoryBest, self).__init__()
self.best_by_targetkey = {}
self.best_by_model = {}
self._best_user_defined = {}
if records:
self.load(records)
def load(self, records: Union[Records, Iterable[Records]]):
"""Load records to this dispatch context
Parameters
----------
records : str, list of str, or iterator of (autotvm.measure.MeasureInput,\
autotvm.measure.MeasureResult)
Collection of tuning records. If multiple Records objects are passed, their
contents will be merged.
"""
# pylint: disable=import-outside-toplevel
from ..record import load_from_file, load_from_buffer
def _unpack_records(
records: Union[Records, Iterable[Records]]
) -> List[Tuple[MeasureInput, MeasureResult]]:
if isinstance(records, (str, bytes, PathLike)):
return load_from_file(records)
if isinstance(records, TextIOBase):
return load_from_buffer(records)
joint_records = []
for record in records:
if isinstance(record, Tuple) and isinstance(record[0], MeasureInput):
joint_records.append(record)
else:
joint_records += _unpack_records(record)
return joint_records
flattened_records = _unpack_records(records)
if not flattened_records:
return
best_by_targetkey = self.best_by_targetkey
best_by_model = self.best_by_model
counter = 0
for inp, res in flattened_records:
counter += 1
if res.error_no != 0:
continue
# use target keys in tvm target system as key to build best map
for k in inp.target.keys:
key = (k, inp.task.workload)
if key not in best_by_targetkey:
best_by_targetkey[key] = (inp, res)
else:
_, other_res = best_by_targetkey[key]
if np.mean(other_res.costs) > np.mean(res.costs):
best_by_targetkey[key] = (inp, res)
# use model as key to build best map
key = (inp.target.model, inp.task.workload)
if key not in best_by_model:
if inp.target.model != "unknown":
best_by_model[key] = (inp, res)
else:
_, other_res = best_by_model[key]
if np.mean(other_res.costs) > np.mean(res.costs):
best_by_model[key] = (inp, res)
logger.debug("Finish loading %d records", counter)
def _query_inside(self, target, workload):
if target is None:
raise RuntimeError(
"Need a target context to find the history best. "
"Hint: If your target is llvm, use `with tvm.target.Target('llvm'):`"
" above the dispatcher call. So does other target. "
)
# first try matching by model
key = (target.model, workload)
if key in self._best_user_defined:
return self._best_user_defined[key]
if key in self.best_by_model:
inp, _ = self.best_by_model[key]
return inp.config
# then try matching by target key
for k in target.keys:
key = (k, workload)
if key in self._best_user_defined:
return self._best_user_defined[key]
if key in self.best_by_targetkey:
inp, _ = self.best_by_targetkey[key]
return inp.config
return None
def update(self, target, workload, cfg):
model = target.model
key = (model, workload)
# assume user provided config is the best
cfg.cost = 0
self._best_user_defined[key] = cfg
for k in target.keys:
key = (k, workload)
self._best_user_defined[key] = cfg
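def _example_query_history_best(log_file, tsk):
    """Illustrative sketch (not part of the original file): load tuning records and look up
    the best configuration recorded for a task's workload. The task is assumed to carry a
    target (as tasks created via autotvm.task.create do)."""
    dispatch_ctx = ApplyHistoryBest(log_file)
    best_config = dispatch_ctx.query(tsk.target, tsk.workload)
    return best_config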
class FallbackContext(DispatchContext):
"""
A fallback dispatch context.
Any tunable template can be called under this context.
This is the root context.
"""
def __init__(self):
super(FallbackContext, self).__init__()
self.memory = {}
def _query_inside(self, target, workload):
key = (str(target), workload)
if key in self.memory:
return self.memory[key]
if not _env.GLOBAL_SCOPE.silent:
msg = (
"Cannot find config for target=%s, workload=%s. A fallback configuration "
"is used, which may bring great performance regression." % (target, workload)
)
if msg not in DispatchContext.warning_messages:
DispatchContext.warning_messages.add(msg)
logger.warning(msg)
cfg = FallbackConfigEntity()
# cache this config
self.memory[key] = cfg
return cfg
def clear_cache(self, target, workload):
"""Clear fallback cache. Pass the same argument as _query_inside to this function
to clean the cache.
Parameters
----------
target: Target
The current target
workload : Workload
The current workload.
"""
key = (str(target), workload)
if key in self.memory:
del self.memory[key]
def update(self, target, workload, cfg):
key = (str(target), workload)
self.memory[key] = cfg
DispatchContext.current = FallbackContext()
def clear_fallback_cache(target, workload):
"""Clear fallback cache. Pass the same argument as _query_inside to this function
to clean the cache.
Parameters
----------
target: Target
The current target
workload : Workload
The current workload.
Note
----
This is used in alter_op_layout to clear the bad cache created before call topi compute function
"""
context = DispatchContext.current
while not isinstance(context, FallbackContext):
context = context._old_ctx
context.clear_cache(target, workload)
class ApplyGraphBest(DispatchContext):
"""Load the graph level tuning optimal schedules.
The input records should be in the ascending order of
node index for target operator. Usually this can be obtained
with graph tuner.
This context maintains an internal counter to indicate the current
node index.
"""
def __init__(self, records: Records):
"""
Parameters
----------
records : str or iterator of (autotvm.measure.MeasureInput, autotvm.measure.MeasureResult)
Collection of tuning records.
If it is a str, it should be the filename of a records log file.
Each row of this file is an encoded record pair.
Otherwise, it is an iterator.
"""
# pylint: disable=import-outside-toplevel
from ..record import load_from_file, load_from_buffer
super(ApplyGraphBest, self).__init__()
if isinstance(records, (str, bytes, PathLike)):
records = load_from_file(records)
elif isinstance(records, TextIOBase):
records = load_from_buffer(records)
else:
records = list(records)
self._records = list(records)
self._counter = 0
self._global_cfg_dict = {}
def _query_inside(self, target, workload):
"""
Query the context to get config from records.
Parameters
----------
target : Target
The current target
workload : Workload
The current workload.
Returns
-------
cfg : ConfigSpace
The specific configuration.
"""
if self._counter < len(self._records):
cfg = self._records[self._counter][0].config
wkl = self._records[self._counter][0].task.workload
if workload is not None:
assert wkl == workload
self._counter += 1
self.update(target, wkl, cfg)
cfg.workload = wkl
return cfg
key = (str(target), workload)
if key not in self._global_cfg_dict:
msg = (
"Config for target=%s, workload=%s is missing in ApplyGraphBest context. "
"A fallback configuration is used, which may bring great performance "
"regression." % (target, workload)
)
logger.warning(msg)
cfg = FallbackConfigEntity()
self._global_cfg_dict[key] = cfg
else:
cfg = self._global_cfg_dict[key]
return cfg
def update(self, target, workload, cfg):
key = (str(target), workload)
self._global_cfg_dict[key] = cfg
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/task/relay_integration.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name, not-context-manager
"""
Decorator and utilities for the integration with TOPI and Relay
99.9% copy-paste of implementation by @MerryMercy
"""
import threading
import logging
import tvm
from tvm.autotvm.task.dispatcher import DispatchContext, FallbackContext
from tvm.target import Target
from .task import create
from .topi_integration import TaskExtractEnv
logger = logging.getLogger("autotvm")
# TODO(moreau89) find a more elegant way to lower for VTAs
def _lower(mod, target, params, opt_level=3):
"""Helper to lower VTA properly."""
# pylint: disable=import-outside-toplevel
from tvm import relay
from tvm.relay.backend import graph_executor_codegen
if hasattr(target, "device_name") and target.device_name == "vta":
import vta
with vta.build_config(opt_level=opt_level, disabled_pass={"AlterOpLayout"}):
mod, _ = relay.optimize(mod, target=target, params=params)
grc = graph_executor_codegen.GraphExecutorCodegen(None, target)
grc.codegen(mod, mod["main"])
return
# Alter op layout code has been written expecting that tuning is applied
# without it, so we disable AlterOpLayout to maintain that behavior.
with tvm.transform.PassContext(opt_level=opt_level, disabled_pass={"AlterOpLayout"}):
compiler = relay.vm.VMCompiler()
if params:
compiler.set_params(params)
compiler.lower(mod, target=target)
def extract_from_program(mod, params, target, target_host=None, ops=None):
"""Extract tuning tasks from a relay program.
This function is the single program version of extract_from_multiple_program.
Parameters
----------
mod: tvm.IRModule or relay.function.Function
The module or function to tune
params: dict of str to numpy array
The associated parameters of the program
target: tvm.target.Target
The compilation target
target_host: tvm.target.Target
The host compilation target
ops: List[tvm.ir.Op] or None
List of relay ops to be tuned. If not specified, all tunable ops will be extracted.
Returns
-------
task: Array of autotvm.task.Task
collected tasks
"""
target, target_host = Target.canon_target_and_host(target, target_host)
return extract_from_multiple_program([mod], [params], target, ops=ops)
def extract_from_multiple_program(mods, params, target, target_host=None, ops=None):
"""Extract tuning tasks from multiple relay programs.
This function collects tuning tasks by building a list of programs
with a "tracing" target and tracing all the calls to topi.
Parameters
----------
mods: List[tvm.IRModule] or List[relay.function.Function]
The list of modules or functions to tune
params: List of dict of str to numpy array
The associated parameters of the programs
target: tvm.target.Target
The compilation target
target_host: tvm.target.Target
The host compilation target
ops: List[tvm.ir.Op] or None
List of relay ops to be tuned. If not specified, all tunable ops will be extracted.
Returns
-------
task: Array of autotvm.task.Task
collected tasks
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
from tvm import topi
env = TaskExtractEnv.get()
# merge target and target host
target, target_host = Target.canon_target_and_host(target, target_host)
# run compiler to collect all TOPI calls during compilation
env.reset(ops)
with env:
# disable logger temporarily
old_state = logger.disabled
logger.disabled = True
for mod, param in zip(mods, params):
if isinstance(mod, relay.function.Function):
mod = tvm.IRModule.from_expr(mod)
assert isinstance(
mod, tvm.IRModule
), "only support relay Module or Function to be tuned"
relay.backend.te_compiler.get().clear()
# wrap build call in thread to avoid multiprocessing problems
build_thread = threading.Thread(target=_lower, args=(mod, target, param))
build_thread.start()
build_thread.join()
relay.backend.te_compiler.get().clear()
# Clear the warning message cache in FallbackContext
if isinstance(DispatchContext.current, FallbackContext):
DispatchContext.current.memory = {}
DispatchContext.warning_messages = set()
logger.disabled = old_state
# create tasks for target
tasks = []
for task_name, args in env.get_tasks():
try:
tsk = create(task_name, args, target=target)
tasks.append(tsk)
except topi.InvalidShapeError:
logger.warning("Invalid shape during AutoTVM task creation")
return tasks
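def _example_extract_tasks(mod, params, target="llvm"):
    """Illustrative sketch (not part of the original file): collect the tunable conv2d
    tasks of a Relay module. Pass ops=None instead to extract every tunable operator;
    the target string is a placeholder assumption."""
    # pylint: disable=import-outside-toplevel
    from tvm import relay

    return extract_from_program(
        mod, params=params, target=target, ops=(relay.op.get("nn.conv2d"),)
    )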
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/task/space.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-few-public-methods,invalid-name,unused-argument,arguments-differ
# pylint: disable=consider-using-enumerate,too-many-lines
"""
Template configuration space.
Each template function can be parameterized by a ConfigSpace.
The space is declared when we invoke the template function with ConfigSpace.
During evaluation, we pass in a ConfigEntity, which contains a specific
entity in the space. This entity contains deterministic parameters.
"""
from __future__ import absolute_import as _abs
import itertools
import functools
import math
from collections import namedtuple, OrderedDict
from random import randrange
import numpy as np
from tvm.te import schedule, thread_axis
from tvm.tir import expr
from tvm.autotvm.utils import get_const_int
Axis = namedtuple("Axis", ["space", "index"])
try:
_long = long
except NameError:
_long = int
class InstantiationError(ValueError):
"""Actively detected error in instantiating a template with a config,
raised by cfg.raise_error
e.g. too many unrolling, too many threads in a block
"""
class TransformSpace(object):
"""Base class for transform space
TransformSpace is the node in the computation graph of axes
.. note::
We can regard our schedule code as a transformation graph of axes.
Starting from raw axes in the definition of te.compute, we can transform these axes
by some operators. The operators include 'split', 'reorder' and 'annotate'.
Each operator has some tunable parameters (e.g. the split factor).
The tuning process is then just finding good parameters for these operators.
All the combinations of these parameters form our search space.
Naming convention:
We call the set of all possible values as XXXSpace. (XXX can be Split, Reorder, Config ...)
We call a specific entity in a space as XXXEntity.
"""
def __init__(self):
self.ins = []
self.num_output = 0
self.entities = []
def __len__(self):
return len(self.entities)
def __getitem__(self, index):
"""Get an entity of the space by index
Parameters
----------
index: int
Returns
-------
transform entity
"""
return self.entities[index]
@staticmethod
def get_num_output():
"""get number of output axes after this transform
Returns
-------
n: int
number of output axes
"""
return 0
class VirtualAxis(TransformSpace):
"""Axis placeholder in template
Parameters
----------
var: int or tvm.te.schedule.IterVar
If it is an int, return a virtual axis whose length is the provided argument.
If it is an IterVar, return a virtual axis whose length is extracted from
the IterVar's extent domain.
name: str
"""
name_ct = 0
def __init__(self, var, name=None):
super(VirtualAxis, self).__init__()
self.num_output = 1
if name is None:
name = "axis_%d" % VirtualAxis.name_ct
VirtualAxis.name_ct += 1
self.name = name
if isinstance(var, (int, _long)):
self.length = var
elif isinstance(var, schedule.IterVar):
self.name = var.var.name
if var.dom is None:
self.length = -1
else:
self.length = get_const_int(var.dom.extent)
elif isinstance(var, VirtualAxis):
self.length = var.length
else:
raise RuntimeError("Invalid type of axis: " + str(type(var)))
@staticmethod
def get_num_output(var, name=None):
return 1
def __repr__(self):
return "vaxis(%s)" % self.name
def get_factors(n):
"""return all factors of an integer
Parameters
----------
n: int
integer to factorize
Returns
-------
factors: list
List of all factors
"""
step = 2 if n % 2 else 1
ret = list(
set(
functools.reduce(
list.__add__,
([i, n // i] for i in range(1, int(math.sqrt(n)) + 1, step) if n % i == 0),
)
)
)
ret.sort()
return ret
def get_pow2s(n):
"""return all power-of-two numbers that are less or equal than the integer
Parameters
----------
n: int
integer for reference
Returns
-------
factors: list
List of all power-of-two numbers
"""
return [2**x for x in range(math.floor(math.log2(n)) + 1)]
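# Example (illustrative): get_factors(12) -> [1, 2, 3, 4, 6, 12];
# get_pow2s(12) -> [1, 2, 4, 8].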
class SplitSpace(TransformSpace):
"""Split an axis for several times"""
def __init__(self, axes, policy, **kwargs):
super(SplitSpace, self).__init__()
axis = axes[0]
self.policy = policy
self.entities = []
max_factor = kwargs.get("max_factor", 1 << 31)
fil = kwargs.get("filter", lambda x: True)
self.product = axis.length
self.num_output = kwargs.get("num_outputs", 0)
assert self.num_output > 0
if policy == "candidate":
for size in kwargs["candidate"]:
assert len(size) == self.num_output
self.entities.append(SplitEntity(size))
else:
if policy == "verbose":
# Include factors and power-of-twos. May generate tails.
divisibles = get_factors(self.product)
pow2s = get_pow2s(self.product)
factors = [x for x in list(set(divisibles) | set(pow2s)) if x <= max_factor]
elif policy == "factors":
# Include divisible factors. Guarantee no tails.
factors = [x for x in get_factors(self.product) if x <= max_factor]
elif policy == "power2":
# Include less, equal, and round-up power-of-two numbers. May generate tails.
factors = [x for x in get_pow2s(self.product) if x <= max_factor]
else:
raise RuntimeError("Invalid policy: %s" % policy)
# Enforce that the product of all split factors equals the axis length
no_tail = kwargs.get("no_tail", policy == "factors")
# Generate split entity by enumerating candidate factors.
self.factors = factors
self._generate_space(0, [None] * (self.num_output - 1), enforce_no_tail=no_tail)
self.entities = list(filter(fil, self.entities))
def _generate_space(self, now, tmp_stack, enforce_no_tail=False):
"""Generate space by DFS"""
if now == self.num_output - 1:
prod = functools.reduce(lambda x, y: x * y, tmp_stack)
if prod > self.product:
return
if self.product % prod == 0 or (not enforce_no_tail and prod < self.product):
self.entities.append(SplitEntity([-1] + tmp_stack[::-1]))
else:
for factor in self.factors:
tmp_stack[now] = factor
self._generate_space(now + 1, tmp_stack, enforce_no_tail)
@staticmethod
def get_num_output(axes, policy, **kwargs):
return kwargs["num_outputs"]
def __repr__(self):
return "Split(policy=%s, product=%d, num_outputs=%d) len=%d" % (
self.policy,
self.product,
self.num_output,
len(self),
)
class SplitEntity(object):
"""
A split operation with detailed parameters
that can apply to an axis
Parameters
----------
size: Array of int
the size of every axis after split.
e.g. for an axis of extent 128 split into 3 axes, a possible
size is [4, 4, 8] (4x4x8 = 128).
"""
def __init__(self, size):
self.size = size
def apply(self, sch, op, axis):
"""Apply split to an axis
Parameters
----------
sch: tvm.te.schedule.Schedule
The tvm schedule
op: tvm.te.Operation
The stage to be applied
axis: tvm.te.schedule.IterVar
axis to split
Returns
-------
axes : list of Axis
The transformed axes.
"""
ret = []
for i in range(1, len(self.size)):
ax0, ax1 = sch[op].split(axis, int(np.prod(self.size[i:])))
ret.append(ax0)
axis = ax1
return ret + [axis]
def __repr__(self):
return str(self.size)
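def _example_split_in_template():
    """Illustrative sketch (not part of the original file): how SplitSpace/SplitEntity are
    typically exercised from a tuning template through cfg.define_split and cfg[...].apply.
    The matmul shape and knob names are placeholder assumptions."""
    # pylint: disable=import-outside-toplevel
    from tvm import autotvm, te

    N, L, M = 512, 512, 512
    A = te.placeholder((N, L), name="A")
    B = te.placeholder((L, M), name="B")
    k = te.reduce_axis((0, L), name="k")
    C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name="C")

    s = te.create_schedule(C.op)
    y, x = s[C].op.axis
    (k,) = s[C].op.reduce_axis

    cfg = autotvm.get_config()
    cfg.define_split("tile_y", y, num_outputs=2)  # backed by a SplitSpace
    cfg.define_split("tile_x", x, num_outputs=2)
    yo, yi = cfg["tile_y"].apply(s, C, y)  # SplitEntity.apply defined above
    xo, xi = cfg["tile_x"].apply(s, C, x)
    s[C].reorder(yo, xo, k, yi, xi)
    return s, [A, B, C]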
class ReorderSpace(TransformSpace):
"""The parameter space for ordering an array of axes"""
def __init__(self, axes, policy, **kwargs):
super(ReorderSpace, self).__init__()
self.ins = axes
self.policy = policy
self.num_output = len(axes)
if policy == "identity":
self.entities = [ReorderEntity(range(len(axes)))]
elif policy == "all":
self.entities = [ReorderEntity(x) for x in itertools.permutations(range(len(axes)))]
elif policy == "interval_all":
begin, end = kwargs["interval"]
sub_space = list(itertools.permutations(range(begin, end)))
prefix, suffix = tuple(range(begin)), tuple(range(end, len(axes)))
self.entities = [ReorderEntity(prefix + x + suffix) for x in sub_space]
elif policy == "candidate":
candidate = kwargs["candidate"]
for can in candidate:
perm = [axes.index(x) for x in can]
self.entities.append(ReorderEntity(perm))
elif policy == "interleave":
spatial, reduce = kwargs["spatial"], kwargs["reduce"]
spatial = [[axes.index(x) for x in ch] for ch in spatial]
reduce = [[axes.index(x) for x in ch] for ch in reduce]
outer_merged = self._merge_chain([x[:-1] for x in spatial])
inner_merged = self._merge_chain([x[-1:] for x in spatial] + reduce)
for o in outer_merged:
for i in inner_merged:
self.entities.append(ReorderEntity(o + i))
elif policy == "interleave_cuda":
spatial, reduce = kwargs["spatial"], kwargs["reduce"]
spatial = [[axes.index(x) for x in ch] for ch in spatial]
reduce = [[axes.index(x) for x in ch] for ch in reduce]
outer_merged = self._merge_chain([x[:-1] for x in spatial])
reduce_merged = self._merge_chain(reduce)
inner_merged = [x[-1] for x in spatial]
for o in outer_merged:
for r in reduce_merged:
self.entities.append(ReorderEntity(o + r + inner_merged))
else:
raise RuntimeError("Invalid policy: " + policy)
@staticmethod
def get_num_output(axes, policy, **kwargs):
return len(axes)
def __repr__(self):
return "Reorder(policy=%s) len=%d" % (self.policy, len(self))
def _merge_chain(self, chains):
"""generate all combinations of merge some chains"""
merged = []
tmp_pt = [0] * len(chains)
tmp_stack = []
size = np.sum([len(x) for x in chains])
self._merge_dfs(chains, size, tmp_pt, tmp_stack, merged)
return merged
def _merge_dfs(self, chains, size, tmp_pt, tmp_stack, merged):
if np.sum(tmp_pt) == size:
merged.append(list(tmp_stack))
return
for i in range(len(chains)):
# use i == np.argmax(....) here to take spatial order into consideration
# if we don't want to consider spatial order, we can use tmp_pt[i] == np.max(....)
if tmp_pt[i] < len(chains[i]) and (
i == np.argmax([len(chains[x]) - tmp_pt[x] for x in range(len(chains))])
):
tmp_stack.append(chains[i][tmp_pt[i]])
tmp_pt[i] += 1
self._merge_dfs(chains, size, tmp_pt, tmp_stack, merged)
tmp_pt[i] -= 1
tmp_stack.pop()
class ReorderEntity(object):
"""A reorder operation with detailed parameters that can apply to axes
Parameters
----------
perm: Array of int
define the permutation
"""
def __init__(self, perm):
self.perm = perm
def apply(self, sch, op, axes):
"""Apply reorder to an array of axes
Parameters
----------
sch: tvm.te.schedule.Schedule
The tvm schedule
op: tvm.te.Operation
The stage to be applied
axes: Array of tvm.te.schedule.IterVar
axes to reorder
Returns
-------
axes : list of Axis
The transformed axes.
"""
if len(axes) == len(self.perm):
new_order = [axes[i] for i in self.perm]
else:
new_order = [axes[i] for i in self.perm if i < len(axes)]
sch[op].reorder(*new_order)
return new_order
def __repr__(self):
return str(self.perm)
class AnnotateSpace(TransformSpace):
"""The parameter space for annotating an array of axes"""
def __init__(self, axes, policy, **kwargs):
super(AnnotateSpace, self).__init__()
self.ins = axes
self.policy = policy
self.num_output = len(axes)
if policy == "bind_gpu":
self.num_axis = len(axes)
if self.num_axis >= 6:
self.entities.append(
AnnotateEntity(
["fuse"] * (self.num_axis - 6)
+ [
"blockIdx.z",
"blockIdx.y",
"blockIdx.x",
"threadIdx.z",
"threadIdx.y",
"threadIdx.x",
]
)
)
elif self.num_axis >= 4:
self.entities.append(
AnnotateEntity(
["fuse"] * (self.num_axis - 4)
+ ["blockIdx.y", "blockIdx.x", "threadIdx.y", "threadIdx.x"]
)
)
elif self.num_axis >= 2:
self.entities.append(
AnnotateEntity(["fuse"] * (self.num_axis - 2) + ["blockIdx.x", "threadIdx.x"])
)
else:
raise RuntimeError("Unhandled case in bind_gpu")
elif policy == "bind_gpu_virtual":
self.num_axis = len(axes)
if self.num_axis >= 9:
self.entities.append(
AnnotateEntity(
["fuse"] * (self.num_axis - 9)
+ [
"blockIdx.z",
"blockIdx.y",
"blockIdx.x",
"vthread",
"vthread",
"vthread",
"threadIdx.z",
"threadIdx.y",
"threadIdx.x",
]
)
)
elif self.num_axis >= 6:
self.entities.append(
AnnotateEntity(
["fuse"] * (self.num_axis - 6)
+ [
"blockIdx.y",
"blockIdx.x",
"vthread",
"vthread",
"threadIdx.y",
"threadIdx.x",
]
)
)
elif self.num_axis >= 3:
self.entities.append(
AnnotateEntity(
["fuse"] * (self.num_axis - 3) + ["blockIdx.x", "vthread", "threadIdx.x"]
)
)
else:
raise RuntimeError("Unhandled case in bind_gpu")
elif policy == "locate_cache":
self.num_axis = len(axes)
num_anchor = kwargs["num_anchor"]
self.anns = list(itertools.combinations(range(self.num_axis), num_anchor))
self.entities = [AnnotateEntity(x) for x in self.anns]
else: # none, vec, unroll, try_vec, try_unroll, try_vec_unroll, ...
anns = policy.replace("try", "none").split("_")
for ann in anns:
if ann not in ["none", "unroll", "vec"]:
raise RuntimeError("Invalid policy: " + policy)
self.num_axis = len(axes)
self.anns = [anns] * self.num_axis
self._generate_space(0, [""] * self.num_axis)
def _generate_space(self, now, tmp_stack):
"""Generate space by DFS"""
if now == self.num_axis:
            # allow at most one vectorized axis
vec_ct = tmp_stack.count("vec")
if vec_ct in (0, 1):
self.entities.append(AnnotateEntity(list(tmp_stack)))
else:
for ann in self.anns[now]:
tmp_stack[now] = ann
self._generate_space(now + 1, tmp_stack)
@staticmethod
def get_num_output(axes, policy, **kwargs):
return len(axes)
def __repr__(self):
return "Annotate(policy=%s) len=%d" % (self.policy, len(self))
class AnnotateEntity(object):
"""An annotation operation with detailed parameters that can apply to axes
Parameters
----------
anns: Array of string
The annotations of axes
"""
def __init__(self, anns):
self.anns = anns
def apply(
self, sch, op, axes, axis_lens=None, max_unroll=None, vec_size=None, cfg=None, source=None
):
"""Apply annotation to an array of axes
Parameters
----------
sch: tvm.te.schedule.Schedule
The tvm schedule
op: tvm.te.Operation
The stage to be applied
axes: Array of tvm.te.schedule.IterVar
            axes to annotate
axis_lens: Array of int, optional
the length of axes
max_unroll: int, optional
maximum unroll step
vec_size: Array of int, optional
valid vector lanes for vectorization
cfg: ConfigEntity, optional
cfg for recording error
source: Array of Array tensor, optional
source tensor for attaching cache
Returns
-------
axes : list of tvm.te.schedule.IterVar
The transformed axes
"""
if source is not None: # special case : attach cache_read/cache_write
for src, to in zip(source, self.anns):
for t in src:
sch[t].compute_at(sch[op], axes[to])
else: # other cases
for i, ann in enumerate(self.anns):
if ann == "none":
pass
elif ann == "unroll":
if max_unroll and axis_lens[i] > max_unroll:
cfg.raise_error("Too large factor for unrolling")
sch[op].unroll(axes[i])
elif ann == "vec":
if vec_size and axis_lens[i] not in vec_size:
cfg.raise_error("Wrong size of lanes in vectorization")
sch[op].vectorize(axes[i])
elif ann == "blockIdx.x":
sch[op].bind(axes[i], thread_axis("blockIdx.x"))
elif ann == "blockIdx.y":
sch[op].bind(axes[i], thread_axis("blockIdx.y"))
elif ann == "blockIdx.z":
sch[op].bind(axes[i], thread_axis("blockIdx.z"))
elif ann == "threadIdx.x":
sch[op].bind(axes[i], thread_axis("threadIdx.x"))
elif ann == "threadIdx.y":
sch[op].bind(axes[i], thread_axis("threadIdx.y"))
elif ann == "threadIdx.z":
sch[op].bind(axes[i], thread_axis("threadIdx.z"))
elif ann == "vthread":
sch[op].bind(axes[i], thread_axis("vthread"))
elif ann == "fuse":
assert i < len(axes) - 1
axes[i + 1] = sch[op].fuse(axes[i], axes[i + 1])
else:
raise RuntimeError("Invalid annotation " + ann)
return axes
def __repr__(self):
return str(self.anns)
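# Illustrative sketch of a "bind_gpu" annotation with three axes (hypothetical names
# a0/a1/a2): the entity would be AnnotateEntity(["fuse", "blockIdx.x", "threadIdx.x"]),
# and apply() would
#   1. fuse a0 into a1:            axes[1] = s[op].fuse(axes[0], axes[1])
#   2. bind the fused axis:        s[op].bind(axes[1], thread_axis("blockIdx.x"))
#   3. bind the innermost axis:    s[op].bind(axes[2], thread_axis("threadIdx.x"))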
class OtherOptionSpace(TransformSpace):
"""The parameter space for general option"""
def __init__(self, axes, policy, **kwargs):
super(OtherOptionSpace, self).__init__()
candidate = kwargs["candidate"]
self.entities = [OtherOptionEntity(x) for x in candidate]
@staticmethod
def get_num_output(axes, policy, **kwargs):
return 0
def __repr__(self):
return "OtherOption(%s) len=%d" % (self.entities, len(self))
class OtherOptionEntity(object):
"""The parameter entity for general option, with a detailed value"""
def __init__(self, val):
self.val = val
def __repr__(self):
return str(self.val)
class ConfigSpace(object):
"""The configuration space of a schedule. Pass it as config in template to
collect transformation space and build transform graph of axes
"""
def __init__(self):
# private dict to provide sugar
self.space_map = OrderedDict() # name -> space
self._collect = True
self._length = None
self._range_length = None
self._dims = None
self._entity_map = OrderedDict() # name -> entity
self._constraints = []
self.errors = []
self.code_hash = None
self.flop = 0
self.cost = None
self.is_fallback = False
self._shared_filter = None
self._shared_filter_cache = None
@staticmethod
def axis(var):
"""get a virtual axis (axis placeholder)
Parameters
----------
var: int or tvm.te.schedule.IterVar
            If it is an int, return an axis whose length is the provided argument.
            If it is an IterVar, return an axis whose length is extracted from the
IterVar's extent domain.
"""
return VirtualAxis(var)
reduce_axis = axis
def define_split(self, name, axis, policy="factors", **kwargs):
"""Define a new tunable knob which splits an axis into a list of axes
Parameters
----------
name: str
name to index the entity of this space
axis: tvm.te.schedule.IterVar
axis to split
policy: str
name of policy.
            If it is 'factors', the tuner will try all divisible factors.
            If it is 'power2', the tuner will try power-of-two factors less than or equal to the length.
            If it is 'verbose', the tuner will try all candidates in the above two policies.
            If it is 'candidate', try the given candidates.
**kwargs:
extra arguments for policy
``max_factor``:
the maximum split factor (`int`).
``filter``:
see examples below for how to use filter (`Callable[[int], bool]`).
``num_outputs``:
the total number of axis after split (`int`).
``no_tail``:
should we only include divisible numbers as split factors (`bool`).
``candidate``:
(policy=candidate) manual candidate list (`List`).
Examples
--------
>>> # use custom candidates
>>> cfg.define_split('tile_x', x, policy='candidate', num_outputs=3,
>>> candidate=[[1, 4, 4], [4, 1, 4]])
        >>> # use a filter that only accepts the split scheme whose innermost tile is less than 4
>>> cfg.define_split('tile_y', y, policy='factors', num_outputs=3,
>>> filter=lambda x: x.size[-1] <= 4)
"""
axes = [axis]
return self._add_new_transform(SplitSpace, name, axes, policy, **kwargs)
def define_reorder(self, name, axes, policy, **kwargs):
"""Define a new tunable knob which reorders a list of axes
Parameters
----------
name: str
name to index the entity of this space
axes: Array of tvm.te.schedule.IterVar
axes to reorder
policy: str
name of policy
            If it is 'identity', do an identity permutation.
            If it is 'all', try all permutations.
            If it is 'interval_all', try all permutations of an interval of axes.
            If it is 'candidate', try the listed candidates.
            If it is 'interleave', interleave chains of spatial axes and chains of reduction axes.
kwargs: dict
extra arguments for policy
"""
return self._add_new_transform(ReorderSpace, name, axes, policy, **kwargs)
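    # Illustrative usage sketch (hypothetical axis names yo, xo, k):
    #   cfg.define_reorder("reorder_outer", [yo, xo, k], policy="all")
    # enumerates every permutation of the three axes; the chosen entity can later be
    # applied with cfg["reorder_outer"].apply(s, C, [yo, xo, k]).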
def define_annotate(self, name, axes, policy, **kwargs):
"""Define a new tunable knob which annotates a list of axes
Parameters
----------
name: str
name to index the entity of this space
axes: Array of tvm.te.schedule.IterVar
axes to annotate
policy: str
name of policy
            If it is 'unroll', unroll the axes.
            If it is 'try_unroll', try to unroll the axes.
            If it is 'try_unroll_vec', try to unroll or vectorize the axes.
            If it is 'bind_gpu', bind the first few axes to gpu threads.
            If it is 'locate_cache', choose n axes to attach shared/local cache.
kwargs: dict
extra arguments for policy
"""
return self._add_new_transform(AnnotateSpace, name, axes, policy, **kwargs)
def define_knob(self, name, candidate):
"""Define a tunable knob with a list of candidates
Parameters
----------
name: str
name key of that option
candidate: list
list of candidates
"""
return self._add_new_transform(OtherOptionSpace, name, [], None, candidate=candidate)
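    # Illustrative usage sketch (hypothetical knob name):
    #   cfg.define_knob("unroll_explicit", [0, 1])
    # defines a knob with two candidates; inside the template the chosen value is read
    # back as cfg["unroll_explicit"].val.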
def add_flop(self, flop):
"""Add float operation statistics for this tuning task
Parameters
        ----------
flop: int or float or IntImm or FloatImm
number of float operations
"""
if isinstance(flop, (expr.IntImm, expr.FloatImm)):
flop = flop.value
self.flop += float(flop)
def raise_error(self, msg):
"""register error in config
        Use this to actively detect errors when scheduling.
        Otherwise these errors will occur during runtime, which
        will cost more time.
Parameters
----------
msg: str
"""
self.errors.append(msg)
def valid(self):
"""Check whether the config meets all the constraints
.. note::
This check should be called after instantiation of task,
because the ConfigEntity/ConfigSpace collects errors during instantiation
Returns
-------
valid: bool
whether the config meets all the constraints
"""
return not bool(self.errors)
def is_index_valid(self, index):
"""Checks if the index satisfies the multi_filter condition
Parameters
----------
index: int
index from the range of the space
Returns
-------
valid: bool
whether the index meets all the constraints
"""
assert 0 <= index < self.range_length
if self._shared_filter is None:
return True
if self._shared_filter_cache is None:
self._make_shared_filter_cache()
return self._shared_filter_cache[index]
def multi_filter(self, filter): # pylint: disable=redefined-builtin
"""The filter can restrict combination of parameters in difference to the knob filter,
that restricts only single parameter
Parameters
----------
filter: function
predicate with one argument (Callable[[int], bool])
.. note::
            Using this filter changes the meaning of __len__.
            Normally __len__ gives both the count of valid indexes and the size of the space,
            but when multi_filter is enabled, __len__ gives only the count of valid indexes
            while range_length gives the size of the space. It is recommended to use
            ``is_index_valid``, ``get_next_index`` and ``get_rand_index`` to traverse the space
Examples
--------
>>> # Pre-requisites
>>> candidates = [[16, 64], [32, 32], [64, 16]]
>>> filter = lambda v: v.size[0] != 16
>>> multi_filter = lambda e: (e["tile_x"].size[0] + e["tile_y"].size[0]) <= 64
>>> # Case 1 - without filtering
>>> cfg.define_split("tile_x", x, num_outputs=2, policy="candidate", candidate=candidates)
>>> cfg.define_split("tile_y", y, num_outputs=2, policy="candidate", candidate=candidates)
>>> # [('tile_x', [16, 64]), ('tile_y', [16, 64])],None,0
>>> # [('tile_x', [32, 32]), ('tile_y', [16, 64])],None,1
>>> # [('tile_x', [64, 16]), ('tile_y', [16, 64])],None,2
>>> # [('tile_x', [16, 64]), ('tile_y', [32, 32])],None,3
>>> # [('tile_x', [32, 32]), ('tile_y', [32, 32])],None,4
>>> # [('tile_x', [64, 16]), ('tile_y', [32, 32])],None,5
>>> # [('tile_x', [16, 64]), ('tile_y', [64, 16])],None,6
>>> # [('tile_x', [32, 32]), ('tile_y', [64, 16])],None,7
>>> # [('tile_x', [64, 16]), ('tile_y', [64, 16])],None,8
>>> # Case 2 - with filter
>>> cfg.define_split("tile_x", x, num_outputs=2, policy="candidate", candidate=candidates,
>>> filter=filter)
>>> cfg.define_split("tile_y", y, num_outputs=2, policy="candidate", candidate=candidates,
>>> filter=filter)
>>> # [('tile_x', [32, 32]), ('tile_y', [32, 32])],None,0
>>> # [('tile_x', [64, 16]), ('tile_y', [32, 32])],None,1
>>> # [('tile_x', [32, 32]), ('tile_y', [64, 16])],None,2
>>> # [('tile_x', [64, 16]), ('tile_y', [64, 16])],None,3
>>> # Case 3 - with filter and multi_filter
>>> cfg.define_split("tile_x", x, num_outputs=2, policy="candidate", candidate=candidates,
>>> filter=filter)
>>> cfg.define_split("tile_y", y, num_outputs=2, policy="candidate", candidate=candidates,
>>> filter=filter)
>>> cfg.multi_filter(filter=multi_filter)
>>> # [('tile_x', [32, 32]), ('tile_y', [32, 32])],None,0
"""
if self._collect:
self.clear_cache()
self._shared_filter = filter
@property
def range_length(self):
"""Length of the index range in the space"""
if self._range_length is None:
self._range_length = int(np.prod([len(x) for x in self.space_map.values()]))
return self._range_length
@property
def dims(self):
"""Dimensions in the space"""
if self._dims is None:
self._dims = [len(x) for x in self.space_map.values()]
return self._dims
def subrange_length(self, start, end):
"""Returns the number of valid indexes within the limited range from [start, end]
Parameters
----------
start: int
start of subrange, inclusive
end: int
end of subrange, exclusive
Returns
-------
count: int
number of valid indexes
"""
assert 0 <= start <= end <= self.range_length
if self._shared_filter is None:
return end - start
if self._shared_filter_cache is None:
self._make_shared_filter_cache()
return self._shared_filter_cache[start:end].count(True)
def get_rand_index(self, start=None, end=None, to_exclude=None):
"""Returns a random valid index unlisted to exclusion
Parameters
----------
start: int, optional
specifying at which position to start, inclusive
end: int, optional
specifying at which position to end, exclusive
to_exclude: list, optional
determines unsuitable values
Returns
-------
rand: int
random index in the space
.. note::
Excluding all valid space indexes will lead to an infinite loop.
"""
start = start or 0
end = end or self.range_length
while True:
index = randrange(start, end)
if self.is_index_valid(index) and index not in (to_exclude or []):
return index
def get_next_index(self, index, n=1, start=None, end=None):
"""Returns the nth valid next index or None if out of range
Parameters
----------
index: int
specifying at which position to start, inclusive
n: int, optional
            step used to find the next index; for the opposite
            direction a negative number should be used
        start: int, optional
            start of subrange, inclusive
        end: int, optional
end of subrange, exclusive
Returns
-------
next: int
next index in the space
"""
assert n != 0
start = start or 0
end = end or self.range_length
if self._shared_filter is None:
index += n
if start <= index < end:
return index
return None
trend = 1 if n > 0 else -1
counter = abs(n)
while counter != 0:
index += trend
if index < start or index >= end:
return None
if self.is_index_valid(index):
counter -= 1
return index
def clear_cache(self):
"""Clears the cache of index validity"""
del self._shared_filter_cache
self._dims = None
self._length = None
self._range_length = None
self._shared_filter_cache = None
def _make_shared_filter_cache(self):
def apply(t):
entities = OrderedDict()
for name, space in self.space_map.items():
entities[name] = space[t % len(space)]
t //= len(space)
return bool(self._shared_filter(entities))
self._shared_filter_cache = tuple(apply(i) for i in range(self.range_length))
self._length = self._shared_filter_cache.count(True)
def point2knob(self, point):
"""Convert point form (single integer) to knob (vector)
Parameters
----------
point: int
point to convert
Returns
-------
knob: list
knob representation of the point
"""
knob = []
for dim in self.dims:
knob.append(point % dim)
point //= dim
return knob
def knob2point(self, knob):
"""Convert knob form (vector) to point form (single integer)
Parameters
----------
knob: list
knob to convert
Returns
-------
point: int
point of the knob representation
"""
point = 0
for j, k in enumerate(knob):
point += int(np.prod(self.dims[:j])) * k
return point
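    # Worked example of the point <-> knob mapping, assuming dims == [4, 3, 2]:
    #   point2knob(17) -> [17 % 4, (17 // 4) % 3, (17 // 12) % 2] == [1, 1, 1]
    #   knob2point([1, 1, 1]) -> 1 * 1 + 1 * 4 + 1 * 12 == 17
    # so the two functions are inverses of each other over the index range.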
def sample_ints(self, m):
"""
Sample m different integer numbers from [0, self.range_length) without replacement
        This function is an alternative to `np.random.choice` when self.range_length > 2 ** 32, in
which case numpy does not work.
Parameters
----------
m: int
The number of sampled int
Returns
-------
        ints: a numpy array of size m
"""
assert m <= len(self)
vis = set()
while len(vis) < m:
new = randrange(0, self.range_length)
if self.is_index_valid(new):
vis.add(new)
return np.fromiter(vis, int, len(vis))
def random_walk(self, point):
"""random walk as local transition
Parameters
----------
point: int
index of the ConfigEntity
Returns
-------
new_point: int
new neighborhood index
"""
# transform to knob form
old_knob = self.point2knob(point)
new_knob = old_knob.copy()
new_point = self.knob2point(new_knob)
# mutate
while new_knob == old_knob or not self.is_index_valid(new_point):
from_i = np.random.randint(len(old_knob))
to_v = np.random.randint(self.dims[from_i])
new_knob[from_i] = to_v
new_point = self.knob2point(new_knob)
# transform to index form
return new_point
def _add_new_transform(self, space_class, name, axes, policy, **kwargs):
"""Add a new transform space in template"""
        # If we are collecting the space (_collect == True) but a KNOB value was already
        # defined for "default" (fallback) scheduling before this call, there is no need
        # to create a new space and override the previously set KNOB value.
if kwargs.get("filter"):
self.clear_cache()
if self._collect and not (self.is_fallback and name in self._entity_map):
# convert schedule axis to space definition axis
axes = [x if isinstance(x, (VirtualAxis, Axis)) else self.axis(x) for x in axes]
# add subspace (knob)
space = space_class(axes, policy, **kwargs)
self.space_map[name] = space
self._entity_map[name] = space[0]
return [Axis(space, i) for i in range(space.num_output)]
return [Axis(None, i) for i in range(space_class.get_num_output(axes, policy, **kwargs))]
def __len__(self):
"""Returns the number of valid indexes in the space"""
if self._shared_filter is None:
return self.range_length
if self._shared_filter_cache is None:
self._make_shared_filter_cache()
return self._length
def get(self, index):
"""Get a config entity with detailed parameters from this space
Parameters
----------
index: int
index in the space
Returns
-------
config: ConfigEntity
config corresponds to the index
"""
if index < 0 or index >= self.range_length:
raise IndexError(
"Index out of range: size {}, got index {}".format(self.range_length, index)
)
if not self.is_index_valid(index):
raise IndexError(
"Index does not correspond to the multi-filter condition, got index {}. "
"Use is_index_valid to pre-check".format(index)
)
entities = OrderedDict()
t = index
for name, space in self.space_map.items():
entities[name] = space[t % len(space)]
t //= len(space)
ret = ConfigEntity(index, self.code_hash, entities, self._constraints)
return ret
def __iter__(self):
return self._entity_map.__iter__()
def __getitem__(self, name):
"""get the transform entity(knob) of this entity by name
do not use this to get a ConfigEntity of this space (should use ConfigSpace.get instead)
Parameters
----------
name: str
name of the transform
"""
return self._entity_map[name]
def __repr__(self):
res = "ConfigSpace (len={}, range_length={}, space_map=\n".format(
len(self), self.range_length
)
for i, (name, space) in enumerate(self.space_map.items()):
res += " %2d %s: %s\n" % (i, name, space)
return res + ")"
_ann_to_number = {
"none": 0,
"vec": 1,
"unroll": 2,
"blockIdx.x": 3,
"blockIdx.y": 4,
"blockIdx.z": 5,
"threadIdx.x": 6,
"threadIdx.y": 7,
"threadIdx.z": 8,
"vthread": 9,
"fuse": 10,
}
class ConfigEntity(ConfigSpace):
"""A configuration with detailed parameters
Parameters
----------
index: int
index of this config in space
code_hash: str
hash of schedule code
entity_map: dict
map name to transform entity
constraints : list
List of constraints
"""
def __init__(self, index, code_hash, entity_map, constraints):
super(ConfigEntity, self).__init__()
self.index = index
self._collect = False
self._entity_map = entity_map
self._space_map = None
self._constraints = constraints
self.code_hash = code_hash
def get_flatten_feature(self):
"""flatten entities to a numerical one-dimensional feature vector
Returns
-------
fea: np.array
one dimensional float32 array
"""
fea = []
for _, v in self._entity_map.items():
if isinstance(v, SplitEntity):
fea.extend(v.size)
elif isinstance(v, ReorderEntity):
# use a naive way: directly copy the permutation
fea.extend(v.perm)
elif isinstance(v, AnnotateEntity):
# one-hot encoding
for ann in v.anns:
tmp = [0] * len(_ann_to_number)
tmp[_ann_to_number[ann]] = 1
fea.extend(tmp)
elif isinstance(v, OtherOptionEntity):
fea.append(v.val)
return np.array(fea, dtype=np.float32)
def get_other_option(self):
"""
Returns
-------
other_option: dict
other tunable parameters (tunable parameters defined by `cfg.define_knob`)
"""
return {x: x.val for x in self._entity_map.values() if isinstance(x, OtherOptionEntity)}
def to_json_dict(self):
"""convert to a json serializable dictionary
Return
------
json_dict: dict
a json serializable dictionary
"""
ret = {}
ret["index"] = int(self.index)
ret["code_hash"] = self.code_hash
entity_map = []
for k, v in self._entity_map.items():
if isinstance(v, SplitEntity):
entity_map.append((k, "sp", v.size))
elif isinstance(v, ReorderEntity):
entity_map.append((k, "re", v.perm))
elif isinstance(v, AnnotateEntity):
entity_map.append((k, "an", v.anns))
elif isinstance(v, OtherOptionEntity):
entity_map.append((k, "ot", v.val))
else:
raise RuntimeError("Invalid entity instance: " + v)
ret["entity"] = entity_map
return ret
@staticmethod
def from_json_dict(json_dict):
"""Build a ConfigEntity from json serializable dictionary
Parameters
----------
json_dict: dict
Json serializable dictionary. This should be the return value
of :any:`to_json_dict`.
Returns
-------
config: ConfigEntity
The corresponding config object
"""
index = json_dict["index"]
code_hash = json_dict["code_hash"]
constraints = []
entity_map = OrderedDict()
for item in json_dict["entity"]:
key, knob_type, knob_args = item
if knob_type == "sp":
entity = SplitEntity(knob_args)
elif knob_type == "re":
entity = ReorderEntity(knob_args)
elif knob_type == "an":
entity = AnnotateEntity(knob_args)
elif knob_type == "ot":
entity = OtherOptionEntity(knob_args)
else:
raise RuntimeError("Invalid config knob type: " + knob_type)
entity_map[str(key)] = entity
return ConfigEntity(index, code_hash, entity_map, constraints)
def __repr__(self):
return "%s,%s,%d" % (str(self._entity_map)[12:-1], self.code_hash, self.index)
class FallbackConfigEntity(ConfigSpace):
"""The config entity created to support fallback"""
def __init__(self):
super(FallbackConfigEntity, self).__init__()
self.is_fallback = True
def fallback_split(self, name, constraints):
"""Fallback a split knob
Parameters
----------
name: str
name of the knob
constraints: List of int
The maximum tile size for every dimension. Value `-1` means no constraint.
Examples
--------
If you use cfg.define_split('tile_0', 128, num_outputs=3),
Then cfg.fallback_split('tile_0', [-1, 8, 4]) will give you cfg['tile_0'].size = [4, 8, 4]
If you use cfg.define_split('tile_0', 49, num_outputs=3),
Then cfg.fallback_split('tile_0', [-1, 8, 4]) will give you cfg['tile_0'].size = [7, 7, 1]
"""
space = self.space_map[name]
assert isinstance(space, SplitSpace)
assert len(constraints) == space.num_output
# '-1' means no constraint
constraints = [x if x != -1 else 1e10 for x in constraints]
entity = self._entity_map[name]
now = space.product
for i in reversed(range(space.num_output)):
factors = get_factors(now)
find = len(factors) - 1
for j, f in enumerate(factors):
if f > constraints[i]:
find = j - 1
break
if find >= 0:
entity.size[i] = factors[find]
now //= factors[find]
else:
raise RuntimeError("Cannot find feasible fallback split entity for node: " + name)
def fallback_with_reference_log(self, ref_log):
"""A data driven fallback mechanism.
We use tuned parameters from TopHub as reference data.
For an unseen shape, we find the most similar tuned one from TopHub and
mimic its parameters.
Note that we are not matching by workload (e.g., input size, kernel size),
but instead matching by configuration space. The idea is that if two workloads have
similar configuration space, their optimal configurations are also likely to be similar.
Parameters
----------
ref_log: List of (autotvm.measure.MeasureInput, autotvm.measure.MeasureResult)
The reference log
"""
knob_names = [x for x in self.space_map.keys() if isinstance(self.space_map[x], SplitSpace)]
# find best match config in reference data by matching tiling factors
factor_list = []
for knob_name in knob_names:
factor_list.append(get_factors(self.space_map[knob_name].product))
best_match_cfg = None
best_match_score = 0
for inp, _ in ref_log:
match_score = 0
for i, knob_name in enumerate(knob_names):
factors = get_factors(int(np.prod(inp.config[knob_name].size)))
match_score += float(len(set(factor_list[i]).intersection(factors))) / len(
factor_list[i]
)
if match_score > best_match_score:
best_match_score, best_match_cfg = match_score, inp.config
if best_match_cfg is None:
return
# mimic its tiling strategy
for knob_name in knob_names:
constraint = list(best_match_cfg[knob_name].size)
constraint[0] = -1
self.fallback_split(knob_name, constraint)
# copy other knobs
for knob_name in self.space_map.keys():
if not isinstance(self.space_map[knob_name], SplitSpace):
self._entity_map[knob_name] = best_match_cfg[knob_name]
def __setitem__(self, name, entity):
"""set the entity(knob) of by name
Parameters
----------
name: str
name of the entity
entity: SplitEntity, ReorderEntity, AnnotateEntity, OtherOptionEntity
value of the entity
"""
self._entity_map[name] = entity
def __repr__(self):
return "%s,%s" % (str(self._entity_map)[12:-1], self.code_hash)
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/task/task.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,not-callable
"""Definition of task function.
A task can be constructed from a tuple of func, args, and kwargs.
func is a stateless function, or a string that
refers to a registered standard task.
"""
import functools
import numpy as np
from tvm import runtime
from tvm.ir import container
from tvm.target import Target
from tvm.te import placeholder, tensor
from tvm.tir import expr
from ..utils import get_const_int, get_const_tuple
from .dispatcher import ApplyConfig, DispatchContext
from .space import ConfigSpace
def _lookup_task(name):
task = TASK_TABLE.get(name)
if task is None:
# Unable to find the given task. This might be because we are
# creating a task based on a name that has not been imported.
# Rather than raising an exception here, we return a dummy
# task which cannot be invoked.
task = MissingTask(name)
return task
def serialize_args(args):
"""serialize arguments of a topi function to a hashable tuple.
Parameters
----------
args: list of hashable or Tensor
"""
def _encode(x):
if isinstance(x, tensor.Tensor):
return ("TENSOR", get_const_tuple(x.shape), x.dtype)
if isinstance(x, (tuple, list, container.Array)):
return tuple([_encode(a) for a in x])
if isinstance(x, (str, int, float, expr.Var, expr.Any)):
return x
if isinstance(x, (expr.StringImm, expr.IntImm, expr.FloatImm)):
return x.value
if isinstance(x, runtime.container.String):
return str(x)
if x is None:
return None
raise RuntimeError(
'Do not support type "%s" in argument. Consider to use'
"primitive types or tvm.tir.Var only" % type(x)
)
ret = []
for t in args:
ret.append(_encode(t))
return tuple(ret)
def deserialize_args(args):
"""The inverse function of :code:`serialize_args`.
Parameters
----------
args: list of hashable or Tensor
"""
ret = []
for t in args:
if isinstance(t, tuple) and t[0] == "TENSOR":
ret.append(placeholder(shape=t[1], dtype=t[2]))
else:
ret.append(t)
return ret
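# Illustrative round trip (hypothetical placeholder):
#   A = te.placeholder((128, 64), name="A", dtype="float32")
#   serialize_args([A, 8, "float32"])
#     -> (("TENSOR", (128, 64), "float32"), 8, "float32")
# deserialize_args rebuilds a fresh placeholder of the same shape/dtype in place of
# the "TENSOR" tuple and passes the other values through unchanged.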
def args_to_workload(args, task_name=None):
"""Convert argument list to hashable workload tuple.
    This function converts lists to tuples, tvm nodes to python values and
    flattens te.tensor.Tensor to a hashable tuple
Parameters
----------
    args : list of args
        The arguments to the function
    task_name : str, optional
        The AutoTVM task name
Returns
-------
ret: hashable
The hashable value
"""
return (task_name,) + serialize_args(args) if task_name is not None else serialize_args(args)
class Task(object):
"""A Tunable Task
Parameters
----------
name: str
The name of the task.
args: Tuple
Positional argument of func
"""
def __init__(self, name, args):
self.name = name
self.args = args
self.kwargs = {} # currently unused
# init null config space
self.config_space = None
self.func = _lookup_task(name)
# auxiliary info, available after `init_space` is called
self.flop = None
self.target = None
self.target_host = None
@property
def workload(self):
return (self.name,) + serialize_args(self.args)
def instantiate(self, config):
"""Instantiate this task function (template) with a config.
Returns corresponding schedule.
Parameters
----------
config: template.ConfigEntity
parameter config for this template
Returns
-------
sch: tvm.te.schedule.Schedule
The tvm schedule
arg_bufs: Array of te.tensor.Tensor
The input/output buffers
"""
config.flop = 0
with ApplyConfig(config):
sch, arg_bufs = self.func(*self.args, **self.kwargs)
if not self.flop:
config.flop = config.flop or compute_flop(sch)
self.flop = config.flop
return sch, arg_bufs
def __getstate__(self):
        # A custom pickle implementation is required for
        # some unpicklable local task functions.
        # We serialize the function with cloudpickle here
        # and restore it with cloudpickle when unpickling.
import cloudpickle # pylint: disable=import-outside-toplevel
self.target, self.target_host = Target.canon_target_and_host(self.target, self.target_host)
return {
"name": self.name,
"args": self.args,
"kwargs": self.kwargs,
"config_space": self.config_space,
"flop": self.flop,
"target": self.target,
"target_host": self.target_host,
"func": cloudpickle.dumps(self.func),
}
def __setstate__(self, state):
import cloudpickle # pylint: disable=import-outside-toplevel
self.name = state["name"]
self.args = state["args"]
self.kwargs = state["kwargs"]
self.config_space = state["config_space"]
self.func = cloudpickle.loads(state["func"])
self.flop = state["flop"]
self.target, self.target_host = Target.canon_target_and_host(
state["target"], state["target_host"]
)
def __repr__(self):
return "Task(func_name=%s, args=%s, kwargs=%s, workload=%s)" % (
self.name,
self.args,
self.kwargs,
self.workload,
)
TASK_TABLE = {}
class TaskTemplate(object):
"""
    Task template is used to create a tunable AutoTVM task.
    It can be defined by a pair of compute and schedule functions using
    `_register_task_compute` and `_register_task_schedule`,
    or by a customized task creation function that is more flexible, using
    `_register_customized_task`.
    Note that when a customized func is registered, the compute and schedule functions
    will be ignored
"""
def __init__(self):
self.fcompute = None
self.fschedule = None
self.fcustomized = None
def __call__(self, *args, **kwargs):
args = deserialize_args(args)
if self.fcustomized is None:
return self._default_func(*args, **kwargs)
assert callable(self.fcustomized)
return self.fcustomized(*args, **kwargs)
def _default_func(self, *args, **kwargs):
assert callable(self.fcompute) and callable(self.fschedule)
out = self.fcompute(*args, **kwargs)
arg_bufs = [out] + self._get_inputs(out)
s = self.fschedule([out])
return s, arg_bufs
@staticmethod
def _get_inputs(out):
inputs = []
queue = [out]
hash_set = set()
while queue:
t = queue.pop(0)
if isinstance(t.op, tensor.PlaceholderOp):
inputs.append(t)
else:
input_tensors = [t for t in t.op.input_tensors if t not in hash_set]
queue.extend(input_tensors)
hash_set.update(input_tensors)
return inputs
class MissingTask(TaskTemplate):
"""
Dummy task template for a task lookup which cannot be resolved.
This can occur if the task being requested from _lookup_task()
has not been imported in this run.
"""
def __init__(self, taskname: str):
super().__init__()
self._taskname = taskname
def __call__(self, *args, **kwargs):
raise RuntimeError(
f"Attempting to invoke a missing task {self._taskname}."
"It is possible that the function is registered in a "
"Python module that is not imported in this run, or the log is out-of-date."
)
def _register_task_compute(name, func=None):
"""Register compute function to autotvm task
Parameters
----------
name: str
The task name
func: None or callable
If it is None, return a decorator.
        If it is callable, decorate this function.
Returns
-------
decorator: callable
A decorator
"""
def _do_reg(f):
if name not in TASK_TABLE:
TASK_TABLE[name] = TaskTemplate()
tmpl = TASK_TABLE[name]
if tmpl.fcompute is not None:
raise ValueError("Compute is already registered in autoTVM task %s" % name)
tmpl.fcompute = f
return f
if func:
return _do_reg(func)
return _do_reg
def _register_task_schedule(name, func=None):
"""Register schedule function to autotvm task
Parameters
----------
name: str
The task name
func: None or callable
If it is None, return a decorator.
        If it is callable, decorate this function.
Returns
-------
decorator: callable
A decorator
"""
def _do_reg(f):
if name not in TASK_TABLE:
TASK_TABLE[name] = TaskTemplate()
tmpl = TASK_TABLE[name]
if tmpl.fschedule is not None:
raise ValueError("Schedule is already registered in autoTVM task %s" % name)
tmpl.fschedule = f
return f
if func:
return _do_reg(func)
return _do_reg
def _register_customized_task(name, func=None):
"""Register a customized function to AutoTVM task.
Parameters
----------
name: str
The task name
func: None or callable
If it is None, return a decorator.
        If it is callable, decorate this function.
Returns
-------
decorator: callable
A decorator
"""
def _do_reg(f):
if name not in TASK_TABLE:
TASK_TABLE[name] = TaskTemplate()
tmpl = TASK_TABLE[name]
if tmpl.fcustomized is not None:
raise ValueError("Customized func is already registered in autoTVM task %s" % name)
tmpl.fcustomized = f
return f
if func:
return _do_reg(func)
return _do_reg
def template(task_name, func=None):
"""Decorate a function as a tunable schedule template.
Parameters
----------
task_name: str
The task name
func: None or callable
A callable template function.
If it is None, return a decorator.
        If it is callable, decorate this function.
Returns
-------
func: callable
The decorated function
Examples
--------
The following code is a tunable template for a blocked matrix multiplication
.. code-block:: python
@autotvm.template("matmul")
def matmul(N, L, M, dtype):
A = te.placeholder((N, L), name='A', dtype=dtype)
B = te.placeholder((L, M), name='B', dtype=dtype)
k = te.reduce_axis((0, L), name='k')
C = te.compute((N, M), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k), name='C')
s = te.create_schedule(C.op)
# schedule
y, x = s[C].op.axis
k = s[C].op.reduce_axis[0]
##### define space begin #####
cfg = autotvm.get_config()
cfg.define_split("tile_y", y, num_outputs=2)
cfg.define_split("tile_x", x, num_outputs=2)
##### define space end #####
# schedule according to config
yo, yi = cfg["tile_y"].apply(s, C, y)
xo, xi = cfg["tile_x"].apply(s, C, x)
s[C].reorder(yo, xo, k, yi, xi)
return s, [A, B, C]
"""
def _decorate(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
assert not kwargs, "Do not support kwargs in template function call"
workload = args_to_workload(args, task_name)
tgt = Target.current()
cfg = DispatchContext.current.query(tgt, workload)
with ApplyConfig(cfg):
return f(*args, **kwargs)
_register_customized_task(task_name, f)
return wrapper
if func:
return _decorate(func)
return _decorate
def create(task_name, args, target, target_host=None):
"""Create a tuning task and initialize its search space
Parameters
----------
task_name : str
The AutoTVM task name
args : List
Positional arguments
target : Target
The compilation target
target_host: Target, optional
The compilation target for host side
Returns
-------
tsk: Task
a task object
"""
args = serialize_args(args)
ret = Task(task_name, args)
target, target_host = Target.canon_target_and_host(target, target_host)
# init config space
ret.config_space = ConfigSpace()
ctx = ApplyConfig(ret.config_space)
with ctx:
with target:
sch, _ = ret.func(*args)
ret.config_space.code_hash = getattr(sch, "code_hash", None)
ret.flop = ret.config_space.flop or compute_flop(sch)
ret.target = target
ret.target_host = target_host
return ret
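# Illustrative usage sketch, assuming the "matmul" template shown in the `template`
# docstring above has been registered:
#   task = create("matmul", args=(1024, 1024, 1024, "float32"), target="llvm")
#   print(task.config_space)   # the initialized search space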
def get_config():
"""Get current config object
Returns
-------
cfg: ConfigSpace or ConfigEntity
The current config
"""
tgt = Target.current(allow_none=True)
return DispatchContext.current.query(tgt, None)
class FlopCalculationError(RuntimeError):
"""Error happens when estimating FLOP for a compute op"""
def compute_flop(sch):
"""Calculate number of FLOP (floating number operations) of the compute ops in a schedule
Parameters
----------
sch: tvm.te.schedule.Schedule
schedule
Returns
-------
flop: int
number of FLOP in this schedule
"""
def _prod_length(axes):
"""compute product of the lengths of a list of axes"""
try:
num_iter = int(np.prod([get_const_int(axis.dom.extent) for axis in axes]))
except ValueError:
raise FlopCalculationError("The length of axis is not constant. ")
return num_iter
def _count_flop(exp):
"""compute flop for a single expression"""
if isinstance(exp, expr.Reduce):
num_iter = _prod_length(exp.axis)
combiner = exp.combiner.result
source = exp.source
if len(combiner) != 1:
raise FlopCalculationError("Found multiple output in the combiner of reduce op")
if len(source) != 1:
raise FlopCalculationError("Found multiple output in the source of reduce op")
return num_iter * (_count_flop(combiner[0]) + _count_flop(source[0]))
if isinstance(exp, (expr.FloatImm, expr.IntImm)):
return 0
if isinstance(exp, expr.Cast):
return _count_flop(exp.value)
if isinstance(exp, expr.Var):
return 0
if isinstance(
exp,
(
expr.Add,
expr.Sub,
expr.Mul,
expr.Div,
expr.Mod,
expr.FloorDiv,
expr.FloorMod,
expr.Max,
expr.Min,
expr.EQ,
expr.NE,
expr.LT,
expr.LE,
expr.GT,
expr.GE,
expr.And,
expr.Or,
expr.Not,
),
):
base = 1
if isinstance(exp, expr.Not): # unary
return base + _count_flop(exp.a)
return base + _count_flop(exp.a) + _count_flop(exp.b)
if isinstance(exp, expr.Select):
return _count_flop(exp.condition) + max(
_count_flop(exp.true_value), _count_flop(exp.false_value)
)
if isinstance(exp, expr.ProducerLoad):
# Ignore flops from indexing expressions.
return 0
if isinstance(exp, expr.Call):
return sum([_count_flop(x) for x in exp.args])
raise FlopCalculationError("Found unsupported operator in the compute expr")
def traverse(ops):
"""accumulate flops"""
ret = 0
for op in ops:
if isinstance(op, tensor.ComputeOp):
num_element = _prod_length(op.axis)
body = op.body
if len(body) != 1:
raise FlopCalculationError("Found multiple output in the compute")
exp = body[0]
ret += num_element * _count_flop(exp)
ret += traverse([t.op for t in op.input_tensors])
elif isinstance(op, tensor.PlaceholderOp):
pass
else:
raise FlopCalculationError(
f"{op.name} is not supported by autotvm. "
"Only support te.compute currently. "
"Other ops like tvm.te.scan/te.extern is not supported"
)
return ret
try:
ret = traverse(sch.outputs)
except FlopCalculationError as exc:
raise RuntimeError(
"FLOP estimator fails for this operator. Error msg: "
+ str(exc)
+ ". Please use `cfg.add_flop` to manually set "
"FLOP for this operator"
)
if ret == 0:
raise RuntimeError(
"Cannot find float number operation in this operator. "
"Please use `cfg.add_flop` to manually set "
"FLOP for this operator"
)
return ret
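# Worked example: for the matmul template above, C has N * M output elements and each
# element reduces over L iterations with one multiply and one add, so compute_flop(s)
# returns N * M * L * (1 + 1) = 2 * N * L * M.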
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/task/topi_integration.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name,unused-argument
"""
Decorators for registering tunable templates to TOPI.
These decorators make a simple implementation able to use different configurations
for different workloads.
Here we directly use all arguments to the TOPI call as the "workload", so make sure all the arguments
(except tvm.te.Tensor) in your calls are hashable. For tvm.te.Tensor,
we will serialize it to a hashable tuple.
See tvm/topi/python/topi/arm_cpu/depthwise_conv2d.py for example usage.
"""
import functools
import tvm.te._ffi_api
from tvm.target import Target
from tvm.te import tensor
from .task import (
args_to_workload,
serialize_args,
DispatchContext,
_register_task_compute,
_register_task_schedule,
)
# Task extractor for relay program
class TaskExtractEnv:
"""Global environment for extracting tuning tasks from graph"""
current = None
registered = None
def __init__(self, allow_duplicate=False):
self.allow_duplicate = allow_duplicate
self.task_collection = []
self.wanted_relay_ops = None
self.modified_funcs = []
self.tracing = False
def __enter__(self):
self.task_collection = []
self.tracing = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.tracing = False
def reset(self, wanted_relay_ops=None):
"""Reset task collections
Parameters
----------
wanted_relay_ops: List of tvm.ir.Op
The relay ops to be extracted
"""
self.task_collection = []
self.wanted_relay_ops = wanted_relay_ops
def add_task(self, task_name, args):
"""Add AutoTVM task
Parameters
----------
task_name: str
AutoTVM task name.
args: tuple
Arguments to the TOPI function.
"""
key = (task_name, serialize_args(args))
if self.allow_duplicate or key not in self.task_collection:
self.task_collection.append(key)
def get_tasks(self):
"""Get collected tasks
Returns
-------
tasks: List of tuple(name, args)
A list of tasks extracted from the graph
"""
return self.task_collection
@staticmethod
def get(allow_duplicate=False):
"""Get the single instance of TaskExtractEnv
Parameters
----------
allow_duplicate : boolean
Whether to fetch all workloads in the network,
even though some of them are the same. This is
useful for graph tuning.
Returns
-------
env: TaskExtractEnv
The single instance of TaskExtractEnv
"""
if not TaskExtractEnv.current:
TaskExtractEnv.current = TaskExtractEnv(allow_duplicate)
else:
TaskExtractEnv.current.allow_duplicate = allow_duplicate
return TaskExtractEnv.current
def register_topi_compute(task_name, func=None):
"""Register a tunable template for a topi compute function.
The registration will wrap this topi compute to take `cfg` as the first argument,
    followed by the original argument list. It uses all of its arguments as the workload and
    stores this "workload" on its final ComputeOp, which can be used to reconstruct the
"workload" in the following topi_schedule call.
Parameters
----------
task_name: str
The AutoTVM task name
func: None or callable
If it is None, return a decorator.
        If it is callable, decorate this function.
Returns
-------
decorator: callable
A decorator
Examples
--------
See tvm/topi/python/topi/arm_cpu/depthwise_conv2d.py for example usage.
"""
def _decorate(topi_compute):
@functools.wraps(topi_compute)
@_register_task_compute(task_name)
def wrapper(*args, **kwargs):
"""wrapper function for topi compute"""
assert not kwargs, "Do not support kwargs in template function call"
task_env = TaskExtractEnv.current
if task_env is not None and task_env.tracing:
task_env.add_task(task_name, args)
workload = args_to_workload(args, task_name)
tgt = Target.current()
cfg = DispatchContext.current.query(tgt, workload)
node = topi_compute(cfg, *args)
# attach workload to return op
op = node.op
attrs = {}
for k, v in node.op.attrs.items():
attrs[k] = v
attrs["workload"] = workload
if isinstance(op, tensor.ComputeOp):
op = tvm.te._ffi_api.ComputeOp(op.name, op.tag, attrs, op.axis, op.body)
elif isinstance(op, tensor.ExternOp):
op = tvm.te._ffi_api.ExternOp(
op.name,
op.tag,
attrs,
op.inputs,
op.input_placeholders,
op.output_placeholders,
op.body,
)
else:
raise RuntimeError("Unsupported op type: " + str(type(op)))
if isinstance(node, tensor.Tensor):
return op.output(0)
return [op.output(i) for i in range(len(node))]
return wrapper
if func:
return _decorate(func)
return _decorate
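# Illustrative sketch of the intended pairing (hypothetical task name "dense_example.x86"):
#
#   @register_topi_compute("dense_example.x86")
#   def dense_example(cfg, data, weight):
#       ...  # compute definition that reads cfg
#
#   @register_topi_schedule("dense_example.x86")
#   def schedule_dense_example(cfg, outs):
#       ...  # schedule that reads the same cfg
#
# The compute wrapper stores the workload on its output op, and the schedule wrapper
# recovers it via get_workload to query the dispatch context.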
def register_topi_schedule(task_name, func=None):
"""Register a tunable template for a topi schedule function.
The registration will wrap this topi schedule to take `cfg` as the first argument,
followed by the original argument list.
Note that this function will try to find "workload" from all the ComputeOp in the input.
You can attach "workload" to your compute op by using :any:`register_topi_compute`.
The task name has to be the same as that of the corresponding topi compute function.
Parameters
----------
task_name: str
The AutoTVM task name
func: None or callable
If it is None, return a decorator.
        If it is callable, decorate this function.
Returns
-------
decorator: callable
A decorator
Examples
--------
See tvm/topi/python/topi/arm_cpu/depthwise_conv2d.py for example usage.
"""
def _decorate(topi_schedule):
@functools.wraps(topi_schedule)
@_register_task_schedule(task_name)
def wrapper(outs, *args, **kwargs):
"""wrapper function for topi schedule"""
workload = get_workload(outs, task_name)
if workload is None:
raise RuntimeError(
f"Cannot find TOPI workload {task_name}. "
"Is it registered with `register_topi_compute`?"
)
tgt = Target.current()
cfg = DispatchContext.current.query(tgt, workload)
return topi_schedule(cfg, outs, *args, **kwargs)
return wrapper
if func:
return _decorate(func)
return _decorate
def get_workload(outs, task_name=None):
"""Retrieve the workload from outputs"""
def traverse(tensors):
"""traverse all ops to find attached workload"""
for t in tensors:
op = t.op
wkl = traverse(op.input_tensors)
if wkl is not None:
return wkl
if "workload" in op.attrs:
ret = args_to_workload(op.attrs["workload"])
if task_name is None or ret[0] == task_name:
return ret
return None
outs = [outs] if isinstance(outs, tensor.Tensor) else outs
return traverse(outs)
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/testing/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing utilities for autotvm"""
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/testing/tune_relay.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import argparse
import json
import os
import warnings
from distutils.util import strtobool
import tvm
from tvm import autotvm
from tvm import meta_schedule as ms
from tvm import relay
from tvm.autotvm.graph_tuner import DPTuner
from tvm.autotvm.tuner import XGBTuner
from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc
from tvm.meta_schedule.testing.relay_workload import get_network
from tvm.meta_schedule.testing.tune_utils import create_timer, generate_input_data
from tvm.support import describe
def _parse_args():
args = argparse.ArgumentParser()
args.add_argument(
"--workload",
type=str,
required=True,
help="The name of the workload to tune. Supported models: "
"https://github.com/apache/tvm/blob/main/python/tvm/meta_schedule/testing/relay_workload.py#L303-L322", # pylint: disable=line-too-long
)
args.add_argument(
"--input-shape",
type=str,
required=True,
help="The input shape of the workload. Example: '[1, 3, 224, 224]'",
)
args.add_argument(
"--target",
type=str,
required=True,
help="The target device to tune. "
"Example: 'aws/cpu/c5.9xlarge', 'nvidia/nvidia-v100', 'nvidia/geforce-rtx-3090'",
)
args.add_argument(
"--num-trials",
type=int,
required=True,
help="The number of trials per kernel. Example: 800",
)
args.add_argument(
"--rpc-host",
type=str,
required=True,
help="The host address of the RPC tracker. Example: 192.168.6.66",
)
args.add_argument(
"--rpc-port",
type=int,
required=True,
help="The port of the RPC tracker. Example: 4445",
)
args.add_argument(
"--rpc-key",
type=str,
required=True,
help="The key of the RPC tracker. Example: '3090ti'",
)
args.add_argument(
"--work-dir",
type=str,
required=True,
help="The working directory to store the tuning logs. Example: '/tmp/tune_relay'",
)
args.add_argument(
"--layout",
type=str,
default=None,
help="The layout of the workload. Example: 'NCHW', 'NHWC'",
)
args.add_argument(
"--cache-dir",
type=str,
default=None,
)
args.add_argument(
"--number",
type=int,
default=3,
)
args.add_argument(
"--repeat",
type=int,
default=1,
)
args.add_argument(
"--min-repeat-ms",
type=int,
default=100,
)
args.add_argument(
"--cpu-flush",
type=lambda x: bool(strtobool(x)),
help="example: True / False",
required=True,
)
args.add_argument(
"--graph-tuner",
type=lambda x: bool(strtobool(x)),
help="example: True / False",
required=True,
)
args.add_argument(
"--backend",
type=str,
choices=["graph", "vm"],
help="example: graph / vm",
required=True,
)
parsed = args.parse_args()
parsed.target = tvm.target.Target(parsed.target)
parsed.input_shape = json.loads(parsed.input_shape)
parsed.rpc_config = ms.runner.RPCConfig(
tracker_host=parsed.rpc_host,
tracker_port=parsed.rpc_port,
tracker_key=parsed.rpc_key,
session_timeout_sec=600,
)
return parsed
ARGS = _parse_args()
def main():
if ARGS.target.kind.name != "llvm" and ARGS.graph_tuner:
raise ValueError("GraphTuner only supports llvm target")
if ARGS.target.kind.name != "llvm" and ARGS.cpu_flush:
raise ValueError("cpu_flush only supports llvm target")
if ARGS.target.kind.name == "llvm" and not ARGS.cpu_flush:
warnings.warn("cpu_flush is not enabled for llvm target")
log_file = os.path.join(ARGS.work_dir, f"{ARGS.workload}.json")
graph_opt_sch_file = os.path.join(ARGS.work_dir, f"{ARGS.workload}_graph_opt.log")
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.RPCRunner(
key=ARGS.rpc_key,
host=ARGS.rpc_host,
port=ARGS.rpc_port,
number=ARGS.number,
repeat=ARGS.repeat,
min_repeat_ms=ARGS.min_repeat_ms,
enable_cpu_cache_flush=ARGS.cpu_flush,
),
)
describe()
print(f"Workload: {ARGS.workload}")
mod, params, (input_name, input_shape, input_dtype) = get_network(
ARGS.workload,
ARGS.input_shape,
layout=ARGS.layout,
cache_dir=ARGS.cache_dir,
)
input_info = [
{
"name": input_name,
"shape": input_shape,
"dtype": input_dtype,
},
]
input_data = {
item["name"]: generate_input_data(item["shape"], item["dtype"]) for item in input_info
}
for item in input_info:
print(f" input_name : {item['name']}")
print(f" input_shape: {item['shape']}")
print(f" input_dtype: {item['dtype']}")
with ms.Profiler() as profiler:
with ms.Profiler.timeit("TaskExtraction"):
# extract workloads from relay program
tasks = autotvm.task.extract_from_program(
mod["main"],
target=ARGS.target,
params=params,
ops=(
relay.op.get("nn.conv2d"),
relay.op.get("nn.conv3d"),
relay.op.get("nn.conv2d_transpose"),
relay.op.get("nn.dense"),
relay.op.get("nn.batch_matmul"),
),
)
for i, task in enumerate(tasks):
print(f"Task {i} {task.name}: {task}")
with ms.Profiler.timeit("Tuning"):
if ARGS.num_trials > 0:
for i, task in enumerate(tasks):
prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
tuner_obj = XGBTuner(task, loss_type="rank")
n_trial = min(len(task.config_space), ARGS.num_trials)
tuner_obj.tune(
n_trial=n_trial,
early_stopping=800,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(n_trial, prefix=prefix),
autotvm.callback.log_to_file(log_file),
],
)
if ARGS.graph_tuner:
executor = DPTuner(
graph=mod["main"],
input_shapes={input_name: input_shape},
records=log_file,
target_ops=[
relay.op.get("nn.conv2d"),
],
target=ARGS.target,
)
executor.benchmark_layout_transform(min_exec_num=1000)
executor.run()
executor.write_opt_sch2record_file(graph_opt_sch_file)
relay_build = {"graph": relay.build, "vm": relay.vm.compile}[ARGS.backend]
with ms.Profiler.timeit("PostTuningCompilation"):
if ARGS.graph_tuner:
ctx = autotvm.apply_graph_best(graph_opt_sch_file)
else:
ctx = autotvm.apply_history_best(log_file)
with ctx:
print("compile...")
with tvm.transform.PassContext(opt_level=3):
lib = relay_build(mod, target=ARGS.target, params=params)
print("Tuning Time:")
print(profiler.table())
run_module_via_rpc(
rpc_config=ARGS.rpc_config,
lib=lib,
dev_type=ARGS.target.kind.name,
args=input_data,
continuation=create_timer(ARGS.backend),
backend=ARGS.backend,
)
if __name__ == "__main__":
main()
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/tophub.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
TopHub: Tensor Operator Hub
To get the best performance, we typically need auto-tuning for the specific devices.
TVM releases pre-tuned parameters in TopHub for some common networks and hardware targets.
TVM will download these parameters for you when you call relay.build.
"""
# pylint: disable=invalid-name
import logging
from os import getenv
import sys
from pathlib import Path
from tvm.ir.container import Array
from .task import ApplyHistoryBest
from ..target import Target
from ..contrib.download import download
from .record import load_from_file
from .utils import EmptyContext
# environment variable to read TopHub location
AUTOTVM_TOPHUB_LOC_VAR = "TOPHUB_LOCATION"
# default location of TopHub
AUTOTVM_TOPHUB_DEFAULT_LOC = "https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub"
# value of AUTOTVM_TOPHUB_LOC_VAR to specify to not read from TopHub
AUTOTVM_TOPHUB_NONE_LOC = "NONE"
# root path to store TopHub files
AUTOTVM_TOPHUB_ROOT_PATH = Path(Path("~").expanduser(), ".tvm", "tophub")
# the version of each package
PACKAGE_VERSION = {
"arm_cpu": "v0.08",
"llvm": "v0.04",
"cuda": "v0.10",
"rocm": "v0.05",
"opencl": "v0.04",
"mali": "v0.06",
"intel_graphics": "v0.02",
"vta": "v0.10",
"amd_apu": "v0.01",
}
logger = logging.getLogger("autotvm")
def _alias(name):
"""convert alias for some packages"""
table = {
"vtacpu": "vta",
"webgpu": "opencl",
"vulkan": "opencl",
"nvptx": "cuda",
"amd_apu": "amd_apu",
}
return table.get(name, name)
def _get_tophub_location():
location = getenv(AUTOTVM_TOPHUB_LOC_VAR, None)
return AUTOTVM_TOPHUB_DEFAULT_LOC if location is None else location
def context(target, extra_files=None):
"""Return the dispatch context with pre-tuned parameters.
This function will load the corresponding *.log files in AUTOTVM_TOPHUB_ROOT_PATH.
If they cannot be found locally, it will download them from the TopHub github repo.
Users can also add their own files via the argument `extra_files`.
Parameters
----------
target: Target or List of Target
The compilation targets
extra_files: list of str, optional
Extra log files to load
"""
tophub_location = _get_tophub_location()
if tophub_location == AUTOTVM_TOPHUB_NONE_LOC:
return EmptyContext()
best_context = ApplyHistoryBest([])
targets = target if isinstance(target, (Array, list, tuple)) else [target]
for tgt in targets:
if isinstance(tgt, str):
tgt = Target(tgt)
possible_names = []
device = tgt.attrs.get("device", "")
if device != "":
possible_names.append(_alias(device))
possible_names.append(tgt.kind.name)
all_packages = list(PACKAGE_VERSION.keys())
for name in possible_names:
name = _alias(name)
if name in all_packages:
if not check_backend(tophub_location, name):
continue
filename = "%s_%s.log" % (name, PACKAGE_VERSION[name])
best_context.load(Path(AUTOTVM_TOPHUB_ROOT_PATH, filename))
break # only load one file to avoid some fallback template mismatch problem
if extra_files:
for filename in extra_files:
best_context.load(filename)
return best_context
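# Illustrative usage sketch (not part of the original module; the target string, mod, params,
# and the extra log file name are assumptions): wrap compilation with the TopHub context so
# that relay.build picks up pre-tuned schedules when they are available.
#
#     with context(Target("cuda"), extra_files=["my_tuning.log"]):
#         lib = relay.build(mod, target="cuda", params=params)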
def check_backend(tophub_location, backend):
"""Check whether have pre-tuned parameters of the certain target.
If not, will download it.
Parameters
----------
backend: str
The name of backend.
Returns
----------
success: bool
Whether the check is successful.
"""
backend = _alias(backend)
assert backend in PACKAGE_VERSION, 'Cannot find backend "%s" in TopHub' % backend
version = PACKAGE_VERSION[backend]
package_name = "%s_%s.log" % (backend, version)
if Path(AUTOTVM_TOPHUB_ROOT_PATH, package_name).is_file():
return True
# pylint: disable=import-outside-toplevel
if sys.version_info >= (3,):
import urllib.request as urllib2
else:
import urllib2
try:
download_package(tophub_location, package_name)
return True
except urllib2.URLError as e:
logging.warning("Failed to download tophub package for %s: %s", backend, e)
return False
def download_package(tophub_location, package_name):
"""Download pre-tuned parameters of operators for a backend
Parameters
----------
tophub_location: str
The location to download TopHub parameters from
package_name: str
The name of package
"""
rootpath = Path(AUTOTVM_TOPHUB_ROOT_PATH)
rootpath.mkdir(parents=True, exist_ok=True)
download_url = "{0}/{1}".format(tophub_location, package_name)
logger.info("Download pre-tuned parameters package from %s", download_url)
download(download_url, Path(rootpath, package_name), overwrite=True)
# global cache for load_reference_log
REFERENCE_LOG_CACHE = {}
def load_reference_log(backend, model, workload_name):
"""Load reference log from TopHub to support fallback in template.
Templates will use these reference logs to choose a fallback config.
Parameters
----------
backend: str
The backend name
model: str
The name of the device model
workload_name: str
The name of the workload. (The first item in the workload tuple)
"""
backend = _alias(backend)
if backend not in PACKAGE_VERSION:
return []
version = PACKAGE_VERSION[backend]
package_name = "%s_%s.log" % (backend, version)
filename = Path(AUTOTVM_TOPHUB_ROOT_PATH, package_name)
global REFERENCE_LOG_CACHE
key = (backend, model, workload_name)
if key not in REFERENCE_LOG_CACHE:
tmp = []
# If TOPHUB_LOCATION is not AUTOTVM_TOPHUB_NONE_LOC,
# download the config file from TopHub if it does not exist.
if not Path(filename).exists():
tophub_location = _get_tophub_location()
if tophub_location != AUTOTVM_TOPHUB_NONE_LOC:
download_package(tophub_location, package_name)
if Path(filename).is_file(): # in case download failed
find = False
inp = None
counts = {}
for inp, res in load_from_file(filename):
counts[inp.target.model] = counts.get(inp.target.model, 0) + 1
if model == inp.target.model:
find = True
break
# if the device model is not found, use the device model with the most tuned workloads
if not find and counts:
model = max(counts.items(), key=lambda k: k[1])[0]
for inp, res in load_from_file(filename):
if model == inp.target.model and inp.task.workload[0] == workload_name:
tmp.append((inp, res))
REFERENCE_LOG_CACHE[key] = tmp
return REFERENCE_LOG_CACHE[key]
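# Illustrative sketch (the device model and workload name below are assumptions): templates
# can seed their fallback configuration from the pre-tuned reference records, e.g.
#
#     ref_log = load_reference_log("cuda", "titanx", "conv2d_nchw.cuda")
#     # ref_log is a list of (MeasureInput, MeasureResult) pairs for that workload name.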
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/tuner/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
A tuner takes a task as input. It proposes some promising :any:`ConfigEntity`
in the :any:`ConfigSpace` and measures them on the real hardware. Then it
proposes the next batch of :any:`ConfigEntity` according to the measurement results.
This tuning loop is repeated.
"""
from . import callback
from .tuner import Tuner
from .index_based_tuner import GridSearchTuner, RandomTuner
from .ga_tuner import GATuner
from .xgboost_tuner import XGBTuner
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/tuner/callback.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=consider-using-enumerate,invalid-name
"""Namespace of callback utilities of AutoTVM"""
import sys
import time
import logging
import numpy as np
from .. import record
from ..utils import format_si_prefix
logger = logging.getLogger("autotvm")
def log_to_file(file_out, protocol="json"):
"""Log the tuning records into file.
The rows of the log are stored in the format of autotvm.record.encode.
Parameters
----------
file_out : File or str
The file to log to.
protocol: str, optional
The log protocol. Can be 'json' or 'pickle'
Returns
-------
callback : callable
Callback function to do the logging.
"""
def _callback(_, inputs, results):
"""Callback implementation"""
if isinstance(file_out, str):
with open(file_out, "a") as f:
for inp, result in zip(inputs, results):
f.write(record.encode(inp, result, protocol) + "\n")
else:
for inp, result in zip(inputs, results):
file_out.write(record.encode(inp, result, protocol) + "\n")
# pylint: disable=import-outside-toplevel
from pathlib import Path
if isinstance(file_out, Path):
file_out = str(file_out)
return _callback
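# Illustrative sketch (the tuner, task and file name are assumptions): the returned callback
# is typically passed to Tuner.tune so every measured config is appended to the log file in
# autotvm.record.encode format.
#
#     tuner = autotvm.tuner.RandomTuner(task)
#     tuner.tune(n_trial=10, measure_option=measure_option,
#                callbacks=[log_to_file("tuning.log")])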
def log_to_database(db):
"""Save the tuning records to a database object.
Parameters
----------
db: Database
The database
"""
def _callback(_, inputs, results):
"""Callback implementation"""
for inp, result in zip(inputs, results):
db.save(inp, result)
return _callback
class Monitor(object):
"""A monitor to collect statistic during tuning"""
def __init__(self):
self.scores = []
self.timestamps = []
def __call__(self, tuner, inputs, results):
for inp, res in zip(inputs, results):
if res.error_no == 0:
flops = inp.task.flop / np.mean(res.costs)
self.scores.append(flops)
else:
self.scores.append(0)
self.timestamps.append(res.timestamp)
def reset(self):
self.scores = []
self.timestamps = []
def trial_scores(self):
"""get scores (currently is flops) of all trials"""
return np.array(self.scores)
def trial_timestamps(self):
"""get wall clock time stamp of all trials"""
return np.array(self.timestamps)
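# Illustrative sketch (tuner, task and measure_option are assumptions): a Monitor can be passed
# as a callback alongside others, then queried after tuning to plot achieved FLOPS over time.
#
#     monitor = Monitor()
#     tuner.tune(n_trial=100, measure_option=measure_option, callbacks=[monitor])
#     scores, stamps = monitor.trial_scores(), monitor.trial_timestamps()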
def progress_bar(total, prefix="", si_prefix="G"):
"""Display progress bar for tuning
Parameters
----------
total: int
The total number of trials
prefix: str
The prefix of output message
si_prefix: str
SI prefix for flops
"""
class _Context(object):
"""Context to store local variables"""
def __init__(self):
self.best_flops = 0
self.cur_flops = 0
self.ct = 0
self.total = total
def __del__(self):
if logger.level < logging.DEBUG: # only print progress bar in non-debug mode
sys.stdout.write(" Done.\n")
ctx = _Context()
tic = time.time()
# Validate si_prefix argument
format_si_prefix(0, si_prefix)
if logger.level < logging.DEBUG: # only print progress bar in non-debug mode
sys.stdout.write(
"\r%s Current/Best: %7.2f/%7.2f %sFLOPS | Progress: (%d/%d) "
"| %.2f s" % (prefix, 0, 0, si_prefix, 0, total, time.time() - tic)
)
sys.stdout.flush()
def _callback(tuner, inputs, results):
ctx.ct += len(inputs)
flops = 0
for inp, res in zip(inputs, results):
if res.error_no == 0:
flops = inp.task.flop / np.mean(res.costs)
if not logger.isEnabledFor(logging.DEBUG): # only print progress bar in non-debug mode
ctx.cur_flops = flops
ctx.best_flops = tuner.best_flops
sys.stdout.write(
"\r%s Current/Best: %7.2f/%7.2f %sFLOPS | Progress: (%d/%d) "
"| %.2f s"
% (
prefix,
format_si_prefix(ctx.cur_flops, si_prefix),
format_si_prefix(ctx.best_flops, si_prefix),
si_prefix,
ctx.ct,
ctx.total,
time.time() - tic,
)
)
sys.stdout.flush()
return _callback
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/tuner/ga_tuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=consider-using-enumerate,invalid-name,abstract-method
"""Tuner with genetic algorithm"""
import numpy as np
from .tuner import Tuner
class GATuner(Tuner):
"""Tuner with genetic algorithm.
This tuner does not have a cost model, so it always runs measurements on real machines.
This tuner expands the :code:`ConfigEntity` as a gene.
Parameters
----------
pop_size: int
number of genes in one generation
elite_num: int
number of elites to keep
mutation_prob: float
probability of mutation of a knob in a gene
"""
def __init__(self, task, pop_size=100, elite_num=3, mutation_prob=0.1):
super(GATuner, self).__init__(task)
# algorithm configurations
self.pop_size = pop_size
self.elite_num = elite_num
self.mutation_prob = mutation_prob
assert elite_num <= pop_size, "The number of elites must not exceed the population size"
# random initialization
self.pop_size = min(self.pop_size, len(self.space))
self.elite_num = min(self.pop_size, self.elite_num)
self.visited = set(self.space.sample_ints(self.pop_size))
# current generation
self.genes = [self.space.point2knob(idx) for idx in self.visited]
self.scores = []
self.elites = []
self.elite_scores = []
self.trial_pt = 0
def next_batch(self, batch_size):
ret = []
while len(ret) < batch_size and self.has_next():
gene = self.genes[self.trial_pt % self.pop_size]
self.trial_pt += 1
ret.append(self.space.get(self.space.knob2point(gene)))
return ret
def update(self, inputs, results):
for inp, res in zip(inputs, results):
if res.error_no == 0:
y = inp.task.flop / np.mean(res.costs)
self.scores.append(y)
else:
self.scores.append(0.0)
if len(self.scores) >= len(self.genes) and len(self.visited) < len(self.space):
next_genes = []
# There is no reason to cross over or mutate since the number of unvisited
# configs is no larger than the population size.
if len(self.space) - len(self.visited) <= self.pop_size:
for idx in range(self.space.range_length):
if self.space.is_index_valid(idx) and idx not in self.visited:
next_genes.append(self.space.point2knob(idx))
self.visited.add(idx)
else:
genes = self.genes + self.elites
scores = np.array(self.scores[: len(self.genes)] + self.elite_scores)
# reserve elite
self.elites, self.elite_scores = [], []
elite_indexes = np.argpartition(scores, -self.elite_num)[-self.elite_num :]
for ind in elite_indexes:
self.elites.append(genes[ind])
self.elite_scores.append(scores[ind])
indices = np.arange(len(genes))
scores += 1e-8
scores /= np.max(scores)
probs = scores / np.sum(scores)
while len(next_genes) < self.pop_size:
# cross over
p1, p2 = np.random.choice(indices, size=2, replace=False, p=probs)
p1, p2 = genes[p1], genes[p2]
point = np.random.randint(len(self.space.dims))
tmp_gene = p1[:point] + p2[point:]
# mutation
for j, dim in enumerate(self.space.dims):
if np.random.random() < self.mutation_prob:
tmp_gene[j] = np.random.randint(dim)
if self.space.is_index_valid(self.space.knob2point(tmp_gene)):
next_genes.append(tmp_gene)
self.visited.add(self.space.knob2point(tmp_gene))
self.genes = next_genes
self.trial_pt = 0
self.scores = []
def has_next(self):
return len(self.visited) - (len(self.genes) - self.trial_pt) < len(self.space)
def load_history(self, data_set, min_seed_records=500):
pass
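# Illustrative sketch (task and measure_option are assumptions): the genetic-algorithm tuner
# is used like any other Tuner subclass; pop_size and elite_num are clipped to the space size.
#
#     tuner = GATuner(task, pop_size=50, elite_num=3, mutation_prob=0.1)
#     tuner.tune(n_trial=200, measure_option=measure_option)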
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/tuner/index_based_tuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=abstract-method
"""Grid search tuner and random tuner"""
from .tuner import Tuner
class IndexBaseTuner(Tuner):
"""Base class for index based tuner
This type of tuner determines the next batch of configs based on config indices.
Parameters
----------
task: autotvm.task.Task
The tuning task
range_idx: Optional[Tuple[int, int]]
A tuple of index range that this tuner can select from [begin_idx, end_idx]
"""
def __init__(self, task, range_idx=None):
super(IndexBaseTuner, self).__init__(task)
assert range_idx is None or isinstance(
range_idx, tuple
), "range_idx must be None or (int, int)"
self.visited = []
self.begin_idx, self.end_idx = range_idx or (0, self.space.range_length - 1)
assert self.begin_idx >= 0, "Start index must be non-negative"
self.end_idx += 1 # From here on, end_idx is exclusive
assert (
self.end_idx <= self.space.range_length
), "Finish index must be less the space range length "
self.range_length = self.end_idx - self.begin_idx
assert self.range_length > 0, "Index range must be positive"
self.visited_max = self.space.subrange_length(self.begin_idx, self.end_idx)
def has_next(self):
return len(self.visited) < self.visited_max
def load_history(self, data_set, min_seed_records=500):
pass
class GridSearchTuner(IndexBaseTuner):
"""Enumerate the search space in a grid search order"""
def __init__(self, task, range_idx=None):
super(GridSearchTuner, self).__init__(task, range_idx)
self.index = self.begin_idx
if not self.space.is_index_valid(self.index):
self.index = self.space.get_next_index(
self.index, start=self.begin_idx, end=self.end_idx
)
def next_batch(self, batch_size):
ret = []
while len(ret) < batch_size and self.has_next():
self.visited.append(self.index)
ret.append(self.space.get(self.index))
self.index = self.space.get_next_index(
self.index, start=self.begin_idx, end=self.end_idx
)
return ret
class RandomTuner(IndexBaseTuner):
"""Enumerate the search space in a random order
Parameters
----------
task: autotvm.task.Task
Tuning Task
range_idx: Optional[Tuple[int, int]]
A tuple of index range to randomly sample from
"""
def next_batch(self, batch_size):
ret = []
while len(ret) < batch_size and self.has_next():
index = self.space.get_rand_index(self.begin_idx, self.end_idx, to_exclude=self.visited)
self.visited.append(index)
ret.append(self.space.get(index))
return ret
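# Illustrative sketch (task and measure_option are assumptions): both index-based tuners can
# be restricted to a sub-range of the config space via range_idx (inclusive on both ends).
#
#     tuner = RandomTuner(task, range_idx=(0, 99))  # sample only the first 100 indices
#     tuner.tune(n_trial=50, measure_option=measure_option)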
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/tuner/metric.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Metrics for evaluating tuning process"""
import numpy as np
from ..utils import get_rank
def max_curve(trial_scores):
"""f(n) = max([s[i] fo i < n])
Parameters
----------
trial_scores: Array of float
the score of i th trial
Returns
-------
curve: Array of float
function values
"""
ret = np.empty(len(trial_scores))
keep = -1e9
for i, score in enumerate(trial_scores):
keep = max(keep, score)
ret[i] = keep
return ret
def mean_curve(trial_scores):
"""f(n) = mean([s[i] fo i < n])
Parameters
----------
trial_scores: Array of float
the score of i th trial
Returns
-------
curve: Array of float
function values
"""
ret = np.empty(len(trial_scores))
keep = 0
for i, score in enumerate(trial_scores):
keep += score
ret[i] = keep / (i + 1)
return ret
def recall_curve(trial_ranks, top=None):
"""
if top is None, f(n) = sum([I(rank[i] < n) for i < n]) / n
if top is K, f(n) = sum([I(rank[i] < K) for i < n]) / K
Parameters
----------
trial_ranks: Array of int
the rank of i th trial in labels
top: int or None
top-n recall
Returns
-------
curve: Array of float
function values
"""
if not isinstance(trial_ranks, np.ndarray):
trial_ranks = np.array(trial_ranks)
ret = np.zeros(len(trial_ranks))
if top is None:
for i in range(len(trial_ranks)):
ret[i] = np.sum(trial_ranks[:i] <= i) / (i + 1)
else:
for i in range(len(trial_ranks)):
ret[i] = 1.0 * np.sum(trial_ranks[:i] < top) / top
return ret
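# Worked example (values chosen for illustration): with top=None, ret[i] compares the first i
# trial ranks against i, so
#     recall_curve([3, 0, 1, 2])  ->  [0.0, 0.0, 1/3, 3/4]
# i.e. the curve rises as more of the low-rank (good) configs appear among the early trials.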
def cover_curve(trial_ranks):
"""
f(n) = max k s.t. {1,2,...,k} is a subset of {ranks[i] for i < n}
Parameters
----------
trial_ranks: Array of int
the rank of i th trial in labels
Returns
-------
curve: Array of float
function values
"""
ret = np.empty(len(trial_ranks))
keep = -1
cover = set()
for i, rank in enumerate(trial_ranks):
cover.add(rank)
while keep + 1 in cover:
keep += 1
ret[i] = keep + 1
return ret / len(trial_ranks)
def average_recall(preds, labels, N):
"""evaluate average recall-n for predictions and labels"""
trials = np.argsort(preds)[::-1]
ranks = get_rank(labels[trials])
curve = recall_curve(ranks)
return np.sum(curve[:N]) / N
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/tuner/model_based_tuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return,invalid-name,consider-using-enumerate,abstract-method
"""Base class for model-based tuner
This type of tuner will fit a cost model and use some optimization methods to
find optimal points of the cost model in the space.
"""
import gc
import numpy as np
from .tuner import Tuner
from ..env import GLOBAL_SCOPE
class FeatureCache(object):
"""Feature cache manager for cache sharing between different cost models"""
def __init__(self):
self.feature_cache = {}
def get(self, key):
"""Get feature cache dictionary for a key
Parameters
----------
key: str
The key of a feature type
Returns
-------
fea_cache: dict
cache dictionary
"""
if key not in self.feature_cache:
self.feature_cache[key] = {}
return self.feature_cache[key]
def size(self, key):
""" " Get the size of a feature cache dictionary
Parameters
----------
key: str
The key of a feature type
Returns
-------
n: int
"""
return len(self.feature_cache.get(key, tuple()))
def clear(self, key):
"""Clear feature cache for a key
Parameters
----------
key: str
The key of a feature type
"""
del self.feature_cache[key]
self.feature_cache[key] = {}
gc.collect()
class CostModel(object):
"""Cost model to predict the speed of a config"""
def __init__(self):
pass
def fit(self, xs, ys, plan_size):
"""Fit to training data
Parameters
----------
xs: Array of int
indexes of configs in the config space
ys: Array of float
The speed (FLOPS, floating point operations per second)
plan_size: int
The plan size of tuner
"""
raise NotImplementedError()
def fit_log(self, records, plan_size, min_seed_records=500):
"""Fit training data from log.
Parameters
----------
records: Array of Tuple(MeasureInput, MeasureResult)
The tuning records
plan_size: int
The plan size of tuner
min_seed_records: int
Defaults to 500. Indicates the minimum number of records to
train the tuner with. If there are fewer than `min_seed_records`
records in `data_set`, the tuner will not be trained.
"""
raise NotImplementedError()
def predict(self, xs, output_margin=False):
"""Predict the speed of configs
Parameters
----------
xs: Array of int
The indexes of configs to predict
output_margin: bool, optional
Whether output the untransformed margin.
When a model is used as a base model, it should output the untransformed margin
Returns
-------
preds: Array of float
The prediction
"""
raise NotImplementedError()
def load_basemodel(self, base_model):
"""Load base model for transfer learning
Parameters
----------
base_model: CostModel
base model
"""
raise NotImplementedError()
def spawn_base_model(self):
"""Clone a base model with the same parameters.
The base model is used to fit history data in transfer learning.
Returns
-------
model: CostModel
A model with the same hyperparameter (argument)
"""
raise NotImplementedError()
class ModelOptimizer(object):
"""Optimizer used to find optimal points of cost model"""
def __init__(self):
pass
def find_maximums(self, model, num, exclusive):
"""Find maximum of a cost model
Note that we use the cost model to predict GFLOPS, so we look for the maximum
Parameters
----------
model: CostModel
Cost model
num: int
The number of returned maximum points
exclusive: set, optional
The excluded set of this optimizer. Returned results won't include any
elements in this set.
"""
raise NotImplementedError()
class ModelBasedTuner(Tuner):
"""Base class for model based tuner
This type of tuner will fit a cost model and use an optimizer to
find the maximums of the cost model as next trials
Parameters
----------
task: autotvm.task.Task
The tuning task
cost_model: CostModel
The cost model that predicts the speed of a config (IR)
model_optimizer:
The optimizer to find local optimum points of cost model in tuning search space
plan_size: int
Tuner will re-fit model per `plan_size` new measure samples
diversity_filter_ratio: int or float, optional
If is not None, the tuner will first select
top-(plan_size * diversity_filter_ratio) candidates according to the cost model
and then pick plan_size of them according to the diversity metric.
"""
def __init__(self, task, cost_model, model_optimizer, plan_size, diversity_filter_ratio=None):
super(ModelBasedTuner, self).__init__(task)
# space
self.task = task
self.target = task.target
self.plan_size = plan_size
self.cost_model = cost_model
self.model_optimizer = model_optimizer
self.diversity_filter_ratio = diversity_filter_ratio
if self.diversity_filter_ratio:
assert self.diversity_filter_ratio >= 1, (
"Diversity filter ratio " "must be larger than one"
)
# trial plan
self.trials = []
self.trial_pt = 0
self.visited = set()
# observed samples
self.xs = []
self.ys = []
self.flops_max = 0.0
self.train_ct = 0
def next_batch(self, batch_size):
ret = []
while len(ret) < batch_size and self.has_next():
while self.trial_pt < len(self.trials):
index = self.trials[self.trial_pt]
if index not in self.visited and self.space.is_index_valid(index):
break
self.trial_pt += 1
if self.trial_pt >= len(self.trials) - int(0.05 * self.plan_size):
# if the trial list is empty or
# the tuner is doing the last 5% trials (e-greedy), choose randomly
index = self.space.get_rand_index(to_exclude=self.visited)
ret.append(self.space.get(index))
self.visited.add(index)
return ret
def update(self, inputs, results):
for inp, res in zip(inputs, results):
index = inp.config.index
if res.error_no == 0:
self.xs.append(index)
flops = inp.task.flop / np.mean(res.costs)
self.flops_max = max(self.flops_max, flops)
self.ys.append(flops)
else:
self.xs.append(index)
self.ys.append(0.0)
# Usually the update function is called during the tune loop
# after the index is already added to the visited set.
# However, adding the index to visited again here enables us
# to also use this update function to resume tuning progress in
# case of interruption.
assert self.space.is_index_valid(index)
self.visited.add(index)
# if we have enough new training samples
if len(self.xs) >= self.plan_size * (self.train_ct + 1) and self.flops_max > 1e-6:
self.cost_model.fit(self.xs, self.ys, self.plan_size)
if self.diversity_filter_ratio:
candidate = self.model_optimizer.find_maximums(
self.cost_model, self.plan_size * self.diversity_filter_ratio, self.visited
)
scores = self.cost_model.predict(candidate)
knobs = [self.space.point2knob(x) for x in candidate]
pick_index = submodular_pick(0 * scores, knobs, self.plan_size, knob_weight=1)
maximums = np.array(candidate)[pick_index]
else:
maximums = self.model_optimizer.find_maximums(
self.cost_model, self.plan_size, self.visited
)
self.trials = maximums
self.trial_pt = 0
self.train_ct += 1
def load_history(self, data_set, min_seed_records=500):
# set in_tuning as True to make the feature extraction consistent
GLOBAL_SCOPE.in_tuning = True
# fit base model
base_model = self.cost_model.spawn_base_model()
success = base_model.fit_log(data_set, self.plan_size, min_seed_records)
if not success:
GLOBAL_SCOPE.in_tuning = False
return
# use base model to select initial points
if not self.trials:
# no plan yet, use base model to select initial trials
maximums = self.model_optimizer.find_maximums(base_model, self.plan_size, self.visited)
self.trials = maximums
self.trial_pt = 0
self.cost_model.load_basemodel(base_model)
GLOBAL_SCOPE.in_tuning = False
def has_next(self):
return len(self.visited) < len(self.space)
def submodular_pick(scores, knobs, n_pick, knob_weight=1.0):
"""Run greedy optimization to pick points with regard to both score and diversity.
DiversityScore = knob_weight * number of unique knobs in the selected set
Obj = sum(scores[i] for i in pick) + DiversityScore
Note that this objective function is a monotone submodular function.
Parameters
----------
scores: Array of float
score of every point
knobs: Array of Array of int
feature vector (tunable knobs) of every point
n_pick: int
number of points to pick
knob_weight: float
weight of a unique knob feature
"""
n = len(scores)
assert n == len(knobs)
n_knobs = len(knobs[0])
knobs_set = [set() for _ in range(n_knobs)]
ret = []
remain = list(range(len(scores)))
for _ in range(n_pick):
max_x = -1
max_delta = -1e9
for x in remain:
tmp_delta = scores[x]
for i in range(n_knobs):
if knobs[x][i] not in knobs_set[i]:
tmp_delta += knob_weight
if tmp_delta > max_delta:
max_delta, max_x = tmp_delta, x
ret.append(max_x)
remain.remove(max_x)
for i in range(n_knobs):
knobs_set[i].add(knobs[max_x][i])
return ret
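# Worked example (toy values): with uniform scores the objective reduces to knob diversity,
# so the picker prefers configs that introduce unseen knob values.
#
#     submodular_pick([0.0, 0.0, 0.0], [[1, 2], [1, 3], [4, 3]], n_pick=2)
#     # -> [0, 2]: after picking knobs [1, 2], config [4, 3] adds two new knob values,
#     #    while [1, 3] would add only one.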
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/tuner/sa_model_optimizer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=consider-using-enumerate, invalid-name, invalid-sequence-index
"""
Cost model optimizer based on simulated annealing
"""
import heapq
import logging
import time
import numpy as np
from .model_based_tuner import ModelOptimizer
logger = logging.getLogger("autotvm")
class SimulatedAnnealingOptimizer(ModelOptimizer):
"""parallel simulated annealing optimization algorithm
Parameters
----------
task: Task
The tuning task
n_iter: int
The number of iterations of simulated annealing
temp: float or Array of float
If is a single float, then use a constant temperature.
If is an Array, then perform linear cooling from temp[0] to temp[1]
early_stop: int, optional
Stop iteration if the optimal set does not change in `early_stop` rounds
log_interval: int, optional
Print log every `log_interval` iterations
"""
def __init__(
self,
task,
n_iter=500,
temp=(1, 0),
persistent=True,
parallel_size=128,
early_stop=50,
log_interval=50,
):
super(SimulatedAnnealingOptimizer, self).__init__()
self.task = task
self.n_iter = n_iter
self.temp = temp
self.persistent = persistent
self.parallel_size = min(parallel_size, len(self.task.config_space))
self.early_stop = early_stop or 1e9
self.log_interval = log_interval
self.points = None
def find_maximums(self, model, num, exclusive):
tic = time.time()
temp, n_iter, early_stop, log_interval = (
self.temp,
self.n_iter,
self.early_stop,
self.log_interval,
)
if self.persistent and self.points is not None:
points = self.points
else:
points = self.task.config_space.sample_ints(self.parallel_size)
scores = model.predict(points)
# build heap and insert initial points
heap_items = [(float("-inf"), -1 - i) for i in range(num)]
heapq.heapify(heap_items)
in_heap = set(exclusive)
in_heap.update([x[1] for x in heap_items])
for s, p in zip(scores, points):
if s > heap_items[0][0] and p not in in_heap:
pop = heapq.heapreplace(heap_items, (s, p))
in_heap.remove(pop[1])
in_heap.add(p)
k = 0
k_last_modify = 0
if isinstance(temp, (tuple, list, np.ndarray)):
t = temp[0]
cool = 1.0 * (temp[0] - temp[1]) / (n_iter + 1)
else:
t = temp
cool = 0
while k < n_iter and k < k_last_modify + early_stop:
new_points = np.empty_like(points)
for i, p in enumerate(points):
new_points[i] = self.task.config_space.random_walk(p)
new_scores = model.predict(new_points)
ac_prob = np.exp(np.minimum((new_scores - scores) / (t + 1e-5), 1))
ac_index = np.random.random(len(ac_prob)) < ac_prob
points[ac_index] = new_points[ac_index]
scores[ac_index] = new_scores[ac_index]
for s, p in zip(new_scores, new_points):
if s > heap_items[0][0] and p not in in_heap:
pop = heapq.heapreplace(heap_items, (s, p))
in_heap.remove(pop[1])
in_heap.add(p)
k_last_modify = k
k += 1
t -= cool
if log_interval and k % log_interval == 0:
t_str = "%.2f" % t
logger.debug(
"SA iter: %d\tlast_update: %d\tmax-0: %.2f\tmax-1: %.2f\ttemp: %s\t"
"elapsed: %.2f",
k,
k_last_modify,
heap_items[0][0],
np.max([v for v, _ in heap_items]),
t_str,
time.time() - tic,
)
heap_items.sort(key=lambda item: -item[0])
heap_items = [x for x in heap_items if x[0] >= 0]
logger.debug(
"SA iter: %d\tlast_update: %d\telapsed: %.2f", k, k_last_modify, time.time() - tic
)
logger.debug("SA Maximums: %s", heap_items)
if self.persistent:
self.points = points
return [x[1] for x in heap_items]
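# Illustrative sketch (task and a trained cost model are assumptions): the optimizer performs
# parallel random walks, always accepting uphill moves and accepting downhill moves with
# probability exp((new_score - old_score) / temperature), and returns the best `num` indices.
#
#     optimizer = SimulatedAnnealingOptimizer(task, n_iter=500, temp=(1, 0))
#     top_indices = optimizer.find_maximums(cost_model, num=64, exclusive=set())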
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/tuner/tuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, no-self-use, invalid-name
"""Base class of tuner"""
import logging
import tempfile
import numpy as np
from ..measure import MeasureInput, create_measure_batch
from ..utils import format_si_prefix
from ..env import GLOBAL_SCOPE
logger = logging.getLogger("autotvm")
class Tuner(object):
"""Base class for tuners
Parameters
----------
task: autotvm.task.Task
Tuning Task
"""
def __init__(self, task, **kwargs):
self.param = kwargs
self.recorder = None
self.task = task
self.space = self.task.config_space
# keep the current best
self.best_config = None
self.best_flops = 0
self.best_measure_pair = None
self.best_iter = 0
self.error_ct_threshold = 150
# time to leave
self.ttl = None
self.n_trial = None
self.early_stopping = None
def has_next(self):
"""Whether has next untried config in the space
Returns
-------
has_next: bool
"""
raise NotImplementedError()
def next_batch(self, batch_size):
"""get the next batch of configs to be measure on real hardware
Parameters
----------
batch_size: int
The size of the batch
Returns
-------
a batch of configs
"""
raise NotImplementedError()
def update(self, inputs, results):
"""Update parameters of the tuner according to measurement results
Parameters
----------
inputs: Array of autotvm.measure.MeasureInput
The input for measurement
results: Array of autotvm.measure.MeasureResult
result for measurement
"""
def tune(self, n_trial, measure_option, early_stopping=None, callbacks=(), si_prefix="G"):
"""Begin tuning
Parameters
----------
n_trial: int
Maximum number of configs to try (measure on real hardware)
measure_option: dict
The options for how to measure generated code.
You should use the return value of autotvm.measure_option for this argument.
early_stopping: int, optional
Early stop the tuning when not finding better configs in this number of trials
callbacks: List of callable
A list of callback functions. The signature of callback function is
(Tuner, List of MeasureInput, List of MeasureResult)
with no return value. These callback functions will be called on
every measurement pair. See autotvm/tuner/callback.py for some examples.
si_prefix: str
One of tvm.autotvm.utils.SI_PREFIXES. The SI prefix to use when reporting FLOPS.
"""
measure_batch = create_measure_batch(self.task, measure_option)
n_parallel = getattr(measure_batch, "n_parallel", 1)
early_stopping = early_stopping or 1e9
self.n_trial = n_trial
self.early_stopping = early_stopping
# Validate si_prefix arg
format_si_prefix(0, si_prefix)
old_level = logger.level
GLOBAL_SCOPE.in_tuning = True
i = error_ct = 0
errors = []
while i < n_trial:
if not self.has_next():
break
configs = self.next_batch(min(n_parallel, n_trial - i))
inputs = [MeasureInput(self.task.target, self.task, config) for config in configs]
results = measure_batch(inputs)
# keep best config
for k, (inp, res) in enumerate(zip(inputs, results)):
config = inp.config
if res.error_no == 0:
flops = inp.task.flop / np.mean(res.costs)
error_ct = 0
result_msg = res
else:
flops = 0
error_ct += 1
tb, error = res.costs
if isinstance(error, str):
errors.append(tb + "\n" + error)
else:
errors.append(tb + "\n" + str(error))
result_msg = errors[-1]
if flops > self.best_flops:
self.best_flops = flops
self.best_config = config
self.best_measure_pair = (inp, res)
self.best_iter = i + k
logger.debug(
"No: %d\t%sFLOPS: %.2f/%.2f\tresult: %s\t%s",
i + k + 1,
si_prefix,
format_si_prefix(flops, si_prefix),
format_si_prefix(self.best_flops, si_prefix),
result_msg,
config,
)
i += len(results)
self.ttl = min(early_stopping + self.best_iter, n_trial) - i
self.update(inputs, results)
for callback in callbacks:
callback(self, inputs, results)
if i >= self.best_iter + early_stopping:
logger.debug("Early stopped. Best iter: %d.", self.best_iter)
break
if error_ct > self.error_ct_threshold:
logging.basicConfig()
logger.warning("Too many errors happen in the tuning. Switching to debug mode.")
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(old_level)
if error_ct == i:
_, f = tempfile.mkstemp(prefix="tvm_tuning_errors_", suffix=".log", text=True)
with open(f, "w") as file:
file.write("\n".join(errors))
logging.warning(
"Could not find any valid schedule for task %s. "
"A file containing the errors has been written to %s.",
self.task,
f,
)
GLOBAL_SCOPE.in_tuning = False
del measure_batch
def reset(self):
"""reset the status of tuner"""
self.best_config = None
self.best_flops = 0
self.best_measure_pair = None
def load_history(self, data_set, min_seed_records=500):
"""load history data for transfer learning
Parameters
----------
data_set: Array of (autotvm.measure.MeasureInput, autotvm.measure.MeasureResult) pair
Previous tuning records
min_seed_records: int
Defaults to 500. Indicates the minimum number of records to
train the tuner with. If there are fewer than `min_seed_records`
records in `data_set`, the tuner will not be trained.
"""
raise NotImplementedError()
def set_error_threshold(self, threshold):
"""Modify error counter threshold, which controls switch to debug mode
Parameters
----------
threshold: New threshold value
"""
self.error_ct_threshold = threshold
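# Illustrative sketch (the task and log file name are assumptions): a typical tuning loop
# builds a measure_option, runs a Tuner subclass for a bounded number of trials, and records
# results through callbacks.
#
#     measure_option = autotvm.measure_option(
#         builder=autotvm.LocalBuilder(),
#         runner=autotvm.LocalRunner(number=5),
#     )
#     tuner = autotvm.tuner.XGBTuner(task, loss_type="rank")
#     tuner.tune(n_trial=min(1000, len(task.config_space)),
#                early_stopping=400,
#                measure_option=measure_option,
#                callbacks=[autotvm.callback.log_to_file("tune.log")])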
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/tuner/xgboost_cost_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""XGBoost as cost model"""
import logging
import time
import numpy as np
from tvm.contrib.popen_pool import PopenPoolExecutor, StatusKind
from .. import feature
from ..utils import get_rank
from .metric import cover_curve, max_curve, recall_curve
from .model_based_tuner import CostModel, FeatureCache
xgb = None
logger = logging.getLogger("autotvm")
class XGBoostCostModel(CostModel):
"""XGBoost as cost model
Parameters
----------
task: Task
The tuning task
feature_type: str, optional
If is 'itervar', use features extracted from IterVar (loop variable).
If is 'knob', use flatten ConfigEntity directly.
If is 'curve', use sampled curve feature (relation feature).
Note on choosing feature type:
For single task tuning, 'itervar' and 'knob' are good.
'itervar' is more accurate but 'knob' is much faster.
There are some constraints on 'itervar'; if you meet
problems with feature extraction when using 'itervar',
you can switch to 'knob'.
For cross-shape tuning (e.g. many convolutions with different shapes),
'itervar' and 'curve' have better transferability,
while 'knob' is faster.
For cross-device or cross-operator tuning, you can use 'curve' only.
loss_type: str
If is 'reg', use regression loss to train cost model.
The cost model predicts the normalized flops.
If is 'rank', use pairwise rank loss to train cost model.
The cost model predicts relative rank score.
num_threads: int, optional
The number of threads.
log_interval: int, optional
If it is not None, the cost model will print a training log every `log_interval` iterations.
upper_model: XGBoostCostModel, optional
The upper model used in transfer learning
"""
def __init__(
self, task, feature_type, loss_type, num_threads=None, log_interval=25, upper_model=None
):
global xgb
super(XGBoostCostModel, self).__init__()
try:
if xgb is None:
xgb = __import__("xgboost")
except ImportError:
raise ImportError(
"XGBoost is required for XGBoostCostModel. "
"Please install its python package first. "
"Help: (https://xgboost.readthedocs.io/en/latest/) "
)
self.task = task
self.target = task.target
self.space = task.config_space
self.fea_type = feature_type
self.loss_type = loss_type
self.num_threads = num_threads
self.log_interval = log_interval
if loss_type == "reg":
self.xgb_params = {
"max_depth": 3,
"gamma": 0.0001,
"min_child_weight": 1,
"subsample": 1.0,
"eta": 0.3,
"lambda": 1.00,
"alpha": 0,
"objective": "reg:linear",
}
elif loss_type == "rank":
self.xgb_params = {
"max_depth": 3,
"gamma": 0.0001,
"min_child_weight": 1,
"subsample": 1.0,
"eta": 0.3,
"lambda": 1.00,
"alpha": 0,
"objective": "rank:pairwise",
}
else:
raise RuntimeError("Invalid loss type: " + loss_type)
self.xgb_params["verbosity"] = 0
if num_threads:
self.xgb_params["nthread"] = num_threads
self.bst = None
if feature_type == "itervar":
self.feature_extract_func = _extract_itervar_feature_index
elif feature_type == "knob":
self.feature_extract_func = _extract_knob_feature_index
elif feature_type == "curve":
self.feature_extract_func = _extract_curve_feature_index
else:
raise RuntimeError("Invalid feature type " + feature_type)
if upper_model: # share a same feature cache with upper model
self.feature_cache = upper_model.feature_cache
else:
self.feature_cache = FeatureCache()
self.upper_model = upper_model
self.feature_extra_ct = 0
self.pool = None
self.base_model = None
self._sample_size = 0
self._reset_pool(self.space, self.target, self.task)
def _reset_pool(self, space, target, task):
"""reset processing pool for feature extraction"""
if self.upper_model: # base model will reuse upper model's pool,
self.upper_model._reset_pool(space, target, task)
return
self._close_pool()
self.pool = PopenPoolExecutor(
max_workers=self.num_threads,
initializer=_extract_popen_initializer,
initargs=(space, target, task),
)
def _close_pool(self):
if self.pool:
self.pool = None
def _get_pool(self):
if self.upper_model:
return self.upper_model._get_pool()
return self.pool
def _base_model_discount(self):
return 1.0 / (2 ** (self._sample_size / 64.0))
def fit(self, xs, ys, plan_size):
tic = time.time()
self._reset_pool(self.space, self.target, self.task)
x_train = self._get_feature(xs)
y_train = np.array(ys)
y_max = np.max(y_train)
y_train = y_train / max(y_max, 1e-8)
valid_index = y_train > 1e-6
index = np.random.permutation(len(x_train))
dtrain = xgb.DMatrix(x_train[index], y_train[index])
self._sample_size = len(x_train)
if self.base_model:
discount = self._base_model_discount()
if discount < 0.05: # discard base model
self.base_model.upper_model = None
self.base_model = None
else:
dtrain.set_base_margin(discount * self.base_model.predict(xs, output_margin=True))
self.bst = xgb.train(
self.xgb_params,
dtrain,
num_boost_round=8000,
callbacks=[
custom_callback(
stopping_rounds=20,
metric="tr-a-recall@%d" % plan_size,
evals=[(dtrain, "tr")],
maximize=True,
fevals=[
xgb_average_recalln_curve_score(plan_size),
],
verbose_eval=self.log_interval,
)
],
)
logger.debug(
"XGB train: %.2f\tobs: %d\terror: %d\tn_cache: %d",
time.time() - tic,
len(xs),
len(xs) - np.sum(valid_index),
self.feature_cache.size(self.fea_type),
)
def fit_log(self, records, plan_size, min_seed_records=500):
tic = time.time()
# filter data, only pick the data with the same task
data = []
for inp, res in records:
if inp.task.name == self.task.name:
data.append((inp, res))
logger.debug("XGB load %d entries from history log file", len(data))
# extract feature
self._reset_pool(self.space, self.target, self.task)
pool = self._get_pool()
if self.fea_type == "itervar":
feature_extract_func = _extract_itervar_feature_log
elif self.fea_type == "knob":
feature_extract_func = _extract_knob_feature_log
elif self.fea_type == "curve":
feature_extract_func = _extract_curve_feature_log
else:
raise RuntimeError("Invalid feature type: " + self.fea_type)
result = pool.map_with_error_catching(feature_extract_func, data)
result = list(result) # store results so we can iterate through them twice
# get maximum feature length
fea_len = -1
for res in result:
if res.status != StatusKind.COMPLETE:
continue
x, _ = res.value
fea_len = max(fea_len, x.shape[0])
xs, ys = [], []
for res in result:
if res.status != StatusKind.COMPLETE:
continue
x, y = res.value
# Features may not be the same size, pad them until they are
if fea_len > len(x):
xs.append(np.pad(x, (0, fea_len - len(x))))
else:
xs.append(x)
ys.append(y)
if len(xs) < min_seed_records: # not enough samples
return False
xs, ys = np.array(xs), np.array(ys)
x_train = xs
y_train = ys
y_max = np.max(y_train)
y_train = y_train / max(y_max, 1e-8)
index = np.random.permutation(len(x_train))
dtrain = xgb.DMatrix(x_train[index], y_train[index])
plan_size *= 2
self.bst = xgb.train(
self.xgb_params,
dtrain,
num_boost_round=400,
callbacks=[
custom_callback(
stopping_rounds=100,
metric="tr-a-recall@%d" % plan_size,
evals=[(dtrain, "tr")],
maximize=True,
fevals=[
xgb_average_recalln_curve_score(plan_size),
],
verbose_eval=self.log_interval,
)
],
)
logger.debug("XGB train: %.2f\tobs: %d", time.time() - tic, len(xs))
return True
def predict(self, xs, output_margin=False):
feas = self._get_feature(xs)
dtest = xgb.DMatrix(feas)
if self.base_model:
dtest.set_base_margin(
self._base_model_discount() * self.base_model.predict(xs, output_margin=True)
)
return self.bst.predict(dtest, output_margin=output_margin)
def load_basemodel(self, base_model):
self.base_model = base_model
self.base_model._close_pool()
self.base_model.upper_model = self
def spawn_base_model(self):
return XGBoostCostModel(
self.task, self.fea_type, self.loss_type, self.num_threads, self.log_interval, self
)
def _get_feature(self, indexes):
"""get features for indexes, run extraction if we do not have cache for them"""
# free feature cache
if self.feature_cache.size(self.fea_type) >= 100000:
self.feature_cache.clear(self.fea_type)
fea_cache = self.feature_cache.get(self.fea_type)
indexes = np.array(indexes)
need_extract = [x for x in indexes if x not in fea_cache]
if need_extract:
pool = self._get_pool()
feas = pool.map_with_error_catching(self.feature_extract_func, need_extract)
for i, fea in zip(need_extract, feas):
fea_cache[i] = fea.value if fea.status == StatusKind.COMPLETE else None
feature_len = -1
for idx in indexes:
if fea_cache[idx] is not None:
feature_len = max(fea_cache[idx].shape[-1], feature_len)
ret = np.empty((len(indexes), feature_len), dtype=np.float32)
for i, ii in enumerate(indexes):
t = fea_cache[ii]
if t is not None and t.shape[0] < feature_len:
t = np.pad(t, (0, feature_len - t.shape[0]))
ret[i, :] = t if t is not None else 0
return ret
def __del__(self):
self._close_pool()
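# Illustrative sketch (the task plus previously measured indices/FLOPS are assumptions): the
# cost model is normally driven by ModelBasedTuner, but it can also be exercised directly by
# fitting on config-space indices and predicting scores for unseen ones.
#
#     model = XGBoostCostModel(task, feature_type="knob", loss_type="rank")
#     model.fit(measured_indices, measured_flops, plan_size=64)
#     scores = model.predict(candidate_indices)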
# Global variables for passing arguments to extract functions.
_extract_space = None
_extract_target = None
_extract_task = None
def _extract_popen_initializer(space, target, task):
global _extract_space, _extract_target, _extract_task
_extract_space = space
_extract_target = target
_extract_task = task
def _extract_itervar_feature_index(args):
"""extract iteration var feature for an index in extract_space"""
config = _extract_space.get(args)
with _extract_target:
sch, fargs = _extract_task.instantiate(config)
fea = feature.get_itervar_feature_flatten(sch, fargs, take_log=True)
fea = np.concatenate((fea, list(config.get_other_option().values())))
return fea
def _extract_itervar_feature_log(arg):
"""extract iteration var feature for log items"""
inp, res = arg
config = inp.config
with inp.target:
sch, args = inp.task.instantiate(config)
fea = feature.get_itervar_feature_flatten(sch, args, take_log=True)
x = np.concatenate((fea, list(config.get_other_option().values())))
if res.error_no == 0:
y = inp.task.flop / np.mean(res.costs)
else:
y = 0.0
return x, y
def _extract_knob_feature_index(args):
"""extract knob feature for an index in extract_space"""
config = _extract_space.get(args)
return config.get_flatten_feature()
def _extract_knob_feature_log(arg):
"""extract knob feature for log items"""
inp, res = arg
config = inp.config
x = config.get_flatten_feature()
if res.error_no == 0:
with inp.target: # necessary, for calculating flops of this task
inp.task.instantiate(config)
y = inp.task.flop / np.mean(res.costs)
else:
y = 0.0
return x, y
def _extract_curve_feature_index(args):
"""extract sampled curve feature for an index in extract_space"""
config = _extract_space.get(args)
with _extract_target:
sch, fargs = _extract_task.instantiate(config)
fea = feature.get_buffer_curve_sample_flatten(sch, fargs, sample_n=20)
fea = np.concatenate((fea, list(config.get_other_option().values())))
return np.array(fea)
def _extract_curve_feature_log(arg):
"""extract sampled curve feature for log items"""
inp, res = arg
config = inp.config
with inp.target:
sch, args = inp.task.instantiate(config)
fea = feature.get_buffer_curve_sample_flatten(sch, args, sample_n=20)
x = np.concatenate((fea, list(config.get_other_option().values())))
if res.error_no == 0:
y = inp.task.flop / np.mean(res.costs)
else:
y = 0.0
return x, y
def custom_callback(
stopping_rounds, metric, fevals, evals=(), log_file=None, maximize=False, verbose_eval=True
):
"""callback function for xgboost to support multiple custom evaluation functions"""
# pylint: disable=import-outside-toplevel
from xgboost.callback import _fmt_metric
from xgboost.core import EarlyStopException
try:
from xgboost.training import aggcv
except ImportError:
from xgboost.callback import _aggcv as aggcv
state = {}
metric_shortname = metric.split("-")[1]
def init(env):
"""internal function"""
bst = env.model
state["maximize_score"] = maximize
state["best_iteration"] = 0
if maximize:
state["best_score"] = float("-inf")
else:
state["best_score"] = float("inf")
if bst is not None:
if bst.attr("best_score") is not None:
state["best_score"] = float(bst.attr("best_score"))
state["best_iteration"] = int(bst.attr("best_iteration"))
state["best_msg"] = bst.attr("best_msg")
else:
bst.set_attr(best_iteration=str(state["best_iteration"]))
bst.set_attr(best_score=str(state["best_score"]))
else:
assert env.cvfolds is not None
def callback(env):
"""internal function"""
if not state:
init(env)
bst = env.model
i = env.iteration
cvfolds = env.cvfolds
res_dict = {}
##### evaluation #####
if cvfolds is not None:
for feval in fevals:
tmp = aggcv([f.eval(i, feval) for f in cvfolds])
for k, mean, std in tmp:
res_dict[k] = [mean, std]
else:
for feval in fevals:
bst_eval = bst.eval_set(evals, i, feval)
res = [x.split(":") for x in bst_eval.split()]
for kv in res[1:]:
res_dict[kv[0]] = [float(kv[1])]
eval_res = []
keys = list(res_dict.keys())
keys.sort(key=lambda x: x if metric_shortname not in x else "a" + x)
for key in keys:
v = res_dict[key]
eval_res.append([key] + v)
##### print eval result #####
infos = ["XGB iter: %3d" % i]
for item in eval_res:
if "null" in item[0]:
continue
infos.append("%s: %.6f" % (item[0], item[1]))
if not isinstance(verbose_eval, bool) and verbose_eval and i % verbose_eval == 0:
logger.debug("\t".join(infos))
if log_file:
with open(log_file, "a") as fout:
fout.write("\t".join(infos) + "\n")
##### choose score and do early stopping #####
score = None
for item in eval_res:
if item[0] == metric:
score = item[1]
break
assert score is not None
best_score = state["best_score"]
best_iteration = state["best_iteration"]
maximize_score = state["maximize_score"]
if (maximize_score and score > best_score) or (not maximize_score and score < best_score):
msg = "[%d] %s" % (env.iteration, "\t".join([_fmt_metric(x) for x in eval_res]))
state["best_msg"] = msg
state["best_score"] = score
state["best_iteration"] = env.iteration
# save the property to attributes, so they will occur in checkpoint.
if env.model is not None:
env.model.set_attr(
best_score=str(state["best_score"]),
best_iteration=str(state["best_iteration"]),
best_msg=state["best_msg"],
)
elif env.iteration - best_iteration >= stopping_rounds:
best_msg = state["best_msg"]
if verbose_eval and env.rank == 0:
logger.debug("XGB stopped. Best iteration: %s ", best_msg)
raise EarlyStopException(best_iteration)
return callback
# feval wrapper for xgboost
def xgb_max_curve_score(N):
"""evaluate max curve score for xgb"""
def feval(preds, labels):
labels = labels.get_label()
trials = np.argsort(preds)[::-1]
scores = labels[trials]
curve = max_curve(scores)
return "Smax@%d" % N, curve[N] / np.max(labels)
return feval
def xgb_recalln_curve_score(N):
"""evaluate recall-n curve score for xgb"""
def feval(preds, labels):
labels = labels.get_label()
trials = np.argsort(preds)[::-1]
ranks = get_rank(labels[trials])
curve = recall_curve(ranks)
return "recall@%d" % N, curve[N]
return feval
def xgb_average_recalln_curve_score(N):
"""evaluate average recall-n curve score for xgb"""
def feval(preds, labels):
labels = labels.get_label()
trials = np.argsort(preds)[::-1]
ranks = get_rank(labels[trials])
curve = recall_curve(ranks)
return "a-recall@%d" % N, np.sum(curve[:N]) / N
return feval
def xgb_recallk_curve_score(N, topk):
"""evaluate recall-k curve score for xgb"""
def feval(preds, labels):
labels = labels.get_label()
trials = np.argsort(preds)[::-1]
ranks = get_rank(labels[trials])
curve = recall_curve(ranks, topk)
return "recall@%d" % topk, curve[N]
return feval
def xgb_cover_curve_score(N):
"""evaluate cover curve score for xgb"""
def feval(preds, labels):
labels = labels.get_label()
trials = np.argsort(preds)[::-1]
ranks = get_rank(labels[trials])
curve = cover_curve(ranks)
return "cover@%d" % N, curve[N]
return feval
def xgb_null_score(_):
"""empty score function for xgb"""
def feval(__, ___):
return "null", 0
return feval
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/tuner/xgboost_tuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tuner that uses xgboost as cost model"""
from .model_based_tuner import ModelBasedTuner, ModelOptimizer
from .xgboost_cost_model import XGBoostCostModel
from .sa_model_optimizer import SimulatedAnnealingOptimizer
class XGBTuner(ModelBasedTuner):
"""Tuner that uses xgboost as cost model
Parameters
----------
task: Task
The tuning task
plan_size: int
The size of a plan. After `plan_size` trials, the tuner will refit a new cost model
and do planning for the next `plan_size` trials.
feature_type: str, optional
If is 'itervar', use features extracted from IterVar (loop variable).
If is 'knob', use flatten ConfigEntity directly.
If is 'curve', use sampled curve feature (relation feature).
Note on choosing feature type:
For single task tuning, 'itervar' and 'knob' are good.
'itervar' is more accurate but 'knob' is much faster.
There are some constraints on 'itervar'; if you meet
problems with feature extraction when using 'itervar',
you can switch to 'knob'.
For cross-shape tuning (e.g. many convolutions with different shapes),
'itervar' and 'curve' have better transferability,
'knob' is faster.
For cross-device or cross-operator tuning, you can use 'curve' only.
loss_type: str
If is 'reg', use regression loss to train cost model.
The cost model predicts the normalized flops.
If is 'rank', use pairwise rank loss to train cost model.
The cost model predicts relative rank score.
num_threads: int, optional
The number of threads.
optimizer: str or ModelOptimizer, optional
If is 'sa', use a default simulated annealing optimizer.
Otherwise it should be a ModelOptimizer object.
diversity_filter_ratio: int or float, optional
If is not None, the tuner will first select
top-(plan_size * diversity_filter_ratio) candidates according to the cost model
and then pick batch_size of them according to the diversity metric.
log_interval: int = 50
The verbose level.
If is 0, output nothing.
Otherwise, output debug information every `log_interval` iterations.
"""
def __init__(
self,
task,
plan_size=64,
feature_type="itervar",
loss_type="rank",
num_threads=None,
optimizer="sa",
diversity_filter_ratio=None,
log_interval=50,
):
cost_model = XGBoostCostModel(
task,
feature_type=feature_type,
loss_type=loss_type,
num_threads=num_threads,
log_interval=log_interval // 2,
)
if optimizer == "sa":
optimizer = SimulatedAnnealingOptimizer(task, log_interval=log_interval)
else:
assert isinstance(optimizer, ModelOptimizer), (
"Optimizer must be " "a supported name string" "or a ModelOptimizer object."
)
super(XGBTuner, self).__init__(
task, cost_model, optimizer, plan_size, diversity_filter_ratio
)
def tune(self, *args, **kwargs): # pylint: disable=arguments-differ
super(XGBTuner, self).tune(*args, **kwargs)
# manually close pool to avoid multiprocessing issues
self.cost_model._close_pool()
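# --- Illustrative usage sketch (assumptions: a tunable template named "conv2d_nchw.cuda"
# is registered, `args` describes the workload, and a local CUDA device is available) ---
def _example_tune_with_xgb(args):
    from tvm import autotvm
    task = autotvm.task.create("conv2d_nchw.cuda", args=args, target="cuda")
    tuner = XGBTuner(task, plan_size=64, feature_type="itervar", loss_type="rank")
    tuner.tune(
        n_trial=1000,
        measure_option=autotvm.measure_option(
            builder=autotvm.LocalBuilder(),
            runner=autotvm.LocalRunner(number=10),
        ),
        callbacks=[autotvm.callback.log_to_file("conv2d.log")],
    )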
| https://github.com/zk-ml/tachikoma |
python/tvm/autotvm/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Utilities"""
import logging
import time
import numpy as np
import tvm.arith
from tvm.tir import expr
from tvm.contrib.popen_pool import PopenPoolExecutor
logger = logging.getLogger("autotvm")
class EmptyContext(object):
"""An empty context"""
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def get_rank(values):
"""get rank of items
Parameters
----------
values: Array
Returns
-------
ranks: Array of int
the rank of this item in the input (the largest value ranks first)
"""
tmp = np.argsort(-values)
ranks = np.empty_like(tmp)
ranks[tmp] = np.arange(len(tmp))
return ranks
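# --- Illustrative sketch: the largest value gets rank 0 ---
def _example_get_rank():
    scores = np.array([0.1, 0.9, 0.5])
    # 0.9 -> rank 0, 0.5 -> rank 1, 0.1 -> rank 2
    return get_rank(scores)  # array([2, 0, 1])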
def pool_map(func, args, batch_size, verbose=False, pool=None):
"""A wrapper of multiprocessing.pool.Pool.map to support small-batch mapping
for large argument list. This can reduce memory usage
Parameters
----------
func: Func(arg) -> np.ndarray
mapping function
args: List
list of arguments
batch_size: int
batch size in mapping
verbose: bool, optional
whether print progress
pool: PopenPoolExecutor, optional
pool object
Returns
-------
converted numpy array
"""
ret = None
tic = time.time()
local_pool = pool or PopenPoolExecutor()
if verbose:
logger.info("mapping begin")
for i in range(0, len(args), batch_size):
if verbose:
logger.info("mapping %d/%d elapsed %.2f", i, len(args), time.time() - tic)
tmp = np.array(local_pool.map(func, args[i : i + batch_size]))
ret = tmp if ret is None else np.concatenate((ret, tmp))
if verbose:
logger.info("mapping done")
if not pool:
local_pool.close()
return ret
def get_func_name(func):
"""Get name of a function
Parameters
----------
func: Function
The function
Returns
-------
name: str
The name
"""
return func.func_name if hasattr(func, "func_name") else func.__name__
def get_const_int(exp):
"""Verifies expr is integer and get the constant value.
Parameters
----------
exp : tvm.Expr or int
The input expression.
Returns
-------
out_value : int
The output.
"""
if isinstance(exp, int):
return exp
if not isinstance(exp, (expr.IntImm,)):
ana = tvm.arith.Analyzer()
exp = ana.simplify(exp)
if not isinstance(exp, (expr.IntImm,)):
raise ValueError("Expect value to be constant int")
return exp.value
def get_const_tuple(in_tuple):
"""Verifies input tuple is IntImm or Var, returns tuple of int or Var.
Parameters
----------
in_tuple : tuple of Expr
The input.
Returns
-------
out_tuple : tuple of int
The output.
"""
ret = []
for elem in in_tuple:
if isinstance(elem, expr.Var):
ret.append(elem)
elif not isinstance(elem, (expr.IntImm, int)):
ana = tvm.arith.Analyzer()
elem = ana.simplify(elem)
if not isinstance(elem, (expr.IntImm)):
ret.append(elem)
else:
ret.append(get_const_int(elem))
return tuple(ret)
SI_PREFIXES = "yzafpn\xb5m kMGTPEZY"
YOCTO_EXP10 = -24
def format_si_prefix(x, si_prefix):
exp10 = 10 ** (SI_PREFIXES.index(si_prefix) * 3 + YOCTO_EXP10)
return float(x) / exp10
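# --- Illustrative sketch of format_si_prefix ---
def _example_format_si_prefix():
    # 0.002 seconds expressed in milliseconds; 3.5e9 FLOPS expressed in GFLOPS.
    return format_si_prefix(0.002, "m"), format_si_prefix(3.5e9, "G")  # (2.0, 3.5)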
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Contrib APIs of TVM python package.
Contrib API provides many useful non-core features.
Some of these are useful utilities to interact with
thirdparty libraries and tools.
"""
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/cblas.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External function interface to BLAS libraries."""
import tvm
from tvm import te
def matmul(lhs, rhs, transa=False, transb=False, **kwargs):
"""Create an extern op that compute matrix mult of A and rhs with CrhsLAS
This function serves as an example on how to call external libraries.
Parameters
----------
lhs: Tensor
The left matrix operand
rhs: Tensor
The right matrix operand
transa: bool
Whether transpose lhs
transb: bool
Whether transpose rhs
Returns
-------
C: Tensor
The result tensor.
"""
n = lhs.shape[1] if transa else lhs.shape[0]
m = rhs.shape[0] if transb else rhs.shape[1]
return te.extern(
(n, m),
[lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.cblas.matmul", ins[0], ins[1], outs[0], transa, transb
),
name="C",
**kwargs,
)
def batch_matmul(lhs, rhs, transa=False, transb=False, iterative=False, **kwargs):
"""Create an extern op that compute batched matrix mult of A and rhs with CBLAS
This function serves as an example on how to call external libraries.
Parameters
----------
lhs: Tensor
The left matrix operand
rhs: Tensor
The right matrix operand
transa: bool
Whether transpose lhs
transb: bool
Whether transpose rhs
Returns
-------
C: Tensor
The result tensor.
"""
b = te.max(lhs.shape[0], rhs.shape[0])
n = lhs.shape[2] if transa else lhs.shape[1]
m = rhs.shape[1] if transb else rhs.shape[2]
return te.extern(
(b, n, m),
[lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.cblas.batch_matmul"
if not iterative
else "tvm.contrib.cblas.batch_matmul_iterative",
ins[0],
ins[1],
outs[0],
transa,
transb,
),
name="C",
**kwargs,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/cc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Util to invoke C/C++ compilers in the system."""
# pylint: disable=invalid-name
import sys
import os
import subprocess
from .._ffi.base import py_str
def _is_linux_like():
return (
sys.platform == "darwin"
or sys.platform.startswith("linux")
or sys.platform.startswith("freebsd")
)
def get_cc():
"""Return the path to the default C/C++ compiler.
Returns
-------
out: Optional[str]
The path to the default C/C++ compiler, or None if none was found.
"""
if not _is_linux_like():
return None
env_cxx = os.environ.get("CXX") or os.environ.get("CC")
if env_cxx:
return env_cxx
cc_names = ["g++", "gcc", "clang++", "clang", "c++", "cc"]
dirs_in_path = os.get_exec_path()
for cc in cc_names:
for d in dirs_in_path:
cc_path = os.path.join(d, cc)
if os.path.isfile(cc_path) and os.access(cc_path, os.X_OK):
return cc_path
return None
def create_shared(output, objects, options=None, cc=None):
"""Create shared library.
Parameters
----------
output : str
The target shared library.
objects : List[str]
List of object files.
options : List[str]
The list of additional options string.
cc : Optional[str]
The compiler command.
"""
cc = cc or get_cc()
if _is_linux_like():
_linux_compile(output, objects, options, cc, compile_shared=True)
elif sys.platform == "win32":
_windows_compile(output, objects, options)
else:
raise ValueError("Unsupported platform")
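# --- Illustrative sketch (assumption: "a.o" and "b.o" are existing object files) ---
def _example_create_shared():
    # Link two object files into a shared library with the system compiler,
    # additionally linking against libm.
    create_shared("libexample.so", ["a.o", "b.o"], options=["-lm"])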
def create_executable(output, objects, options=None, cc=None):
"""Create executable binary.
Parameters
----------
output : str
The target executable.
objects : List[str]
List of object files.
options : List[str]
The list of additional options string.
cc : Optional[str]
The compiler command.
"""
cc = cc or get_cc()
if _is_linux_like():
_linux_compile(output, objects, options, cc)
elif sys.platform == "win32":
_windows_compile(output, objects, options)
else:
raise ValueError("Unsupported platform")
def get_target_by_dump_machine(compiler):
"""Functor of get_target_triple that can get the target triple using compiler.
Parameters
----------
compiler : Optional[str]
The compiler.
Returns
-------
out: Callable
A function that gets the target triple according to the dumpmachine option of the compiler.
"""
def get_target_triple():
"""Get target triple according to dumpmachine option of compiler."""
if compiler:
cmd = [compiler, "-dumpmachine"]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "dumpmachine error:\n"
msg += py_str(out)
return None
return py_str(out)
return None
return get_target_triple
# assign so as default output format
create_shared.output_format = "so" if sys.platform != "win32" else "dll"
create_shared.get_target_triple = get_target_by_dump_machine(os.environ.get("CXX", get_cc()))
def cross_compiler(
compile_func, options=None, output_format=None, get_target_triple=None, add_files=None
):
"""Create a cross compiler function by specializing compile_func with options.
This function can be used to construct compile functions that
can be passed to AutoTVM measure or export_library.
Parameters
----------
compile_func : Union[str, Callable[[str, str, Optional[str]], None]]
Function that performs the actual compilation
options : Optional[List[str]]
List of additional optional string.
output_format : Optional[str]
Library output format.
get_target_triple: Optional[Callable]
Function that can get the target triple according to the dumpmachine option of the compiler.
add_files: Optional[List[str]]
List of paths to additional object, source, library files
to pass as part of the compilation.
Returns
-------
fcompile : Callable[[str, str, Optional[str]], None]
A compilation function that can be passed to export_library.
Examples
--------
.. code-block:: python
from tvm.contrib import cc, ndk
# export using arm gcc
mod = build_runtime_module()
mod.export_library(path_dso,
cc.cross_compiler("arm-linux-gnueabihf-gcc"))
# specialize ndk compilation options.
specialized_ndk = cc.cross_compiler(
ndk.create_shared,
["--sysroot=/path/to/sysroot", "-shared", "-fPIC", "-lm"])
mod.export_library(path_dso, specialized_ndk)
"""
base_options = [] if options is None else options
kwargs = {}
add_files = [] if add_files is None else add_files
# handle case where compile_func is the name of the cc
if isinstance(compile_func, str):
kwargs = {"cc": compile_func}
compile_func = create_shared
def _fcompile(outputs, objects, options=None):
all_options = list(base_options)  # copy so repeated calls do not mutate base_options
if options is not None:
all_options += options
compile_func(outputs, objects + add_files, options=all_options, **kwargs)
if not output_format and hasattr(compile_func, "output_format"):
output_format = compile_func.output_format
output_format = output_format if output_format else "so"
if not get_target_triple and hasattr(compile_func, "get_target_triple"):
get_target_triple = compile_func.get_target_triple
_fcompile.output_format = output_format
_fcompile.get_target_triple = get_target_triple
return _fcompile
def _linux_compile(output, objects, options, compile_cmd, compile_shared=False):
cmd = [compile_cmd]
if compile_cmd != "nvcc":
if compile_shared or output.endswith(".so") or output.endswith(".dylib"):
cmd += ["-shared", "-fPIC"]
if sys.platform == "darwin":
cmd += ["-undefined", "dynamic_lookup"]
elif output.endswith(".obj"):
cmd += ["-c"]
else:
if compile_shared or output.endswith(".so") or output.endswith(".dylib"):
cmd += ["--shared"]
cmd += ["-o", output]
if isinstance(objects, str):
cmd += [objects]
else:
cmd += objects
if options:
cmd += options
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Compilation error:\n"
msg += py_str(out)
msg += "\nCommand line: " + " ".join(cmd)
raise RuntimeError(msg)
def _windows_compile(output, objects, options):
cmd = ["clang"]
cmd += ["-O2"]
if output.endswith(".so") or output.endswith(".dll"):
cmd += ["-shared"]
elif output.endswith(".obj"):
cmd += ["-c"]
if isinstance(objects, str):
objects = [objects]
cmd += ["-o", output]
cmd += objects
if options:
cmd += options
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
except FileNotFoundError:
raise RuntimeError(
"Can not find the LLVM clang for Windows clang.exe)."
"Make sure it's installed"
" and the installation directory is in the %PATH% environment "
"variable. Prebuilt binaries can be found at: https://llvm.org/"
)
if proc.returncode != 0:
msg = "Compilation error:\n"
msg += " ".join(cmd) + "\n"
msg += py_str(out)
raise RuntimeError(msg)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/clang.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Util to invoke clang in the system."""
# pylint: disable=invalid-name
import subprocess
from tvm._ffi.base import py_str
import tvm.target
from . import utils
def find_clang(required=True):
"""Find clang in system.
Parameters
----------
required : bool
Whether it is required,
runtime error will be raised if the compiler is required.
Returns
-------
valid_list : list of str
List of possible paths.
Note
----
This function will first search clang that
matches the major llvm version that built with tvm
"""
cc_list = []
major = tvm.target.codegen.llvm_version_major(allow_none=True)
if major is not None:
cc_list += ["clang-%d.0" % major]
cc_list += ["clang-%d" % major]
cc_list += ["clang"]
cc_list += ["clang.exe"]
valid_list = [utils.which(x) for x in cc_list]
valid_list = [x for x in valid_list if x]
if not valid_list and required:
raise RuntimeError("cannot find clang, candidates are: " + str(cc_list))
return valid_list
def create_llvm(inputs, output=None, options=None, cc=None):
"""Create llvm text ir.
Parameters
----------
inputs : list of str
List of input files name or code source.
output : str, optional
Output file, if it is none
a temporary file is created
options : list
The list of additional options string.
cc : str, optional
The clang compiler, if not specified,
we will try to guess the matched clang version.
Returns
-------
code : str
The generated llvm text IR.
"""
cc = cc if cc else find_clang()[0]
cmd = [cc]
cmd += ["-S", "-emit-llvm"]
temp = utils.tempdir()
output = output if output else temp.relpath("output.ll")
inputs = [inputs] if isinstance(inputs, str) else inputs
input_files = []
for i, code in enumerate(inputs):
if utils.is_source_path(code):
input_files.append(code)
else:
temp_path = temp.relpath("input%d.cc" % i)
with open(temp_path, "w") as output_file:
output_file.write(code)
input_files.append(temp_path)
if options:
cmd += options
cmd += ["-o", output]
cmd += input_files
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Compilation error:\n"
msg += py_str(out)
raise RuntimeError(msg)
return open(output).read()
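# --- Illustrative sketch (assumption: a matching clang is installed and on PATH) ---
def _example_create_llvm():
    src = 'extern "C" int add_one(int x) { return x + 1; }'
    # Compile the in-memory C++ snippet to LLVM text IR and return it as a string.
    return create_llvm([src], options=["-O2"])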
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/coreml_runtime.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CoreML runtime that load and run coreml models."""
import tvm._ffi
from ..rpc import base as rpc_base
def create(symbol, compiled_model_path, device):
"""Create a runtime executor module given a coreml model and context.
Parameters
----------
symbol : str
The symbol that represents the Core ML model.
compiled_model_path : str
The path of the compiled model to be deployed.
device : Device
The device to deploy the module. It can be local or remote when there
is only one Device.
Returns
-------
coreml_runtime : CoreMLModule
Runtime coreml module that can be used to execute the coreml model.
"""
device_type = device.device_type
runtime_func = "tvm.coreml_runtime.create"
if device_type >= rpc_base.RPC_SESS_MASK:
fcreate = device._rpc_sess.get_function(runtime_func)
else:
fcreate = tvm._ffi.get_global_func(runtime_func)
return CoreMLModule(fcreate(symbol, compiled_model_path))
class CoreMLModule(object):
"""Wrapper runtime module.
This is a thin wrapper of the underlying TVM module.
You can also directly call set_input, run, and get_output
of the underlying module functions.
Parameters
----------
module : Module
The internal tvm module that holds the actual coreml functions.
Attributes
----------
module : Module
The internal tvm module that holds the actual coreml functions.
"""
def __init__(self, module):
self.module = module
self.invoke = module["invoke"]
self.set_input = module["set_input"]
self.get_output = module["get_output"]
self.get_num_outputs = module["get_num_outputs"]
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/cublas.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External function interface to cuBLAS libraries."""
import tvm
from tvm import te
def matmul(lhs, rhs, transa=False, transb=False, dtype=None):
"""Create an extern op that compute matrix mult of A and rhs with cuBLAS
Parameters
----------
lhs : Tensor
The left matrix operand
rhs : Tensor
The right matrix operand
transa : bool
Whether transpose lhs
transb : bool
Whether transpose rhs
Returns
-------
C : Tensor
The result tensor.
"""
n = lhs.shape[1] if transa else lhs.shape[0]
m = rhs.shape[0] if transb else rhs.shape[1]
dtype = dtype if dtype is not None else lhs.dtype
return te.extern(
(n, m),
[lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.cublas.matmul", ins[0], ins[1], outs[0], transa, transb
),
dtype=dtype,
name="matmul_cublas",
)
def batch_matmul(lhs, rhs, transa=False, transb=False, dtype=None):
"""Create an extern op that compute batch matrix mult of A and rhs with cuBLAS
Parameters
----------
lhs : Tensor
The left matrix operand
rhs : Tensor
The right matrix operand
transa : bool
Whether transpose lhs
transb : bool
Whether transpose rhs
Returns
-------
C : Tensor
The result tensor.
"""
b = lhs.shape[0]
n = lhs.shape[2] if transa else lhs.shape[1]
m = rhs.shape[1] if transb else rhs.shape[2]
dtype = dtype if dtype is not None else lhs.dtype
return te.extern(
(b, n, m),
[lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.cublas.batch_matmul", ins[0], ins[1], outs[0], transa, transb
),
dtype=dtype,
name="batch_matmul_cublas",
)
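# --- Illustrative sketch (assumption: TVM was built with USE_CUBLAS and a CUDA GPU is present) ---
def _example_cublas_matmul(n=1024, l=512, m=256):
    A = te.placeholder((n, l), name="A")
    B = te.placeholder((l, m), name="B")
    C = matmul(A, B)  # (n, m) result computed by cuBLAS at runtime
    return te.create_schedule(C.op), [A, B, C]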
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/cublaslt.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External function interface to cuBLASlt libraries."""
import tvm
from tvm import te
def matmul(lhs, rhs, transa=False, transb=False, n=0, m=0, dtype=None):
"""Create an extern op that compute matrix mult of A and rhs with cuBLAS
Parameters
----------
lhs : Tensor
The left matrix operand
rhs : Tensor
The right matrix operand
transa : bool
Whether transpose lhs
transb : bool
Whether transpose rhs
Returns
-------
C : Tensor
The result tensor.
"""
if n == 0:
n = lhs.shape[1] if transa else lhs.shape[0]
if m == 0:
m = rhs.shape[0] if transb else rhs.shape[1]
dtype = dtype if dtype is not None else lhs.dtype
return te.extern(
(n, m),
[lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.cublaslt.matmul", ins[0], ins[1], outs[0], transa, transb
),
dtype=dtype,
name="C",
)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/cuda_graph/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/cuda_graph/cuda_graph_executor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Graph executor with CUDA Graph"""
import tvm._ffi
from tvm._ffi.base import string_types
from tvm.contrib import graph_executor
def create(graph_json_str, libmod, device):
"""Create a runtime executor module given a graph and module.
Parameters
----------
graph_json_str : str
The graph to be deployed in json format output by json graph.
The graph can contain operator(tvm_op) that points to the name
of PackedFunc in the libmod.
libmod : tvm.runtime.Module
The module of the corresponding function
device : Device
The device to deploy the module, only supports CUDA GPU
Returns
-------
graph_module : GraphModuleCudaGraph
CUDA graph executor module that can be used to execute the graph.
Note
----
See also :py:class:`tvm.contrib.cuda_graph.cuda_graph_executor.GraphModuleCudaGraph`
for examples to directly construct a GraphModuleCudaGraph from an exported
relay compiled library.
"""
assert isinstance(graph_json_str, string_types)
try:
dev, num_rpc_dev, device_type_id = graph_executor.get_device(libmod, device)
if num_rpc_dev == len(dev):
fcreate = dev[0]._rpc_sess.get_function("tvm.graph_executor_cuda_graph.create")
else:
fcreate = tvm._ffi.get_global_func("tvm.graph_executor_cuda_graph.create")
except ValueError:
raise ValueError(
"To enable CUDA graph support (experimental), please set "
"'(USE_GRAPH_EXECUTOR_CUGRAPH ON)' in config.cmake and rebuild TVM"
)
return GraphModuleCudaGraph(fcreate(graph_json_str, libmod, *device_type_id))
class GraphModuleCudaGraph(graph_executor.GraphModule):
"""CUDA graph executor module.
This is a CUDA graph executor wrapper over the TVM runtime.
Runtime interfaces are wrapped with CUDA graph functionalities.
Parameters
----------
module : Module
The internal tvm module that holds the actual graph functions.
"""
def __init__(self, module):
self._start_capture = module["start_capture"]
self._end_capture = module["end_capture"]
self._run_cuda_graph = module["run_cuda_graph"]
self._cuda_graph_captured = False
graph_executor.GraphModule.__init__(self, module)
def capture_cuda_graph(self):
"""Capture a CUDA graph for tvm_op graph
This should be called before run_cuda_graph() to capture and
instantiate a CUDA graph instance.
"""
self._run() # call cuModuleLoadData before cudaStream API
self._start_capture()
self._run()
self._end_capture()
self._cuda_graph_captured = True
def run_cuda_graph(self):
"""Run the CUDA graph for tvm_op graph
Run the captured CUDA graph instance instead of the
for-loop kernel launch of default graph executor
"""
self._run_cuda_graph()
def run(self, **input_dict):
"""A run wrapper for graph capture / launch, user can just
change default graph executor to cuda graph executor, and
the first call will capture a cuda graph for future launch
Parameters
----------
input_dict: dict of str to NDArray
Dict of input name to input value to be fed to the graph
"""
if input_dict:
self.set_input(**input_dict)
if not self._cuda_graph_captured:
self.capture_cuda_graph()
else:
self._run_cuda_graph()
def debug_get_output(self, node, out):
"""Run graph up to node and get the output to out
Parameters
----------
node : int / str
The node index or name
out : NDArray
The output array container
"""
raise NotImplementedError("Please use debugger.debug_executor as graph_executor instead.")
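# --- Illustrative sketch (assumptions: `graph_json` and `lib` come from relay.build via
# lib.get_graph_json() / lib.get_lib(), and TVM was built with USE_GRAPH_EXECUTOR_CUGRAPH) ---
def _example_run_with_cuda_graph(graph_json, lib, input_name, input_data):
    dev = tvm.cuda(0)
    gmod = create(graph_json, lib, dev)
    gmod.set_input(input_name, input_data)
    gmod.run()  # the first run() captures the CUDA graph; later calls replay it
    return gmod.get_output(0)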
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/cudnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External function interface to CuDNN v7 library."""
# pylint: disable-msg=C0103
import ctypes
import numpy as np
import tvm
import tvm._ffi
from tvm import te
# algos can be read from cudnn.h
_FWD_ALGOS = [
"CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM",
"CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM",
"CUDNN_CONVOLUTION_FWD_ALGO_GEMM",
"CUDNN_CONVOLUTION_FWD_ALGO_DIRECT",
"CUDNN_CONVOLUTION_FWD_ALGO_FFT",
"CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING",
"CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD",
"CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED",
"CUDNN_CONVOLUTION_FWD_ALGO_COUNT",
]
def exists():
"""
Checks whether the local machine can use CuDNN.
Returns
-------
exists: bool
True if CuDNN support is enabled and a CuDNN-capable GPU
exists. Otherwise, False.
"""
func = tvm.get_global_func("tvm.contrib.cudnn.exists", allow_missing=True)
if func is None:
return False
return bool(func())
def algo_to_index(algo_type, algo_name):
"""Return a index represents the algorithm, which can be used in
calling CuDNN function
Parameters
----------
algo_type : str
["fwd", "bwd_filter", "bwd_data]
algo_name : str
algorithm name in cudnn definition
fwd = [
"CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM",
"CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM",
"CUDNN_CONVOLUTION_FWD_ALGO_GEMM",
"CUDNN_CONVOLUTION_FWD_ALGO_DIRECT",
"CUDNN_CONVOLUTION_FWD_ALGO_FFT",
"CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING",
"CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD",
"CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED",
"CUDNN_CONVOLUTION_FWD_ALGO_COUNT",
]
bwd_filter = [
"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0",
# non-deterministic
"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1",
"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT",
"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3",
# non-deterministic, algo0 with workspace
"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD",
# not implemented
"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED",
"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING",
"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT",
]
bwd_data = [
"CUDNN_CONVOLUTION_BWD_DATA_ALGO_0",
# non-deterministic
"CUDNN_CONVOLUTION_BWD_DATA_ALGO_1",
"CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT",
"CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING",
"CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD",
"CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED",
"CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT",
]
Returns
-------
algo: int
algorithm index
"""
idx = -1
if algo_type == "fwd":
idx = _FWD_ALGOS.index(algo_name)
elif algo_type == "bwd_filter":
idx = _BWD_FILTER_ALGOS.index(algo_name)
elif algo_type == "bwd_data":
idx = _BWD_DATA_ALGOS.index(algo_name)
assert idx >= 0
return idx
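# --- Illustrative sketch ---
def _example_algo_to_index():
    # "CUDNN_CONVOLUTION_FWD_ALGO_GEMM" is the third entry in _FWD_ALGOS, so this returns 2.
    return algo_to_index("fwd", "CUDNN_CONVOLUTION_FWD_ALGO_GEMM")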
def _get_np_int32_array_handle(arr):
"""Return a void_p handle for a numpy array
Parameters
----------
arr: numpy.NDArray
source numpy array
Returns
-------
ptr: ctypes.c_void_p
pointer to the data
"""
assert arr.dtype == np.int32
ptr = arr.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
return ctypes.cast(ptr, ctypes.c_void_p)
def _prepare_global_func_params(dims, pad, stride, dilation, x_shape=None, w_shape=None):
full_dims = dims + 2
if x_shape:
assert isinstance(x_shape, list)
assert len(x_shape) == full_dims
if w_shape:
assert isinstance(w_shape, list)
assert len(w_shape) == full_dims
pad = (
np.full(dims, pad, dtype=np.int32)
if isinstance(pad, int)
else np.array(pad, dtype=np.int32)
)
stride = (
np.full(dims, stride, dtype=np.int32)
if isinstance(stride, int)
else np.array(stride, dtype=np.int32)
)
dilation = (
np.full(dims, dilation, dtype=np.int32)
if isinstance(dilation, int)
else np.array(dilation, dtype=np.int32)
)
xshape = np.array(x_shape, dtype=np.int32) if x_shape else None
wshape = np.array(w_shape, dtype=np.int32) if x_shape else None
return pad, stride, dilation, xshape, wshape
def conv_output_shape(
tensor_format, pad, stride, dilation, x_shape, w_shape, data_dtype, conv_dtype, groups=1
):
"""Get output shape of 2D or 3D convolution
Parameters
----------
tensor_format: int
0: CUDNN_TENSOR_NCHW
1: CUDNN_TENSOR_NHWC
2: CUDNN_TENSOR_NCHW_VECT_C
pad: int or list
padding
stride: int or list
stride
dilation: int or list
dilation
x_shape: list
input shape
w_shape: list
weight shape
data_dtype: str
data type
conv_dtype: str
convolution type
groups: int
number of groups
Returns
-------
oshape: list
output shape
"""
assert len(x_shape) == len(w_shape)
assert len(x_shape) in (4, 5)
if tensor_format == 0:
n_output = x_shape[0]
c_output = w_shape[0]
x_chan = x_shape[1]
w_chan_input = w_shape[1]
x_shape = x_shape[2:]
w_shape = w_shape[2:]
elif tensor_format == 1:
n_output = x_shape[0]
c_output = w_shape[0]
x_chan = x_shape[-1]
w_chan_input = w_shape[-1]
assert len(x_shape) == 4, "CuDNN layout NHWC is only well-defined for 4d tensors"
x_shape = x_shape[1:-1]
w_shape = w_shape[1:-1]
elif tensor_format == 2:
n_output = x_shape[0]
c_output = w_shape[0]
x_chan = x_shape[1]
w_chan_input = w_shape[1]
w_lanes = tvm.runtime.DataType(conv_dtype).lanes
assert w_lanes == 1
x_shape = x_shape[2:]
w_shape = w_shape[2:]
else:
raise ValueError("Unknown CuDNN tensor format: '{}'".format(tensor_format))
x_lanes = tvm.runtime.DataType(data_dtype).lanes
assert x_chan * x_lanes == w_chan_input * groups, (
"Mismatched dimensions, data has {} channels/group "
"(dimension {} with {} lanes/value, {} groups), "
"but weights require {} input channels/group"
).format(x_chan // groups, x_chan, x_lanes, groups, w_chan_input)
output_dims = []
for x_shape_i, w_shape_i, pad_i, stride_i, dilation_i in zip(
x_shape, w_shape, pad, stride, dilation
):
output_dim = 1 + (x_shape_i + 2 * pad_i - (((w_shape_i - 1) * dilation_i) + 1)) // stride_i
output_dims.append(output_dim)
if tensor_format in [0, 2]:
output = [n_output, c_output, *output_dims]
elif tensor_format == 1:
output = [n_output, *output_dims, c_output]
else:
raise ValueError("Unknown CuDNN tensor format: '{}'".format(tensor_format))
return output
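# --- Illustrative sketch: a worked example of conv_output_shape ---
def _example_conv_output_shape():
    # NCHW input 1x3x224x224 with 64 filters of 3x7x7, pad 3, stride 2, dilation 1.
    # Each spatial dim: 1 + (224 + 2*3 - ((7 - 1)*1 + 1)) // 2 = 112 -> [1, 64, 112, 112]
    return conv_output_shape(
        0, [3, 3], [2, 2], [1, 1], [1, 3, 224, 224], [64, 3, 7, 7], "float32", "float32"
    )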
def conv_dgrad_shape(
tensor_format, pad, stride, dilation, dy_shape, w_shape, output_padding=(0, 0), groups=1
):
"""Get output shape of conv2d gradient with respect to data
Parameters
----------
tensor_format: int
0: CUDNN_TENSOR_NCHW
1: CUDNN_TENSOR_NHWC
pad: int or list
padding
stride: int or list
stride
dilation: int or list
dilation
dy_shape: list
output gradient shape
w_shape: list
weight shape
output_padding: tuple of int
output padding used to disambiguate the recovered input shape
groups: int
number of groups
Returns
-------
oshape: list
output shape
"""
assert len(dy_shape) == len(w_shape)
assert len(dy_shape) == 4
if tensor_format == 0:
N = dy_shape[0]
C = w_shape[1] * groups
dy_shape = dy_shape[2:]
w_shape = w_shape[2:]
elif tensor_format == 1:
N = dy_shape[0]
C = w_shape[-1] * groups
dy_shape = dy_shape[1:-1]
w_shape = w_shape[1:-1]
else:
raise ValueError("Unsupported CuDNN tensor format: '{}'".format(tensor_format))
input_dims = []
for dy_shape_i, w_shape_i, pad_i, stride_i, dilation_i, out_pad in zip(
dy_shape, w_shape, pad, stride, dilation, output_padding
):
input_dim = (
(dy_shape_i - 1) * stride_i - 2 * pad_i + (((w_shape_i - 1) * dilation_i) + 1) + out_pad
)
input_dims.append(input_dim)
if tensor_format == 0:
output = [N, C, *input_dims]
else:
output = [N, *input_dims, C]
return output
def _conv_find_algo(
func_name,
tensor_format,
pad,
stride,
dilation,
x_shape,
w_shape,
y_shape,
data_dtype,
conv_dtype,
groups=1,
):
"""
Common function to choose the best cudnn convolution algorithm for the given input
and the convolution type.
"""
dims = len(x_shape)
assert dims in (4, 5)
pad, stride, dilation, xshape, wshape = _prepare_global_func_params(
dims - 2, pad, stride, dilation, x_shape, w_shape
)
yshape = np.array(y_shape, dtype=np.int32)
func = tvm._ffi.get_global_func(func_name)
return func(
tensor_format,
dims - 2,
_get_np_int32_array_handle(pad),
_get_np_int32_array_handle(stride),
_get_np_int32_array_handle(dilation),
_get_np_int32_array_handle(xshape),
_get_np_int32_array_handle(wshape),
_get_np_int32_array_handle(yshape),
data_dtype,
conv_dtype,
groups,
)
def conv_forward_find_algo(
tensor_format,
pad,
stride,
dilation,
x_shape,
w_shape,
y_shape,
data_dtype,
conv_dtype,
groups=1,
):
"""Choose the best forward algorithm for the given input.
Parameters
----------
tensor_format: int
0: CUDNN_TENSOR_NCHW
1: CUDNN_TENSOR_NHWC
2: CUDNN_TENSOR_NCHW_VECT_C
pad: int or list
padding
stride: int or list
stride
dilation: int or list
dilation
x_shape: list
input shape
w_shape: list
weight shape
y_shape: list
output shape
data_dtype: str
data type
conv_dtype: str
convolution type
groups: int
number of groups
Returns
-------
algo: int
algo chosen by CUDNN
"""
return _conv_find_algo(
"tvm.contrib.cudnn.conv.forward_find_algo",
tensor_format,
pad,
stride,
dilation,
x_shape,
w_shape,
y_shape,
data_dtype,
conv_dtype,
groups,
)
def conv_backward_data_find_algo(
tensor_format,
pad,
stride,
dilation,
dy_shape,
w_shape,
dx_shape,
data_dtype,
conv_dtype,
groups=1,
):
"""Choose the best backward data algorithm for the given input.
Parameters
----------
tensor_format: int
0: CUDNN_TENSOR_NCHW
1: CUDNN_TENSOR_NHWC
2: CUDNN_TENSOR_NCHW_VECT_C
pad: int or list
padding
stride: int or list
stride
dilation: int or list
dilation
dy_shape: list
output gradient shape
w_shape: list
weight shape
dx_shape: list
dgrad shape
data_dtype: str
data type
conv_dtype: str
convolution type
groups: int
number of groups
Returns
-------
algo: int
algo chosen by CUDNN
"""
return _conv_find_algo(
"tvm.contrib.cudnn.conv.backward_data_find_algo",
tensor_format,
pad,
stride,
dilation,
dy_shape,
w_shape,
dx_shape,
data_dtype,
conv_dtype,
groups,
)
def conv_backward_filter_find_algo(
tensor_format,
pad,
stride,
dilation,
dy_shape,
x_shape,
dw_shape,
data_dtype,
conv_dtype,
groups=1,
):
"""Choose the best backward filter algorithm for the given input.
Parameters
----------
tensor_format: int
0: CUDNN_TENSOR_NCHW
1: CUDNN_TENSOR_NHWC
2: CUDNN_TENSOR_NCHW_VECT_C
pad: int or list
padding
stride: int or list
stride
dilation: int or list
dilation
dy_shape: list
output gradient shape
x_shape: list
input shape
dw_shape: list
wgrad shape
data_dtype: str
data type
conv_dtype: str
convolution type
groups: int
number of groups
Returns
-------
algo: int
algo chosen by CUDNN
"""
return _conv_find_algo(
"tvm.contrib.cudnn.conv.backward_filter_find_algo",
tensor_format,
pad,
stride,
dilation,
dy_shape,
x_shape,
dw_shape,
data_dtype,
conv_dtype,
groups,
)
def conv_forward(x, w, pad, stride, dilation, conv_mode, tensor_format, algo, conv_dtype, groups=1):
"""Create an extern op that compute 2D or 3D convolution with CuDNN
Parameters
----------
x: Tensor
input feature map
w: Tensor
convolution weight
pad: int or list
padding
stride: int or list
stride
dilation: int or list
dilation
conv_mode: int
0: CUDNN_CONVOLUTION
1: CUDNN_CROSS_CORRELATION
tensor_format: int
0: CUDNN_TENSOR_NCHW
1: CUDNN_TENSOR_NHWC
2: CUDNN_TENSOR_NCHW_VECT_C
algo: int
Forward algorithm, get index from ```algo_to_index``` function
if algo == -1, the best algo will be chosen by CUDNN
conv_dtype: str
convolution type
groups: int
the number of groups
Returns
-------
y: Tensor
The result tensor
"""
dims = len(x.shape)
assert dims in (4, 5)
conv_dtype = x.dtype if conv_dtype is None else conv_dtype
pad, stride, dilation, _, _ = _prepare_global_func_params(dims - 2, pad, stride, dilation)
x_shape = list(x.shape)
if isinstance(x.shape[0], tvm.tir.expr.IntImm):
oshape = conv_output_shape(
tensor_format,
pad,
stride,
dilation,
x_shape,
list(w.shape),
x.dtype,
conv_dtype,
groups,
)
if algo == -1:
# For now if we try to call `cudnnFindConvolutionForwardAlgorithm` when
# using INT8 data type, CuDNN will crash.
# On the other hand, CuDNN only supports IMPLICIT_PRECOMP_GEMM for the NHWC format
if tensor_format == 1 and conv_dtype == "int32":
algo = 1
else:
algo = conv_forward_find_algo(
tensor_format,
pad,
stride,
dilation,
list(x.shape),
list(w.shape),
oshape,
x.dtype,
conv_dtype,
groups,
)
else:
# The dynamic batch size case, pretend this is a single batch
x_shape[0] = 1
oshape = conv_output_shape(
tensor_format,
pad,
stride,
dilation,
x_shape,
list(w.shape),
x.dtype,
conv_dtype,
groups,
)
oshape[0] = x.shape[0]
# This picks CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM
# It seems this is the fastest among algorithms that are always applicable
algo = 1
if dims == 4:
return te.extern(
oshape,
[x, w],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.cudnn.conv2d.forward",
conv_mode,
tensor_format,
algo,
pad[0],
pad[1],
stride[0],
stride[1],
dilation[0],
dilation[1],
ins[0],
ins[1],
outs[0],
conv_dtype,
groups,
),
name="y",
)
return te.extern(
oshape,
[x, w],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.cudnn.conv3d.forward",
conv_mode,
tensor_format,
algo,
pad[0],
pad[1],
pad[2],
stride[0],
stride[1],
stride[2],
dilation[0],
dilation[1],
dilation[2],
ins[0],
ins[1],
outs[0],
conv_dtype,
groups,
),
name="y",
)
def conv_backward_data(
dy,
w,
pad,
stride,
dilation,
conv_mode,
tensor_format,
conv_dtype,
groups=1,
output_padding=(0, 0),
):
"""Create a CuDNN extern op that computes the gradient of 2D convolution with respect to data.
Parameters
----------
dy: Tensor
output gradient
w: Tensor
convolution weight
pad: int or list
padding
stride: int or list
stride
dilation: int or list
dilation
conv_mode: int
0: CUDNN_CONVOLUTION
1: CUDNN_CROSS_CORRELATION
tensor_format: int
0: CUDNN_TENSOR_NCHW
1: CUDNN_TENSOR_NHWC
conv_dtype: str
convolution type
groups: int
the number of groups
Returns
-------
dx: Tensor
dgrad tensor
"""
dims = len(dy.shape)
assert dims == 4
conv_dtype = dy.dtype if conv_dtype is None else conv_dtype
pad, stride, dilation, _, _ = _prepare_global_func_params(dims - 2, pad, stride, dilation)
assert isinstance(
dy.shape[0], tvm.tir.expr.IntImm
), "Dynamic batch is not supported for cudnn conv2d backwad data yet."
dx_shape = conv_dgrad_shape(
tensor_format, pad, stride, dilation, dy.shape, w.shape, output_padding, groups
)
if exists():
# When cudnn exists, find the backward data algo
algo = conv_backward_data_find_algo(
tensor_format,
pad,
stride,
dilation,
list(dy.shape),
list(w.shape),
dx_shape,
dy.dtype,
conv_dtype,
groups,
)
else:
algo = 1
return te.extern(
dx_shape,
[dy, w],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.cudnn.conv2d.backward_data",
conv_mode,
tensor_format,
algo,
pad[0],
pad[1],
stride[0],
stride[1],
dilation[0],
dilation[1],
ins[0],
ins[1],
outs[0],
conv_dtype,
groups,
),
name="dx",
)
def conv_backward_filter(
dy, x, kernel_size, pad, stride, dilation, conv_mode, tensor_format, conv_dtype, groups=1
):
"""Create a CuDNN extern op that computes the gradient of 2D convolution with respect to weight.
Parameters
----------
dy: Tensor
output gradient
x: Tensor
input tensor
kernel_size: a pair of int
The spatial size of the corresponding forward convolution kernel
pad: int or list
padding
stride: int or list
stride
dilation: int or list
dilation
conv_mode: int
0: CUDNN_CONVOLUTION
1: CUDNN_CROSS_CORRELATION
tensor_format: int
0: CUDNN_TENSOR_NCHW
1: CUDNN_TENSOR_NHWC
conv_dtype: str
convolution type
groups: int
the number of groups
Returns
-------
dw: Tensor
wgrad tensor
"""
dims = len(x.shape)
assert dims == 4
conv_dtype = x.dtype if conv_dtype is None else conv_dtype
pad, stride, dilation, _, _ = _prepare_global_func_params(dims - 2, pad, stride, dilation)
filter_h, filter_w = kernel_size
x_shape = list(x.shape)
assert isinstance(
x.shape[0], tvm.tir.expr.IntImm
), "Dynamic batch is not supported for cudnn conv2d backwad filter yet."
ic_ind = 1 if tensor_format == 0 else 3
if groups > 1:
assert (
x_shape[ic_ind] == dy.shape[ic_ind] and x_shape[ic_ind] == groups
), "Only depthwise wgrad supported for groups > 1."
ic = 1
else:
ic = x_shape[ic_ind]
if tensor_format == 0:
dw_shape = [dy.shape[1], ic, filter_h, filter_w]
else:
dw_shape = [dy.shape[3], filter_h, filter_w, ic]
algo = conv_backward_filter_find_algo(
tensor_format,
pad,
stride,
dilation,
list(dy.shape),
list(x.shape),
dw_shape,
x.dtype,
conv_dtype,
groups,
)
return te.extern(
dw_shape,
[dy, x],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.cudnn.conv2d.backward_filter",
conv_mode,
tensor_format,
algo,
pad[0],
pad[1],
stride[0],
stride[1],
dilation[0],
dilation[1],
ins[0],
ins[1],
outs[0],
conv_dtype,
groups,
),
name="dw",
)
def softmax(x, axis=-1):
"""Compute softmax using CuDNN
Parameters
----------
x : tvm.te.Tensor
The input tensor
axis : int
The axis to compute the softmax
Returns
-------
ret : tvm.te.Tensor
The result tensor
"""
return te.extern(
x.shape,
[x],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.cudnn.softmax.forward", ins[0], outs[0], axis
),
name="y",
)
def log_softmax(x, axis=-1):
"""Compute log_softmax using CuDNN
Parameters
----------
x : tvm.te.Tensor
The input tensor
axis : int
The axis to compute log softmax over
Returns
-------
ret : tvm.te.Tensor
The result tensor
"""
return te.extern(
x.shape,
[x],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.cudnn.log_softmax.forward", ins[0], outs[0], axis
),
name="y",
)
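# --- Illustrative sketch (assumption: TVM was built with CuDNN support and a CUDA GPU is present) ---
def _example_cudnn_softmax():
    x = te.placeholder((4, 1000), name="x", dtype="float32")
    y = softmax(x, axis=-1)  # lowered to a cudnnSoftmaxForward call at runtime
    return te.create_schedule(y.op), [x, y]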
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/cutlass/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""BYOC support for CUTLASS."""
from .build import has_cutlass, num_cutlass_partitions, finalize_modules, finalize_modules_vm
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/cutlass/build.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, dangerous-default-value
"""Driver for partitioning and building a Relay module for CUTLASS offload."""
import logging
import os
import multiprocessing
import tvm
from tvm import runtime, relay
from tvm.contrib.nvcc import get_cuda_version
from tvm._ffi.registry import register_func
from .gen_gemm import CutlassGemmProfiler
from .gen_conv2d import CutlassConv2DProfiler
from .library import ConvKind
logger = logging.getLogger("cutlass")
def has_cutlass():
"""Returns true if the CUTLASS custom codegen is available"""
return tvm.get_global_func("relay.ext.cutlass.create_c_source_module", True) is not None
def _get_cutlass_path():
tvm_root = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../../")
cutlass_path = os.path.join(tvm_root, "3rdparty/cutlass")
assert os.path.exists(
cutlass_path
), """The CUTLASS root directory not found in {}.
Currently, using CUTLASS requires building TVM from source.""".format(
cutlass_path
)
return cutlass_path
def _get_cutlass_compile_options(sm, threads, use_fast_math=False):
cutlass_root = _get_cutlass_path()
cutlass_include = os.path.join(cutlass_root, "include")
cutlass_util_include = os.path.join(cutlass_root, "tools/util/include")
kwargs = {}
kwargs["cc"] = "nvcc"
kwargs["options"] = [
"-c",
"-DCUTLASS_ENABLE_TENSOR_CORE_MMA=1",
"-gencode=arch=compute_%d,code=[sm_%d,compute_%d]" % (sm, sm, sm),
"-Xcompiler=-fPIC",
"-Xcompiler=-Wconversion",
"-Xcompiler=-fno-strict-aliasing",
"-O3",
"-std=c++17",
"-I" + cutlass_include,
"-I" + cutlass_util_include,
]
if use_fast_math:
kwargs["options"].append("-DCUTLASS_USE_TANH_FOR_SIGMOID")
cuda_ver = get_cuda_version()
if cuda_ver >= (11, 2):
ncpu = multiprocessing.cpu_count() if threads < 0 else threads
kwargs["options"].append("-t %d" % ncpu)
return kwargs
class OpAnnotator(tvm.relay.ExprVisitor):
"""Annotates partitioned functions with shape and dtype information."""
def __init__(self):
super().__init__()
self.signature = {}
def visit_call(self, call):
op = call.op
if isinstance(op, relay.Function) and "Composite" in op.attrs:
self.signature["op_type"] = op.attrs["Composite"]
for i, arg in enumerate(op.params):
self.signature["arg%d_shape" % i] = arg.checked_type.shape
self.signature["arg%d_dtype" % i] = arg.checked_type.dtype
self.signature["ret_shape"] = op.ret_type.shape
self.signature["ret_dtype"] = op.ret_type.dtype
self.visit(op.body)
if str(op) in ["nn.conv2d", "nn.conv2d_transpose", "nn.conv2d_backward_weight"]:
self.op_attrs = call.attrs
for arg in call.args:
self.visit(arg)
def select_gemm_kernel(
cutlass_profiler,
op_type,
MM,
KK,
NN,
out_dtype,
arg0_dtype,
arg1_dtype,
use_3xtf32,
batched,
find_first_valid,
use_multiprocessing,
):
"""Run CUTLASS profiler to select the best kernel, or return the default one for dynamic
workloads."""
if any(isinstance(s, tvm.tir.Any) for s in [MM, KK, NN]):
out = cutlass_profiler.get_default(
op_type, out_dtype, arg0_dtype, arg1_dtype, use_3xtf32, batched=batched
)
name, cutlass_op_def = out["name"], out["opdef"]
logger.info("Picked the default kernel %s", name)
else:
name, cutlass_op_def, _ = cutlass_profiler.profile(
op_type,
MM,
NN,
KK,
out_dtype,
arg0_dtype,
arg1_dtype,
use_3xtf32,
batched=batched,
find_first_valid=find_first_valid,
use_multiprocessing=use_multiprocessing,
)
if not find_first_valid:
logger.info("The best kernel is %s", name)
else:
logger.info("Picked the first kernel found %s", name)
return name, cutlass_op_def
def handle_batch_matmul(
cutlass_profiler,
op_type,
arg0_shape,
arg1_shape,
out_dtype,
arg0_dtype,
arg1_dtype,
use_3xtf32,
find_first_valid,
use_multiprocessing,
):
"""Profile and select a kernel for batch_matmul op workload."""
MM = arg0_shape[1]
KK = arg0_shape[2]
NN = arg1_shape[1]
name, cutlass_op_def = select_gemm_kernel(
cutlass_profiler,
op_type,
MM,
KK,
NN,
out_dtype,
arg0_dtype,
arg1_dtype,
use_3xtf32,
True,
find_first_valid,
use_multiprocessing,
)
return {
"batch": arg0_shape[0],
"batch_stride_A": arg0_shape[1] * arg0_shape[2],
"batch_stride_B": arg1_shape[1] * arg1_shape[2],
"batch_stride_C": arg0_shape[1] * arg1_shape[1],
"cutlass_op_def": cutlass_op_def,
"cutlass_op_name": name,
"lda": "K",
"ldb": "K",
"ldc": "N",
}
def handle_dense(
cutlass_profiler,
op_type,
arg0_shape,
arg1_shape,
out_dtype,
arg0_dtype,
arg1_dtype,
use_3xtf32,
find_first_valid,
use_multiprocessing,
):
"""Profile and select a kernel for dense op workload."""
MM = arg0_shape[0]
KK = arg0_shape[1]
NN = arg1_shape[0]
name, cutlass_op_def = select_gemm_kernel(
cutlass_profiler,
op_type,
MM,
KK,
NN,
out_dtype,
arg0_dtype,
arg1_dtype,
use_3xtf32,
False,
find_first_valid,
use_multiprocessing,
)
assert "tn_align" in name, "Only supports (row_major, col_major) input layout for now."
return {
"cutlass_op_def": cutlass_op_def,
"cutlass_op_name": name,
"lda": "K",
"ldb": "K",
"ldc": "N",
}
def handle_conv2d(
cutlass_profiler,
op_type,
d_shape,
w_shape,
padding,
strides,
dilation,
out_dtype,
data_dtype,
weight_dtype,
use_3xtf32,
split_k_slices,
profile_all_alignments,
find_first_valid,
use_multiprocessing,
):
"""Profile and select a kernel for conv2d op workload."""
if "conv2d_transpose" in op_type:
conv_kind = ConvKind.Dgrad
elif "backward_weight" in op_type:
conv_kind = ConvKind.Wgrad
else:
conv_kind = ConvKind.Fprop
if any(isinstance(s, tvm.tir.Any) for s in d_shape):
out = cutlass_profiler.get_default(
op_type, out_dtype, data_dtype, weight_dtype, use_3xtf32, conv_kind, strides
)
name, cutlass_op_def = out["name"], out["opdef"]
logger.info("Picked the default kernel %s", name)
else:
name, cutlass_op_def, _ = cutlass_profiler.profile(
op_type,
d_shape,
w_shape,
padding,
strides,
dilation,
out_dtype,
data_dtype,
weight_dtype,
use_3xtf32,
conv_kind,
split_k_slices,
profile_all_alignments,
find_first_valid=find_first_valid,
use_multiprocessing=use_multiprocessing,
)
if not find_first_valid:
logger.info("The best kernel is %s", name)
else:
logger.info("Picked the first kernel found %s", name)
return {
"cutlass_op_def": cutlass_op_def,
"cutlass_op_name": name,
}
def num_cutlass_partitions(mod):
return sum([(1 if "cutlass" in var.name_hint else 0) for var in mod.get_global_vars()])
def tune_cutlass_kernels(
mod,
sm,
use_3xtf32=True,
split_k_slices=[1],
profile_all_alignments=False,
find_first_valid=False,
use_multiprocessing=False,
tmp_dir="./tmp",
):
"""Given a module partitioned for CUTLASS offloading, profile each workload to select which
kernels to emit.
Parameters
----------
mod : IRModule
The Relay module with cutlass partitions.
sm : int
An integer specifying the compute capability. For example, 75 for Turing and
80 or 86 for Ampere.
use_3xtf32 : bool
        Whether or not to use the slower but very accurate (compared to tf32) 3xtf32 mode for
        fp32 inputs on tensorcore.
split_k_slices : list of int
Split factor candidates for split-K GEMM. If split-K > 1, the GEMM K-loop is computed in
parallel across split-K blocks, and a separate global reduction kernel is launched to
accumulate partial reductions. The profiler will pick the best split-k factor from the
        given candidate list. Note that a larger split-K factor requires a larger workspace.
Currently, parallel split-k has been tested only for wgrad. For GEMM and other conv2d
kinds, split_k_slices is ignored.
profile_all_alignments : bool
        When True, profile all kernel variants with smaller alignments than the largest possible.
find_first_valid : bool
        If True, stop profiling after the first applicable kernel is found instead of
        profiling all candidate kernels.
use_multiprocessing : bool
        Whether or not to compile profiler executables for different kernels in parallel.
tmp_dir : string, optional
A temporary directory where intermediate compiled artifacts will be stored.
Returns
-------
mod : IRModule
The updated module annotated with cutlass profiling information.
num_cutlass_partition : int
The number of partitioned functions created for CUTLASS.
"""
gemm_profiler = CutlassGemmProfiler(sm, _get_cutlass_path(), tmp_dir)
conv2d_profiler = CutlassConv2DProfiler(sm, _get_cutlass_path(), tmp_dir)
num_cutlass_partition = 0
for var in mod.get_global_vars():
fun_name = var.name_hint
func = mod[fun_name]
if "cutlass" in fun_name:
num_cutlass_partition += 1
new_func = tune_cutlass_function(
func,
use_3xtf32,
split_k_slices,
profile_all_alignments,
find_first_valid,
use_multiprocessing,
gemm_profiler,
conv2d_profiler,
)
mod.update_func(var, new_func)
return mod, num_cutlass_partition
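# A minimal usage sketch (hedged): partition_for_cutlass below is assumed to be the
# BYOC partitioning helper and is not defined in this file.
#
#   mod = partition_for_cutlass(mod)
#   mod, num_cutlass_partition = tune_cutlass_kernels(mod, sm=80, tmp_dir="./tmp")
#
# Each "cutlass" function in the returned module carries attributes such as
# "cutlass_op_name" and "cutlass_op_def" that the C source codegen consumes.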
def tune_cutlass_function(
func,
use_3xtf32,
split_k_slices,
profile_all_alignments,
find_first_valid,
use_multiprocessing,
gemm_profiler,
conv2d_profiler,
):
"""Given a function intended to be offloaded to CUTLASS, profile each workload to select which
kernels to emit.
Parameters
----------
    func : Function
        The Relay function to tune for.
use_3xtf32 : bool
        Whether or not to use the slower but very accurate (compared to tf32) 3xtf32 mode for
        fp32 inputs on tensorcore.
split_k_slices : list of int
Split factor candidates for split-K GEMM. If split-K > 1, the GEMM K-loop is computed in
        parallel across split-K blocks, and a separate global reduction kernel is launched to
        accumulate partial reductions. The profiler will pick the best split-k factor from the
        given candidate list. Note that a larger split-K factor requires a larger workspace.
Currently, parallel split-k has been tested only for wgrad. For GEMM and other conv2d
kinds, split_k_slices is ignored.
profile_all_alignments : bool
        When True, profile all kernel variants with smaller alignments than the largest possible.
find_first_valid : bool
        If True, stop profiling after the first applicable kernel is found instead of
        profiling all candidate kernels.
use_multiprocessing : bool
        Whether or not to compile profiler executables for different kernels in parallel.
gemm_profiler : CutlassGemmProfiler
Profiler for dense operators. May cache results between tuned functions.
conv2d_profiler : CutlassConv2DProfiler
        Profiler for conv2d operators. May cache results between tuned functions.
Returns
-------
annot_func : Function
The input function with attributes capturing the best CUTLASS kernel found by tuning.
"""
annotator = OpAnnotator()
annotator.visit(func)
out_shape = annotator.signature["ret_shape"]
out_dtype = annotator.signature["ret_dtype"]
op_type = annotator.signature["op_type"]
new_attrs = {"op_type": op_type}
new_attrs.update(annotator.signature)
new_attrs.update(func.attrs)
arg0_shape = new_attrs["arg0_shape"]
arg1_shape = new_attrs["arg1_shape"]
arg0_dtype = new_attrs["arg0_dtype"]
arg1_dtype = new_attrs["arg1_dtype"]
if "conv2d" in op_type:
new_attrs["padding"] = annotator.op_attrs.padding
new_attrs["strides"] = annotator.op_attrs.strides
new_attrs["dilation"] = annotator.op_attrs.dilation
if "conv2d_transpose" in op_type:
d_shape = out_shape
w_shape = arg1_shape
elif "conv2d_backward_weight" in op_type:
d_shape = arg1_shape
w_shape = out_shape
else:
d_shape = arg0_shape
w_shape = arg1_shape
new_attrs.update(
handle_conv2d(
conv2d_profiler,
op_type,
d_shape,
w_shape,
annotator.op_attrs.padding,
annotator.op_attrs.strides,
annotator.op_attrs.dilation,
out_dtype,
arg0_dtype,
arg1_dtype,
use_3xtf32,
split_k_slices,
profile_all_alignments,
find_first_valid,
use_multiprocessing,
)
)
elif "batch_matmul" in op_type:
new_attrs.update(
handle_batch_matmul(
gemm_profiler,
op_type,
arg0_shape,
arg1_shape,
out_dtype,
arg0_dtype,
arg1_dtype,
use_3xtf32,
find_first_valid,
use_multiprocessing,
)
)
elif "dense" in op_type:
new_attrs.update(
handle_dense(
gemm_profiler,
op_type,
arg0_shape,
arg1_shape,
out_dtype,
arg0_dtype,
arg1_dtype,
use_3xtf32,
find_first_valid,
use_multiprocessing,
)
)
else:
raise ValueError("%s unsupported composite" % op_type)
new_attrs = tvm.ir.make_node("DictAttrs", **new_attrs)
return relay.Function(
func.params,
func.body,
ret_type=func.ret_type,
type_params=func.type_params,
attrs=new_attrs,
)
@register_func("relay.ext.cutlass.compile_for_cutlass")
def compile_for_cutlass(mod, cutlass_target):
"""Given an IRModule with at least one Compiler='cutlass' Relay function, return a
LibraryModule with all such functions compiled into their PackedFunc-compatible form.
    - First runs CUTLASS tuning to decide on the best kernels, which itself requires the
      repeated compilation and execution of CUDA code using nvcc. The results of this
      are captured as annotations on each relevant function. Kernel performance is cached
      across all functions.
- Then generates a single CSourceModule containing C code implementing all the
Compiler='cutlass' Relay functions, accounting for the tuning done above.
- Then compiles that CSourceModule with the appropriate nvcc arguments to yield
a static .o library. An export_library step will be required on the final runtime
module to link that library into the overall .so library.
See CompileForCutlass in src/relay/backend/contrib/cutlass/codegen.cc for where this
helper function is used to implement the RelayToTIR pass hook for CUTLASS."""
# Recover options from the current 'cutlass' Target
assert cutlass_target.kind.name == "cutlass"
tuning_config = {
key: cutlass_target.attrs.get(key)
for key in [
"sm",
"use_3xtf32",
"split_k_slices",
"profile_all_alignments",
"find_first_valid",
"use_multiprocessing",
]
}
compile_config = {
key: cutlass_target.attrs.get(key) for key in ["sm", "threads", "use_fast_math"]
}
tmp_dir = cutlass_target.attrs.get("tmp_dir")
# Tune
logger.info("Tuning for CUTLASS")
mod, _ = tune_cutlass_kernels(mod, tmp_dir=tmp_dir, **tuning_config)
# Compile
logger.info("Creating CSource module for CUTLASS")
create_c_source_module = tvm._ffi.get_global_func("relay.ext.cutlass.create_c_source_module")
c_module = create_c_source_module(mod)
function_names = c_module.get_function("get_func_names")()
compile_options = _get_cutlass_compile_options(**compile_config)
lib_path = os.path.join(tmp_dir, "cutlass.o")
logger.info("Compiling generated CUTLASS code")
c_module.export_library(lib_path, workspace_dir=tmp_dir, **compile_options)
# Recover static library
logger.info("Loading compiled CUTLASS code")
final_mod = tvm.runtime.load_static_library(lib_path, function_names)
logger.info("Done with CUTLASS compilation")
return final_mod
def finalize_modules(lib, lib_path="compile.so", tmp_dir="./tmp"):
"""Returns lib with any C source, LLVM and static library modules complied and linked in ready
for use by the graph or AOT executors. This method is not specific to CUTLASS, however it does
assume nvcc will be used for final compilation and linking. It is provided here for
convenience.
Parameters
----------
lib : runtime.Module
The output from relay.build.
lib_path : string
The path to a shared library which will be generated as the result of the build process.
tmp_dir : string
A temporary directory where intermediate compiled artifacts will be stored.
Returns
-------
updated_lib : runtime.Module
The updated library with all compilation and linking completed.
"""
lib_path = os.path.join(tmp_dir, lib_path)
lib.export_library(lib_path, workspace_dir=tmp_dir, cc="nvcc")
return runtime.load_module(lib_path)
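# A minimal end-to-end sketch for the graph-executor path (relay.build and
# graph_executor are standard TVM APIs; mod and params are placeholders):
#
#   with tvm.transform.PassContext(opt_level=3):
#       lib = relay.build(mod, target="cuda", params=params)
#   lib = finalize_modules(lib, "compile.so", tmp_dir="./tmp")
#   dev = tvm.device("cuda", 0)
#   rt_mod = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))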
def finalize_modules_vm(vm_exec, lib_path="compile.so", vmcode_path="vmcode.ro", tmp_dir="./tmp"):
"""Returns vm_exec with any C source, LLVM and static library modules compiled and linked in
ready for use by the VM executor. This method is not specific to CUTLASS, however it does
assume nvcc will be used for final compilation and linking. It is provided here for
convenience.
Parameters
----------
vm_exec : vm.Executable
The output from relay.vm.compile containing compiled host code and kernels.
lib_path : string
The path to a shared library which will be generated as the result of the build process.
vmcode_path : string
The path where the VM bytecode will be serialized to as a side-effect.
tmp_dir : string
A temporary directory where intermediate compiled artifacts will be stored.
Returns
-------
updated_vm_exec : vm.Executable
The updated VM executable with all compilation and linking completed.
"""
code, lib = vm_exec.save()
lib_path = os.path.join(tmp_dir, lib_path)
vmcode_path = os.path.join(tmp_dir, vmcode_path)
lib.export_library(lib_path, workspace_dir=tmp_dir, cc="nvcc")
with open(vmcode_path, "wb") as fo:
fo.write(code)
lib = tvm.runtime.load_module(lib_path)
return tvm.runtime.vm.Executable.load_exec(code, lib)
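# The corresponding sketch for the VM path (relay.vm.compile and VirtualMachine are
# standard TVM APIs; mod and params are placeholders):
#
#   vm_exec = relay.vm.compile(mod, target="cuda", params=params)
#   vm_exec = finalize_modules_vm(vm_exec, "compile.so", "vmcode.ro", tmp_dir="./tmp")
#   vm = tvm.runtime.vm.VirtualMachine(vm_exec, tvm.device("cuda", 0))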
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/cutlass/conv2d_operation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-wildcard-import, wildcard-import
"""Generator for CUTLASS Conv2D kernels."""
from .library import *
class Conv2dOperation:
"""Describes various attributes for instantiating Conv2d kernels."""
def __init__(
self,
conv_kind,
iterator_algorithm,
arch,
tile_description,
A,
B,
C,
element_epilogue,
stride_support,
epilogue_functor=EpilogueFunctor.LinearCombination,
swizzling_functor=SwizzlingFunctor.Identity1,
split_k_slices=1,
):
self.operation_kind = OperationKind.Conv2d
self.arch = arch
self.tile_description = tile_description
self.conv_kind = conv_kind
self.A = A
self.B = B
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.iterator_algorithm = iterator_algorithm
self.stride_support = stride_support
self.swizzling_functor = swizzling_functor
self.split_k_slices = split_k_slices
def accumulator_type(self):
return self.tile_description.math_instruction.element_accumulator
def core_name(self):
"""The basic operation kind is prefixed with a letter indicating the accumulation type."""
intermediate_type = ""
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp:
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
if (
self.tile_description.math_instruction.element_a != self.A.element
and self.tile_description.math_instruction.element_a != self.accumulator_type()
):
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
else:
inst_shape = ""
return "%s%s%s%s_%s" % (
ShortDataTypeNames[self.accumulator_type()],
inst_shape,
intermediate_type,
ConvKindNames[self.conv_kind],
IteratorAlgorithmNames[self.iterator_algorithm],
)
def extended_name(self):
"""Append data types if they differ from compute type."""
if (
self.C.element != self.tile_description.math_instruction.element_accumulator
and self.A.element != self.tile_description.math_instruction.element_accumulator
):
extended_name = "${element_c}_${core_name}_${element_a}"
elif (
self.C.element == self.tile_description.math_instruction.element_accumulator
and self.A.element != self.tile_description.math_instruction.element_accumulator
):
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = substitute_template(
extended_name,
{
"element_a": DataTypeNames[self.A.element],
"element_c": DataTypeNames[self.C.element],
"core_name": self.core_name(),
},
)
return extended_name
def layout_name(self):
return "%s" % (ShortLayoutTypeNames[self.A.layout])
def procedural_name(self):
"""
The full procedural name indicates architecture, extended name, tile size, and layout.
"""
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
threadblock = "%dx%d_%dx%d" % (
self.tile_description.threadblock_shape[0],
self.tile_description.threadblock_shape[1],
self.tile_description.threadblock_shape[2],
self.tile_description.stages,
)
if self.stride_support == StrideSupport.Unity:
configuration_name = (
"cutlass_${opcode_class}_${extended_name}_${threadblock}"
"_${layout}_align${alignment}_unity_stride"
)
else:
configuration_name = (
"cutlass_${opcode_class}_${extended_name}_${threadblock}"
"_${layout}_align${alignment}"
)
if self.split_k_slices > 1:
configuration_name += "_splitk%d" % self.split_k_slices
return substitute_template(
configuration_name,
{
"opcode_class": opcode_class_name,
"extended_name": self.extended_name(),
"threadblock": threadblock,
"layout": self.layout_name(),
"alignment": "%d" % self.A.alignment,
},
)
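# Illustration only (an assumed configuration, not emitted by this file): a Turing
# fprop kernel with fp16 accumulation, a 16x8x8 TensorOp instruction, a 128x128x32
# threadblock with 2 stages, NHWC layout and alignment 8 would be named roughly
# "cutlass_tensorop_h1688fprop_optimized_128x128_32x2_nhwc_align8".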
class EmitConv2dInstance:
"""Responsible for emitting a CUTLASS template definition."""
def __init__(self):
self.epilogue_default = """
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>"""
self.epilogue_no_beta_scaling = """
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue},
cutlass::epilogue::thread::ScaleType::NoBetaScaling
>"""
self.epilogue_residual_block = """
${epilogue_functor}<
${element_c},
${element_accumulator},
${element_epilogue},
${element_c},
${epilogue_vector_length},
${activation},
${binary_op},
${unary_op}
>"""
self.epilogue_wgrad = """
${epilogue_functor}<
${element_c},
4,
float,
float
>"""
self.template = """
// Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}"
using ${operation_name} =
typename cutlass::conv::kernel::DefaultConv2d${conv_kind_name}${conv_kernel_postfix}<
${element_a},
${layout_a},
${element_b},
${layout_b},
${element_c},
${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue},
${swizzling_functor}, // cutlass::gemm::threadblock::GemmSplitKIdentityThreadblockSwizzle<>,
${stages},
${math_operator},
${iterator_algorithm},
${stride_support},
${align_a},
${align_b}
>::Kernel;
${reduction}
"""
self.reduction_template = """
using EpilogueOutputOp = ${epilogue};
using ReductionOp = cutlass::reduction::thread::ReduceAdd<
${element_accumulator},
${element_accumulator},
EpilogueOutputOp::kCount
>;
using ReductionKernel = cutlass::reduction::kernel::ReduceSplitK<
cutlass::MatrixShape<4, 32 * EpilogueOutputOp::kCount>,
EpilogueOutputOp,
ReductionOp
>;
using ReductionDevice = cutlass::reduction::device::ReduceSplitK<ReductionKernel>;
using ReductionStrideIndex = typename ReductionDevice::StrideIndex;
"""
def emit(
self, operation, no_beta_scaling=False, residual_block_info=False, emit_reduction=False
):
"""Instantiate a Conv2d kernel from given `operation`."""
warp_shape = [
int(
operation.tile_description.threadblock_shape[idx]
/ operation.tile_description.warp_count[idx]
)
for idx in range(3)
]
epilogue_vector_length = int(
min(operation.C.alignment * DataTypeSize[operation.C.element], 128)
/ DataTypeSize[operation.C.element]
)
element_c = operation.C.element
use_split_k_wgrad = operation.conv_kind == ConvKind.Wgrad and operation.split_k_slices > 1
# Gemm output always fp32 in wgrad with split k
element_c_gemm = DataType.f32 if use_split_k_wgrad else element_c
if emit_reduction:
epilogue_reduction = substitute_template(
self.epilogue_wgrad,
{
"epilogue_functor": EpilogueFunctorTag[operation.epilogue_functor],
"element_c": DataTypeTag[element_c],
},
)
reduction = substitute_template(
self.reduction_template,
{
"epilogue": epilogue_reduction,
"operation_name": operation.procedural_name(),
"element_accumulator": DataTypeTag[operation.accumulator_type()],
},
)
gemm_template = substitute_template(self.template, {"reduction": reduction})
else:
gemm_template = substitute_template(self.template, {"reduction": ""})
values = {
"operation_name": operation.procedural_name(),
"conv_kind": ConvKindTag[operation.conv_kind],
"conv_kind_name": ConvKindNames[operation.conv_kind].capitalize(),
"element_a": DataTypeTag[operation.A.element],
"layout_a": LayoutTag[operation.A.layout],
"element_b": DataTypeTag[operation.B.element],
"layout_b": LayoutTag[operation.B.layout],
"element_c": DataTypeTag[element_c_gemm],
"layout_c": LayoutTag[operation.C.layout],
"element_accumulator": DataTypeTag[operation.accumulator_type()],
"opcode_class": OpcodeClassTag[
operation.tile_description.math_instruction.opcode_class
],
"arch": "cutlass::arch::Sm%d" % operation.arch,
"threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]),
"threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]),
"threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]),
"warp_shape_m": str(warp_shape[0]),
"warp_shape_n": str(warp_shape[1]),
"warp_shape_k": str(warp_shape[2]),
"instruction_shape_m": str(
operation.tile_description.math_instruction.instruction_shape[0]
),
"instruction_shape_n": str(
operation.tile_description.math_instruction.instruction_shape[1]
),
"instruction_shape_k": str(
operation.tile_description.math_instruction.instruction_shape[2]
),
"epilogue_vector_length": str(epilogue_vector_length),
"epilogue_functor": EpilogueFunctorTag[operation.epilogue_functor],
"element_epilogue": str(DataTypeTag[operation.element_epilogue]),
"swizzling_functor": SwizzlingFunctorTag[operation.swizzling_functor],
"stages": str(operation.tile_description.stages),
"iterator_algorithm": IteratorAlgorithmTag[operation.iterator_algorithm],
"iterator_algorithm_name": IteratorAlgorithmNames[
operation.iterator_algorithm
].capitalize(),
"stride_support": StrideSupportTag[operation.stride_support],
"math_operator": MathOperationTag[
operation.tile_description.math_instruction.math_operation
],
"align_a": str(operation.A.alignment),
"align_b": str(operation.B.alignment),
"conv_kernel_postfix": "",
}
if use_split_k_wgrad:
# Even if the output is fp16, gemm output is always fp32 for split k wgrad.
epilogue_gemm = substitute_template(
self.epilogue_wgrad,
{
"epilogue_functor": EpilogueFunctorTag[operation.epilogue_functor],
"element_c": "float",
},
)
template = substitute_template(gemm_template, {"epilogue": epilogue_gemm})
elif residual_block_info:
template = substitute_template(
gemm_template, {"epilogue": self.epilogue_residual_block}
)
values.update(
{
"unary_op": residual_block_info["unary_op"],
"binary_op": residual_block_info["binary_op"],
"activation": residual_block_info["activation"],
"conv_kernel_postfix": "WithBroadcast",
}
)
elif no_beta_scaling:
template = substitute_template(
gemm_template, {"epilogue": self.epilogue_no_beta_scaling}
)
else:
template = substitute_template(gemm_template, {"epilogue": self.epilogue_default})
return substitute_template(template, values)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/cutlass/conv2d_profiler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-outside-toplevel, invalid-name
"""Instantiate a C++ source for profiling CUTLASS kernels."""
from .library import DataTypeTag
class Conv2dProfilerEmitter(object):
"""Emit a C++ source for profiling CUTLASS kernels."""
def __init__(self):
from jinja2 import Template
self.reduction = """
ReductionDevice reduction_op;
static cutlass::conv::Operator const kConvolutionalOperator = ImplicitGemm::kConvolutionalOperator;
typename ReductionDevice::Arguments reduction_args(
cutlass::conv::implicit_gemm_problem_size(kConvolutionalOperator, problem_size).mn(),
problem_size.split_k_slices,
cutlass::conv::implicit_gemm_tensor_c_size(kConvolutionalOperator, problem_size),
{
reinterpret_cast<ImplicitGemm::ElementC*> (workspace.get()),
ReductionStrideIndex(tensor_c.stride()[ImplicitGemm::ImplicitGemmKernel::kTensorCStrideIdx])
},
{
tensor_d.device_data(),
ReductionStrideIndex(tensor_d.stride()[ImplicitGemm::ImplicitGemmKernel::kTensorCStrideIdx])
},
{
tensor_c.device_data(),
ReductionStrideIndex(tensor_c.stride()[ImplicitGemm::ImplicitGemmKernel::kTensorCStrideIdx])
},
{ElementComputeEpilogue(1), ElementComputeEpilogue(0)}
);
reduction_op.initialize(reduction_args, nullptr);
reduction_op();
"""
self.template = Template(
"""
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/conv/kernel/default_conv2d_fprop.h"
#include "cutlass/conv/kernel/default_conv2d_wgrad.h"
#include "cutlass/conv/kernel/default_conv2d_dgrad.h"
#include "cutlass/conv/device/implicit_gemm_convolution.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/reduction/device/reduce_split_k.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#define CUTLASS_CHECK(status) \
{ \
cutlass::Status error = status; \
if (error != cutlass::Status::kSuccess) { \
std::cerr << "Got cutlass error: " << cutlassGetStatusString(error) << " at: " << __LINE__ \
<< std::endl; \
exit(EXIT_FAILURE); \
} \
}
{{OperatorDef}}
using ImplicitGemm = cutlass::conv::device::ImplicitGemmConvolution<{{OperatorName}}>;
struct Options {
cutlass::Tensor4DCoord input_size;
cutlass::Tensor4DCoord filter_size;
cutlass::Tensor4DCoord padding;
cutlass::MatrixCoord conv_stride;
cutlass::MatrixCoord dilation;
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
cmd.get_cmd_line_argument("n", input_size.n());
cmd.get_cmd_line_argument("h", input_size.h());
cmd.get_cmd_line_argument("w", input_size.w());
cmd.get_cmd_line_argument("c", input_size.c());
cmd.get_cmd_line_argument("k", filter_size.n());
cmd.get_cmd_line_argument("r", filter_size.h());
cmd.get_cmd_line_argument("s", filter_size.w());
int pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w;
cmd.get_cmd_line_argument("pad_h", pad_h);
cmd.get_cmd_line_argument("pad_w", pad_w);
cmd.get_cmd_line_argument("stride_h", stride_h);
cmd.get_cmd_line_argument("stride_w", stride_w);
cmd.get_cmd_line_argument("dilation_h", dilation_h);
cmd.get_cmd_line_argument("dilation_w", dilation_w);
filter_size.c() = input_size.c();
padding = {pad_h, pad_h, pad_w, pad_w};
conv_stride = {stride_h, stride_w};
dilation = {dilation_h, dilation_w};
}
cutlass::Tensor4DCoord output_size() const {
auto dilated_h = (filter_size.h() - 1) * dilation.row() + 1;
auto dilated_w = (filter_size.w() - 1) * dilation.column() + 1;
auto h = (input_size.h() + padding.n() + padding.h() - dilated_h) / conv_stride.row() + 1;
auto w = (input_size.w() + padding.w() + padding.c() - dilated_w) / conv_stride.column() + 1;
return cutlass::Tensor4DCoord(input_size.n(), h, w, filter_size.n());
}
};
double profile_convolution(Options const &options) {
using ElementOutput = {{ElementOutput}};
using ElementInputA = typename ImplicitGemm::ElementA;
using ElementInputB = typename ImplicitGemm::ElementB;
int split_k_slices = {{SplitK}};
cutlass::conv::Conv2dProblemSize problem_size(
options.input_size,
options.filter_size,
options.padding,
options.conv_stride,
options.dilation,
options.output_size(),
cutlass::conv::Mode::kCrossCorrelation,
split_k_slices
);
auto conv_kind = ImplicitGemm::kConvolutionalOperator;
auto a_extent = implicit_gemm_tensor_a_extent(conv_kind, problem_size);
auto b_extent = implicit_gemm_tensor_b_extent(conv_kind, problem_size);
auto c_extent = implicit_gemm_tensor_c_extent(conv_kind, problem_size);
using LayoutC = typename ImplicitGemm::LayoutC;
cutlass::HostTensor<ElementInputA, typename ImplicitGemm::LayoutA> tensor_a(a_extent);
cutlass::HostTensor<ElementInputB, typename ImplicitGemm::LayoutB> tensor_b(b_extent);
cutlass::HostTensor<ElementOutput, typename ImplicitGemm::LayoutC> tensor_c(c_extent);
cutlass::HostTensor<ElementOutput, LayoutC> tensor_d(c_extent);
cutlass::HostTensor<ImplicitGemm::ElementC, LayoutC> tensor_c_gemm(c_extent);
using ElementComputeEpilogue = typename ImplicitGemm::ElementCompute;
cutlass::conv::SplitKMode const split_k_mode = split_k_slices > 1 ?
cutlass::conv::SplitKMode::kParallel : cutlass::conv::SplitKMode::kSerial;
typename ImplicitGemm::Arguments arguments{
problem_size,
tensor_a.device_ref(),
tensor_b.device_ref(),
tensor_c_gemm.device_ref(),
tensor_c_gemm.device_ref(),
{ElementComputeEpilogue(1), ElementComputeEpilogue(0)},
split_k_mode,
};
ImplicitGemm implicit_gemm_op;
size_t workspace_size = implicit_gemm_op.get_workspace_size(arguments);
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
auto status = implicit_gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
status = implicit_gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
status = implicit_gemm_op();
CUTLASS_CHECK(status);
cudaEvent_t events[2];
for (auto & event : events) {
cudaEventCreate(&event);
}
cudaEventRecord(events[0]);
for (int iteration = 0; iteration < 100; ++iteration) {
auto status = implicit_gemm_op();
CUTLASS_CHECK(status);
{{Reduction}}
}
cudaEventRecord(events[1]);
cudaEventSynchronize(events[1]);
float runtime_ms = 0;
cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
for (auto event : events) {
(void)cudaEventDestroy(event);
}
return double(runtime_ms) / 100.0;
}
int main(int argc, char const **args) {
Options options;
options.parse(argc, args);
std::cout << profile_convolution(options) << std::endl;
return 0;
}
"""
)
def emit(self, op_def, op_name, element_output, split_k_slices=1):
src = self.template.render(
OperatorDef=op_def,
OperatorName=op_name,
ElementOutput=DataTypeTag[element_output],
SplitK=split_k_slices,
Reduction=self.reduction if split_k_slices > 1 else "",
)
return src
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/cutlass/gemm_operation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-wildcard-import, wildcard-import
"""Generator for CUTLASS GEMM kernels."""
from .library import *
class GemmOperation:
"""Describes various attributes for instantiating GEMM kernels."""
def __init__(
self,
arch,
tile_description,
A,
B,
C,
element_epilogue,
epilogue_functor=EpilogueFunctor.LinearCombination,
swizzling_functor=SwizzlingFunctor.Identity8,
):
self.operation_kind = OperationKind.Gemm
self.arch = arch
self.tile_description = tile_description
self.A = A
self.B = B
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
def accumulator_type(self):
return self.tile_description.math_instruction.element_accumulator
def short_math_name(self):
return ShortDataTypeNames[self.accumulator_type()]
def core_name(self):
"""The basic operation kind is prefixed with a letter indicating the accumulation type."""
inst_shape = ""
intermediate_type = ""
if (
self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp
or self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp
):
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
if (
self.tile_description.math_instruction.element_a != self.A.element
and self.tile_description.math_instruction.element_a
!= self.tile_description.math_instruction.element_accumulator
):
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
return "%s%s%s%s" % (
self.short_math_name(),
inst_shape,
intermediate_type,
"gemm",
)
def extended_name(self):
"""Append data types if they differ from compute type."""
if (
self.C.element != self.tile_description.math_instruction.element_accumulator
and self.A.element != self.tile_description.math_instruction.element_accumulator
):
extended_name = "${element_c}_${core_name}_${element_a}"
elif (
self.C.element == self.tile_description.math_instruction.element_accumulator
and self.A.element != self.tile_description.math_instruction.element_accumulator
):
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = substitute_template(
extended_name,
{
"element_a": DataTypeNames[self.A.element],
"element_c": DataTypeNames[self.C.element],
"core_name": self.core_name(),
},
)
return extended_name
def layout_name(self):
return "%s%s" % (ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout])
def procedural_name(self):
"""The full procedural name indicates architecture, extended name, tile size,
and layout.
"""
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
return substitute_template(
"cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_align${alignment}",
{
"opcode_class": opcode_class_name,
"extended_name": self.extended_name(),
"threadblock": threadblock,
"layout": self.layout_name(),
"alignment": "%d" % self.A.alignment,
},
)
def leading_dim(self):
"""lda, ldb, ldc, according to the leading dimension."""
if self.A.layout == LayoutType.RowMajor:
lda = "K"
elif self.A.layout == LayoutType.ColumnMajor:
lda = "M"
else:
ValueError("The layout of A is not implemented.")
if self.B.layout == LayoutType.RowMajor:
ldb = "N"
elif self.B.layout == LayoutType.ColumnMajor:
ldb = "K"
else:
ValueError("The layout of B is not implemented.")
if self.C.layout == LayoutType.RowMajor:
ldc = "N"
elif self.C.layout == LayoutType.ColumnMajor:
ldc = "M"
else:
ValueError("The layout of B is not implemented.")
return substitute_template(
"int lda = ${lda_val};\n\tint ldb = ${ldb_val};\n\tint ldc = ${ldc_val};\n",
{
"lda_val": lda,
"ldb_val": ldb,
"ldc_val": ldc,
},
)
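# Illustration (derived from leading_dim above): with the RowMajor A, ColumnMajor B,
# RowMajor C layouts used by the GEMM generators in this package, the emitted leading
# dimensions are lda = K, ldb = K and ldc = N, matching the "lda"/"ldb"/"ldc" values
# hard-coded by handle_dense and handle_batch_matmul.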
class EmitGemmInstance:
"""Responsible for emitting a CUTLASS template definition."""
def __init__(self):
self.epilogue_default = """
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>"""
self.epilogue_no_beta_scaling = """
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue},
cutlass::epilogue::thread::ScaleType::NoBetaScaling
>"""
self.gemm_template = """
// Gemm operator ${operation_name}
using Operation_${operation_name} = cutlass::gemm::device::${kernel_name}<
${element_a}, ${layout_a},
${element_b}, ${layout_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue},
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial}
${math_operation}
>;
"""
def emit(self, operation, no_beta_scaling=False, batched=False):
"""Instantiate a GEMM kernel from given `operation`."""
warp_shape = [
operation.tile_description.threadblock_shape[idx]
// operation.tile_description.warp_count[idx]
for idx in range(3)
]
epilogue_vector_length = (
min(operation.C.alignment * DataTypeSize[operation.C.element], 128)
// DataTypeSize[operation.C.element]
)
values = {
"operation_name": operation.procedural_name(),
"element_a": DataTypeTag[operation.A.element],
"layout_a": LayoutTag[operation.A.layout],
"element_b": DataTypeTag[operation.B.element],
"layout_b": LayoutTag[operation.B.layout],
"element_c": DataTypeTag[operation.C.element],
"layout_c": LayoutTag[operation.C.layout],
"element_accumulator": DataTypeTag[operation.accumulator_type()],
"opcode_class": OpcodeClassTag[
operation.tile_description.math_instruction.opcode_class
],
"arch": "cutlass::arch::Sm%d" % operation.arch,
"threadblock_shape_m": str(operation.tile_description.threadblock_shape[0]),
"threadblock_shape_n": str(operation.tile_description.threadblock_shape[1]),
"threadblock_shape_k": str(operation.tile_description.threadblock_shape[2]),
"warp_shape_m": str(warp_shape[0]),
"warp_shape_n": str(warp_shape[1]),
"warp_shape_k": str(warp_shape[2]),
"instruction_shape_m": str(
operation.tile_description.math_instruction.instruction_shape[0]
),
"instruction_shape_n": str(
operation.tile_description.math_instruction.instruction_shape[1]
),
"instruction_shape_k": str(
operation.tile_description.math_instruction.instruction_shape[2]
),
"epilogue_vector_length": str(epilogue_vector_length),
"element_epilogue": str(DataTypeTag[operation.element_epilogue]),
"epilogue_functor": EpilogueFunctorTag[operation.epilogue_functor],
"swizzling_functor": SwizzlingFunctorTag[operation.swizzling_functor],
"stages": str(operation.tile_description.stages),
"align_a": str(operation.A.alignment),
"align_b": str(operation.B.alignment),
"math_operation": MathOperationTag[
operation.tile_description.math_instruction.math_operation
],
}
values["kernel_name"] = "GemmBatched" if batched else "Gemm"
values["split_k_serial"] = "" if batched else "false,"
gemm_template = substitute_template(
self.gemm_template,
{
"epilogue": self.epilogue_no_beta_scaling
if no_beta_scaling
else self.epilogue_default
},
)
return substitute_template(gemm_template, values)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/cutlass/gemm_profiler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-outside-toplevel, invalid-name
"""Instantiate a C++ source for profiling CUTLASS kernels."""
class GemmProfilerEmitter(object):
"""Emit a C++ source for profiling CUTLASS kernels."""
def __init__(self):
from jinja2 import Template
self.template = Template(
"""
#include <iostream>
#include <sstream>
#include <vector>
#include <chrono>
#include "cuda_runtime.h"
#include "cutlass/gemm/device/gemm.h"
#define CUTLASS_CHECK(status) \\
{ \\
cutlass::Status error = status; \\
if (error != cutlass::Status::kSuccess) { \\
std::cerr << "Got cutlass error: " << cutlassGetStatusString(error) << " at: " << __LINE__ \\
<< std::endl; \\
exit(EXIT_FAILURE); \\
} \\
}
#define CUDA_CHECK(status) \\
{ \\
cudaError_t error = status; \\
if (error != cudaSuccess) { \\
std::cerr << "Got bad cuda status: " << cudaGetErrorString(error) \\
<< " at line: " << __LINE__ << std::endl; \\
exit(EXIT_FAILURE); \\
} \\
}
template<typename DTypeA, typename DTypeB, typename DTypeC>
cudaError_t CutlassGemmRCR(
int M,
int N,
int K,
DTypeC alpha,
DTypeA const *A,
int lda,
DTypeB const *B,
int ldb,
DTypeC beta,
DTypeC *C,
int ldc) {
using namespace std::chrono;
{{OperatorDef}}
Operation_{{OperatorName}} gemm_operator;
Operation_{{OperatorName}}::Arguments args({M, N, K},
{A, lda},
{B, ldb},
{C, ldc},
{C, ldc},
{alpha, beta});
cutlass::Status status = gemm_operator(args);
CUTLASS_CHECK(status)
high_resolution_clock::time_point t1 = high_resolution_clock::now();
for (int i = 0; i < 100; ++i) {
status = gemm_operator(args);
}
cudaDeviceSynchronize();
high_resolution_clock::time_point t2 = high_resolution_clock::now();
duration<double> time_span = duration_cast<duration<double>>(t2 - t1);
std::cout << time_span.count() << std::endl;
return cudaSuccess;
}
template<typename DType>
cudaError_t AllocateMatrix(DType **matrix, int ldm, int rows, int columns, int seed = 0) {
cudaError_t result;
size_t sizeof_matrix = sizeof(DType) * rows * columns;
// Allocate device memory.
result = cudaMalloc(reinterpret_cast<void **>(matrix), sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to allocate matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Clear the allocation.
result = cudaMemset(*matrix, 0, sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to clear matrix device memory: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
if (result != cudaSuccess) {
std::cerr << "Failed to initialize matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
return result;
}
template<typename DTypeA, typename DTypeB, typename DTypeC>
cudaError_t TestCutlassGemm(int M, int N, int K, DTypeC alpha, DTypeC beta) {
cudaError_t result;
{{LeadingDim}}
// size_t sizeof_C = sizeof(DTypeC) * ldc * N;
DTypeA *A;
DTypeB *B;
DTypeC *C_cutlass;
result = AllocateMatrix<DTypeA>(&A, lda, M, K, 0);
if (result != cudaSuccess) {
return result;
}
result = AllocateMatrix<DTypeB>(&B, ldb, K, N, 17);
if (result != cudaSuccess) {
cudaFree(A);
return result;
}
result = AllocateMatrix<DTypeC>(&C_cutlass, ldc, M, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
cudaFree(B);
return result;
}
result = CutlassGemmRCR<DTypeA, DTypeB, DTypeC>(M, N, K, alpha, A, lda, B, ldb,
beta, C_cutlass, ldc);
if (result != cudaSuccess) {
std::cerr << "CUTLASS GEMM kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return cudaSuccess;
}
int main(int argc, const char *arg[]) {
int problem[3] = { 4096, 4096, 4096 };
for (int i = 1; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
float scalars[2] = { 1, 0 };
cudaError_t result = TestCutlassGemm< {{DTypeA}}, {{DTypeB}}, {{DTypeC}}>(
problem[0], // GEMM M dimension
problem[1], // GEMM N dimension
problem[2], // GEMM K dimension
static_cast<{{DTypeC}}>(scalars[0]), // alpha
static_cast<{{DTypeC}}>(scalars[1]) // beta
);
return result == cudaSuccess ? 0 : -1;
}
"""
)
def emit(self, op_name, op_def, dtype_a, dtype_b, dtype_c, ld):
src = self.template.render(
OperatorName=op_name,
OperatorDef=op_def,
DTypeA=dtype_a,
DTypeB=dtype_b,
DTypeC=dtype_c,
LeadingDim=ld,
)
return src
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/cutlass/gen_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, dangerous-default-value
"""Conv2d kernel generator and profiler for CUTLASS."""
from functools import partial
from .conv2d_operation import Conv2dOperation, EmitConv2dInstance
from .gen_gemm import CutlassGemmProfiler
from .conv2d_profiler import Conv2dProfilerEmitter
from .gen_tensor_op import ProfilerEngine, GENERATOR_FUNC_TABLE, EPILOGUE_MAP
from .library import (
DataType,
EpilogueFunctor,
SwizzlingFunctor,
TensorDescription,
LayoutType,
ConvKind,
StrideSupport,
IteratorAlgorithm,
)
def create_conv2d_operator_with_epilogue(
conv_kind,
stride_support,
op_type,
tile_description,
data_type,
alignment,
swizzling_functor,
split_k_slices,
):
"""
Instantiate a cutlass kernel from the given configuration,
    along with the epilogue functor
"""
if "residual" in op_type:
activation_map = {
"cutlass.conv2d_bias_hardswish": "cutlass::epilogue::thread::HardSwish",
"cutlass.conv2d_bias_silu": "cutlass::epilogue::thread::SiLu",
"cutlass.conv2d_bias_sigmoid": "cutlass::epilogue::thread::Sigmoid",
"cutlass.conv2d_bias_relu": "cutlass::epilogue::thread::ReLu",
"cutlass.conv2d_bias": "cutlass::epilogue::thread::Identity",
}
prefix = op_type[: op_type.find("_residual")]
activation = activation_map[prefix]
binary_op = "cutlass::multiplies" if "residual_multiply" in op_type else "cutlass::plus"
unary_op = (
"cutlass::epilogue::thread::ReLu"
if op_type.endswith("relu")
else "cutlass::epilogue::thread::Identity"
)
residual_block_info = {
"activation": activation,
"binary_op": binary_op,
"unary_op": unary_op,
}
epilogue = EpilogueFunctor.LinearCombinationResidualBlock
no_beta_scaling = False
else:
residual_block_info = None
epilogue, no_beta_scaling = EPILOGUE_MAP[op_type]
element_a, element_b, element_c, element_epilogue = data_type
A = TensorDescription(element_a, LayoutType.TensorNHWC, alignment)
B = TensorDescription(element_b, LayoutType.TensorNHWC, alignment)
C = TensorDescription(element_c, LayoutType.TensorNHWC, alignment)
op = Conv2dOperation(
conv_kind,
IteratorAlgorithm.Optimized,
tile_description.minimum_compute_capability,
tile_description,
A,
B,
C,
element_epilogue,
stride_support,
epilogue,
swizzling_functor,
split_k_slices,
)
name = op.procedural_name()
opdef = EmitConv2dInstance().emit(
op,
no_beta_scaling=no_beta_scaling,
residual_block_info=residual_block_info,
emit_reduction=split_k_slices > 1,
)
return name, opdef
def enumerate_conv2d_operators(
conv_kind,
stride_support,
split_k_slices,
tile_descriptions,
data_type,
alignment_constraints,
swizzling_functor=SwizzlingFunctor.Identity4,
):
"""Exhaustively instantiate all kernels from a given configuration."""
ret = []
kernel_emitter = EmitConv2dInstance()
profiler_emitter = Conv2dProfilerEmitter()
element_a, element_b, element_c, element_epilogue = data_type
if conv_kind == ConvKind.Dgrad and stride_support == StrideSupport.Strided:
swizzling_functor = SwizzlingFunctor.StridedDgradIdentity1
for split_k_slice in split_k_slices:
for tile in tile_descriptions:
for alignment in alignment_constraints:
A = TensorDescription(element_a, LayoutType.TensorNHWC, alignment)
B = TensorDescription(element_b, LayoutType.TensorNHWC, alignment)
C = TensorDescription(element_c, LayoutType.TensorNHWC, alignment)
if element_c == DataType.s32 and A.alignment == 1:
tile.threadblock_shape[0] = min(tile.threadblock_shape[0], 128)
tile.threadblock_shape[1] = min(tile.threadblock_shape[1], 128)
op = Conv2dOperation(
conv_kind,
IteratorAlgorithm.Optimized,
tile.minimum_compute_capability,
tile,
A,
B,
C,
element_epilogue,
stride_support,
EpilogueFunctor.LinearCombination,
swizzling_functor,
split_k_slice,
)
ret.append(
{
"src": profiler_emitter.emit(
kernel_emitter.emit(op, emit_reduction=split_k_slice > 1),
op.procedural_name(),
element_output=element_c,
split_k_slices=split_k_slice,
),
"name": op.procedural_name(),
"tile_description": tile,
"alignment": alignment,
"data_type": data_type,
"swizzle_functor": swizzling_functor,
"split_k_slices": split_k_slice,
}
)
return ret
class CutlassConv2DProfiler:
"""Profile all candidate kernels and select the best one."""
def __init__(self, sm, cutlass_path, binary_path):
self.gemm_profiler = CutlassGemmProfiler(sm, cutlass_path, binary_path)
self.sm = sm
assert sm in GENERATOR_FUNC_TABLE, "sm%d not supported yet." % sm
self.engine = ProfilerEngine(sm, cutlass_path, binary_path)
self.cache = {}
def get_default(
self,
op_type,
out_dtype,
arg0_dtype,
arg1_dtype,
use_3xtf32,
conv_kind=ConvKind.Fprop,
stride=(1, 1),
):
"""Return the default kernel for the requested architecture.
        For now, the default kernel is picked arbitrarily.
"""
gemm_profile_result = self.gemm_profiler.get_default(
op_type, out_dtype, arg0_dtype, arg1_dtype, use_3xtf32
)
tile_description = gemm_profile_result["tile_description"]
alignment = gemm_profile_result["alignment"]
data_type = gemm_profile_result["data_type"]
stride_support = StrideSupport.Strided if stride[0] > 1 else StrideSupport.Unity
if conv_kind == ConvKind.Dgrad and stride_support == StrideSupport.Strided:
swizzling_functor = SwizzlingFunctor.StridedDgradIdentity1
else:
swizzling_functor = SwizzlingFunctor.Identity4
name, opdef = create_conv2d_operator_with_epilogue(
conv_kind,
stride_support,
op_type,
tile_description,
data_type,
alignment,
swizzling_functor,
split_k_slices=1,
)
return {"name": name, "opdef": opdef}
def select_op(
self,
d_shape,
w_shape,
padding,
stride,
dilation,
out_dtype,
data_dtype,
weight_dtype,
use_3xtf32,
conv_kind,
stride_support,
split_k_slices,
profile_all_alignments=False,
find_first_valid=False,
use_multiprocessing=False,
):
"""
Profile and select the best kernel from candidate kernels.
See the documentation for the profile method below.
"""
N, H, W, IC = d_shape
OC, R, S, _ = w_shape
workload = (
N,
H,
W,
IC,
OC,
R,
S,
padding[0],
padding[1],
stride[0],
stride[1],
dilation[0],
dilation[1],
)
if workload in self.cache:
return self.cache[workload]
ops = GENERATOR_FUNC_TABLE[self.sm](
out_dtype,
data_dtype,
weight_dtype,
partial(enumerate_conv2d_operators, conv_kind, stride_support, split_k_slices),
lambda align: all([dim % align == 0 for dim in [IC, OC]]),
use_3xtf32,
profile_all_alignments,
# Use fp32 accumulation for wgrad to align with cuDNN
accumlator_dtype="float32" if conv_kind == ConvKind.Wgrad else out_dtype,
)
if not find_first_valid:
self.engine.compile_all(ops, use_multiprocessing)
args = (
"--n=%d --h=%d --w=%d --c=%d --k=%d --r=%d --s=%d --pad_h=%d --pad_w=%d "
"--stride_h=%d --stride_w=%d --dilation_h=%d --dilation_w=%d"
) % workload
for op in ops:
out = self.engine.evaluate(op, args.split(" "))
op["runtime"] = out
if out < float("inf") and find_first_valid:
self.cache[workload] = op
return op
op = min(ops, key=lambda i: i["runtime"])
self.cache[workload] = op
return op
def profile(
self,
op_type,
d_shape,
w_shape,
padding,
stride,
dilation,
out_dtype,
data_dtype,
weight_dtype,
use_3xtf32=True,
conv_kind=ConvKind.Fprop,
split_k_slices=[1],
profile_all_alignments=False,
find_first_valid=False,
use_multiprocessing=False,
):
"""Profile and select the best kernel from candidate kernels.
If find_first_valid is True, return immediately after the first applicable kernel is found.
If use_multiprocessing is True, compile all profiler executables in parallel.
"""
# Dgrad requires Unity stride when stride == (1, 1)
stride_support = (
StrideSupport.Unity
if stride[0] == 1 and stride[1] == 1 and conv_kind == ConvKind.Dgrad
else StrideSupport.Strided
)
op = self.select_op(
d_shape,
w_shape,
padding,
stride,
dilation,
out_dtype,
data_dtype,
weight_dtype,
use_3xtf32,
conv_kind,
stride_support,
split_k_slices,
profile_all_alignments,
find_first_valid,
use_multiprocessing,
)
name, opdef = create_conv2d_operator_with_epilogue(
conv_kind,
stride_support,
op_type,
op["tile_description"],
op["data_type"],
op["alignment"],
op["swizzle_functor"],
op["split_k_slices"],
)
return name, opdef, op["runtime"]
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/cutlass/gen_gemm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""GEMM kernel generator and profiler for CUTLASS."""
from .gemm_operation import GemmOperation, EmitGemmInstance
from .gemm_profiler import GemmProfilerEmitter
from .gen_tensor_op import ProfilerEngine, GENERATOR_FUNC_TABLE, EPILOGUE_MAP
from .library import (
DataType,
EpilogueFunctor,
SwizzlingFunctor,
TensorDescription,
DataTypeTag,
LayoutType,
)
def create_gemm_operator_with_epilogue(
op_type,
tile_description,
data_type,
alignment,
swizzling_functor,
batched=False,
):
"""
Instantiate a cutlass kernel from the given configuration,
    along with the epilogue functor
"""
element_a, element_b, element_c, element_epilogue = data_type
A = TensorDescription(element_a, LayoutType.RowMajor, alignment)
B = TensorDescription(element_b, LayoutType.ColumnMajor, alignment)
C = TensorDescription(element_c, LayoutType.RowMajor, alignment)
if batched:
swizzling_functor = SwizzlingFunctor.Batched
epilogue, no_beta_scaling = EPILOGUE_MAP[op_type]
op = GemmOperation(
tile_description.minimum_compute_capability,
tile_description,
A,
B,
C,
element_epilogue,
epilogue,
swizzling_functor,
)
return (
op.procedural_name(),
EmitGemmInstance().emit(op, no_beta_scaling=no_beta_scaling, batched=batched),
)
def enumerate_gemm_operators(
tile_descriptions,
data_type,
alignment_constraints,
swizzling_functor=SwizzlingFunctor.Identity8,
):
"""Exhaustively instantiate all kernels from a given configuration."""
ret = []
kernel_emitter = EmitGemmInstance()
profiler_emitter = GemmProfilerEmitter()
element_a, element_b, element_c, element_epilogue = data_type
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
A = TensorDescription(element_a, LayoutType.RowMajor, alignment)
B = TensorDescription(element_b, LayoutType.ColumnMajor, alignment)
C = TensorDescription(element_c, LayoutType.RowMajor, alignment)
if element_c == DataType.s32 and A.alignment == 1:
tile_description.threadblock_shape[0] = min(
tile_description.threadblock_shape[0], 128
)
tile_description.threadblock_shape[1] = min(
tile_description.threadblock_shape[1], 128
)
op = GemmOperation(
tile_description.minimum_compute_capability,
tile_description,
A,
B,
C,
element_epilogue,
EpilogueFunctor.LinearCombination,
swizzling_functor,
)
src = profiler_emitter.emit(
op.procedural_name(),
kernel_emitter.emit(op, batched=False),
DataTypeTag[element_a],
DataTypeTag[element_b],
DataTypeTag[element_c],
op.leading_dim(),
)
ret.append(
{
"src": src,
"op": op,
"name": op.procedural_name(),
"tile_description": tile_description,
"alignment": alignment,
"data_type": data_type,
"swizzle_functor": swizzling_functor,
}
)
return ret
# TODO(masahi): A sensible way to pick reasonable default kernels
DEFAULT_KERNELS = {
75: {
("float16", "float16"): "cutlass_tensorop_h1688gemm_128x64_32x2_tn_align1",
("float16", "float32"): "cutlass_tensorop_s1688gemm_f16_64x64_32x2_tn_align1",
},
# align1 variants do not seem to be available for sm80
80: {
("float16", "float16"): "cutlass_tensorop_h1688gemm_128x64_32x2_tn_align1",
("float16", "float32"): "cutlass_tensorop_s1688gemm_f16_64x64_32x2_tn_align1",
# two kernels for tf32 and 3xtf32
("float32", "float32"): (
"cutlass_tensorop_s1688gemm_128x64_32x3_tn_align1",
"cutlass_tensorop_s1688gemm_64x64_16x3_tn_align1",
),
},
}
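# These default names follow GemmOperation.procedural_name(): opcode class, core name,
# threadblock shape as MxN_KxStages, layout pair, and alignment. The "tn" layout pair
# (RowMajor A, ColumnMajor B) is the only one generated in this file, which is also why
# handle_dense asserts that the selected kernel name contains "tn_align".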
class CutlassGemmProfiler:
"""Profile all candidate kernels and select the best one."""
def __init__(self, sm, cutlass_path, binary_path):
assert sm in GENERATOR_FUNC_TABLE and sm in DEFAULT_KERNELS, "sm%d not supported yet." % sm
self.engine = ProfilerEngine(sm, cutlass_path, binary_path)
self.sm = sm
self.cache = {}
def get_default(
self, op_type, out_dtype, arg0_dtype, arg1_dtype, use_3xtf32=True, batched=False
):
"""Return the default kernel for the requested architecture.
        For now, the default kernel is picked arbitrarily.
"""
ops = GENERATOR_FUNC_TABLE[self.sm](
out_dtype,
arg0_dtype,
arg1_dtype,
enumerate_gemm_operators,
lambda align: align == 1, # Only request align1 kernels
use_3xtf32,
profile_all_alignments=True, # To include all align1 kernels
            # TODO(masahi): Investigate when fp32 accumulation is needed for gemm
accumlator_dtype=out_dtype,
)
default_kernel_name = DEFAULT_KERNELS[self.sm][(arg0_dtype, out_dtype)]
if arg0_dtype == "float32":
default_kernel_name = (
default_kernel_name[0] if not use_3xtf32 else default_kernel_name[1]
)
filtered = list(filter(lambda op: op["name"] == default_kernel_name, ops))
assert len(filtered) == 1
op = filtered[0]
name, opdef = create_gemm_operator_with_epilogue(
op_type,
op["tile_description"],
op["data_type"],
op["alignment"],
op["swizzle_functor"],
batched=batched,
)
op.update({"name": name, "opdef": opdef})
return op
def select_op(
self,
M,
N,
K,
out_dtype,
arg0_dtype,
arg1_dtype,
use_3xtf32,
profile_all_alignments=False,
find_first_valid=False,
use_multiprocessing=False,
):
"""
Profile and select the best kernel from candidate kernels.
See the documentation for the profile method below.
"""
if (M, N, K) in self.cache:
op = self.cache[(M, N, K)]
return op
# TODO(masahi): CUTLASS alignment check on gemm kernels is too restrictive.
# See https://github.com/NVIDIA/cutlass/issues/362.
# When the above issue is resolved, we can remove the alignment check on M below.
ops = GENERATOR_FUNC_TABLE[self.sm](
out_dtype,
arg0_dtype,
arg1_dtype,
enumerate_gemm_operators,
lambda align: all([dim % align == 0 for dim in [M, N, K]]),
use_3xtf32,
profile_all_alignments=profile_all_alignments,
            # TODO(masahi): Investigate when fp32 accumulation is needed for gemm
accumlator_dtype=out_dtype,
)
if not find_first_valid:
self.engine.compile_all(ops, use_multiprocessing)
for op in ops:
out = self.engine.evaluate(op, [M, N, K])
op["runtime"] = out
if out < float("inf") and find_first_valid:
self.cache[(M, N, K)] = op
return op
op = min(ops, key=lambda i: i["runtime"])
self.cache[(M, N, K)] = op
return op
def profile(
self,
op_type,
M,
N,
K,
out_dtype,
arg0_dtype,
arg1_dtype,
use_3xtf32=True,
profile_all_alignments=False,
find_first_valid=False,
use_multiprocessing=False,
batched=False,
):
"""Profile and select the best kernel from candidate kernels.
If find_first_valid is True, return immediately after the first applicable kernel is found.
If use_multiprocessing is True, compile all profiler executables in parallel.
"""
op = self.select_op(
M,
N,
K,
out_dtype,
arg0_dtype,
arg1_dtype,
use_3xtf32,
profile_all_alignments=profile_all_alignments,
find_first_valid=find_first_valid,
use_multiprocessing=use_multiprocessing,
)
name, opdef = create_gemm_operator_with_epilogue(
op_type,
op["tile_description"],
op["data_type"],
op["alignment"],
op["swizzle_functor"],
batched=batched,
)
return name, opdef, op["runtime"]
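# A minimal usage sketch (not part of the original file; names, paths and shapes below are
# illustrative assumptions): it assumes a local CUTLASS checkout at `cutlass_path`, a
# writable `binary_path` for compiled profilers, and an sm_80 GPU.
#
#   profiler = CutlassGemmProfiler(80, "/path/to/cutlass", "/tmp/cutlass_profilers")
#   name, opdef, runtime = profiler.profile(
#       "cutlass.dense", M=1024, N=768, K=512,
#       out_dtype="float16", arg0_dtype="float16", arg1_dtype="float16",
#       use_multiprocessing=True,
#   )
#   # `name` is the selected kernel, `opdef` its instantiated C++ definition, and
#   # `runtime` the measured time of the winning candidate.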
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/cutlass/gen_tensor_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Common functions and classes for CUTLASS GEMM and Conv2d geneator."""
import logging
import os
import tempfile
import subprocess
import multiprocessing
from .library import (
MathInstruction,
DataType,
OpcodeClass,
MathOperation,
TileDescription,
EpilogueFunctor,
)
logger = logging.getLogger("cutlass")
dtype_map = {
"int8": DataType.s8,
"uint8": DataType.u8,
"float32": DataType.f32,
"float16": DataType.f16,
}
def generate_tensor_op_common(
math_instructions, alignment_constraints, get_tile_descriptions, op_creator
):
"""Common kernel generator to be used by archtecture specific generators."""
ops = []
for math_inst in math_instructions:
tile_descriptions = get_tile_descriptions(math_inst)
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_c,
math_inst.element_accumulator,
]
out = op_creator(tile_descriptions, data_type, alignment_constraints)
ops.extend(out)
return ops
def generate_sm75_tensor_op_1688(
out_dtype,
arg0_dtype,
arg1_dtype,
op_creator,
check_align,
_,
profile_all_alignments=False,
accumlator_dtype="float32",
):
"""Generate GEMM or Conv2D kernels for Turing."""
assert out_dtype in ["float32", "float16", "int32"]
min_cc = 75
max_cc = 1024
if arg0_dtype == "float16" and arg1_dtype == "float16":
math_instructions = [
MathInstruction(
[16, 8, 8],
DataType.f16,
DataType.f16,
dtype_map[out_dtype],
dtype_map[accumlator_dtype],
OpcodeClass.TensorOp,
MathOperation.multiply_add,
)
]
alignment_constraints = [8, 4, 2, 1]
tile_descriptions = [
([256, 128, 32], 2, [4, 2, 1], min_cc, max_cc),
([128, 256, 32], 2, [2, 4, 1], min_cc, max_cc),
([128, 128, 32], 2, [2, 2, 1], min_cc, max_cc),
([64, 128, 32], 2, [2, 2, 1], min_cc, max_cc),
([128, 64, 32], 2, [2, 2, 1], min_cc, max_cc),
([64, 64, 32], 2, [2, 2, 1], min_cc, max_cc),
([64, 128, 64], 2, [1, 2, 2], min_cc, max_cc),
]
else:
assert out_dtype == "int32"
math_instructions = [
MathInstruction(
[8, 8, 16],
dtype_map[arg0_dtype],
dtype_map[arg1_dtype],
DataType.s32,
DataType.s32,
OpcodeClass.TensorOp,
MathOperation.multiply_add_saturate,
),
]
alignment_constraints = [16, 8, 4, 2, 1]
tile_descriptions = [
([256, 128, 64], 2, [4, 2, 1], min_cc, max_cc),
([128, 256, 64], 2, [2, 4, 1], min_cc, max_cc),
([128, 128, 64], 2, [2, 2, 1], min_cc, max_cc),
([64, 256, 64], 2, [1, 4, 1], min_cc, max_cc),
([256, 64, 64], 2, [4, 1, 1], min_cc, max_cc),
([64, 128, 64], 2, [2, 2, 1], min_cc, max_cc),
([128, 64, 64], 2, [2, 2, 1], min_cc, max_cc),
([64, 64, 64], 2, [2, 2, 1], min_cc, max_cc),
]
alignment_constraints = [align for align in alignment_constraints if check_align(align)]
assert len(alignment_constraints) > 0
if not profile_all_alignments:
alignment_constraints = [alignment_constraints[0]]
def get_tile_descriptions(math_inst):
return [
TileDescription(threadblock_shape, stages, warp_count, math_inst, min_cc, max_cc)
for threadblock_shape, stages, warp_count, min_cc, max_cc in tile_descriptions
]
return generate_tensor_op_common(
math_instructions, alignment_constraints, get_tile_descriptions, op_creator
)
def generate_sm80_tensor_op_16816(
out_dtype,
arg0_dtype,
arg1_dtype,
op_creator,
check_align,
use_3xtf32=True,
profile_all_alignments=False,
accumlator_dtype="float32",
):
"""Generate GEMM or Conv2D kernels for Ampere."""
min_cc = 80
max_cc = 1024
max_cc_smem_limited = 80
def get_default_tile_descriptions(block_k_factor):
return [
([256, 128, int(32 * block_k_factor)], 3, [4, 2, 1], min_cc, max_cc),
([128, 256, int(32 * block_k_factor)], 3, [2, 4, 1], min_cc, max_cc),
([256, 64, int(32 * block_k_factor)], 4, [4, 1, 1], min_cc, max_cc),
([64, 256, int(32 * block_k_factor)], 4, [1, 4, 1], min_cc, max_cc),
([128, 128, int(32 * block_k_factor)], 3, [2, 2, 1], min_cc, max_cc),
([128, 128, int(32 * block_k_factor)], 4, [2, 2, 1], min_cc, max_cc),
([128, 128, int(32 * block_k_factor)], 5, [2, 2, 1], min_cc, max_cc),
([128, 64, int(32 * block_k_factor)], 6, [2, 2, 1], min_cc, max_cc),
([64, 128, int(32 * block_k_factor)], 6, [2, 2, 1], min_cc, max_cc),
([64, 64, int(32 * block_k_factor)], 10, [2, 2, 1], min_cc, max_cc),
([256, 128, int(64 * block_k_factor)], 3, [4, 2, 1], min_cc, max_cc_smem_limited),
([128, 256, int(64 * block_k_factor)], 3, [2, 4, 1], min_cc, max_cc_smem_limited),
([256, 64, int(64 * block_k_factor)], 4, [4, 1, 1], min_cc, max_cc_smem_limited),
([64, 256, int(64 * block_k_factor)], 4, [1, 4, 1], min_cc, max_cc_smem_limited),
([128, 128, int(64 * block_k_factor)], 4, [2, 2, 1], min_cc, max_cc),
([128, 64, int(64 * block_k_factor)], 3, [2, 2, 1], min_cc, max_cc),
([64, 128, int(64 * block_k_factor)], 3, [2, 2, 1], min_cc, max_cc),
([64, 64, int(64 * block_k_factor)], 5, [2, 2, 1], min_cc, max_cc),
]
if arg0_dtype == "float16" and arg1_dtype == "float16":
math_instructions = [
MathInstruction(
[16, 8, 16],
DataType.f16,
DataType.f16,
dtype_map[out_dtype],
dtype_map[accumlator_dtype],
OpcodeClass.TensorOp,
MathOperation.multiply_add,
)
]
alignment_constraints = [8, 4, 2]
tile_descriptions = get_default_tile_descriptions(1)
elif arg0_dtype == "float32" and arg1_dtype == "float32":
math_instructions = [
MathInstruction(
[16, 8, 8],
DataType.f32,
DataType.f32,
DataType.f32,
DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add_fast_f32 if use_3xtf32 else MathOperation.multiply_add,
),
]
alignment_constraints = [4, 2, 1]
if use_3xtf32:
tile_descriptions = [
([128, 128, 16], 4, [4, 2, 1], min_cc, max_cc),
([128, 128, 16], 3, [4, 2, 1], min_cc, max_cc),
([256, 64, 16], 3, [4, 2, 1], min_cc, max_cc),
([64, 256, 16], 3, [2, 4, 1], min_cc, max_cc),
([128, 64, 16], 4, [2, 2, 1], min_cc, max_cc),
([64, 128, 16], 4, [2, 2, 1], min_cc, max_cc),
([64, 64, 16], 3, [2, 2, 1], min_cc, max_cc),
([128, 128, 32], 3, [4, 2, 1], min_cc, max_cc),
([256, 64, 32], 3, [4, 2, 1], min_cc, max_cc_smem_limited),
([64, 256, 32], 3, [2, 4, 1], min_cc, max_cc_smem_limited),
([128, 64, 32], 3, [2, 2, 1], min_cc, max_cc),
([64, 128, 32], 3, [2, 2, 1], min_cc, max_cc),
([64, 64, 32], 3, [2, 2, 1], min_cc, max_cc),
]
else:
tile_descriptions = get_default_tile_descriptions(0.5)
else:
assert out_dtype == "int32"
math_instructions = [
MathInstruction(
[16, 8, 32],
dtype_map[arg0_dtype],
dtype_map[arg1_dtype],
DataType.s32,
DataType.s32,
OpcodeClass.TensorOp,
MathOperation.multiply_add_saturate,
),
]
alignment_constraints = [16, 8, 4]
tile_descriptions = get_default_tile_descriptions(2)
def get_tile_descriptions(math_inst):
return [
TileDescription(threadblock_shape, stages, warp_count, math_inst, min_cc, max_cc)
for threadblock_shape, stages, warp_count, min_cc, max_cc in tile_descriptions
]
alignment_constraints = [align for align in alignment_constraints if check_align(align)]
if len(alignment_constraints) > 0 and not profile_all_alignments:
alignment_constraints = [alignment_constraints[0]]
if arg0_dtype != "float32" and arg1_dtype != "float32":
sm75_kernels = generate_sm75_tensor_op_1688(
out_dtype,
arg0_dtype,
arg1_dtype,
op_creator,
check_align,
False,
profile_all_alignments,
accumlator_dtype=accumlator_dtype,
)
else:
# TF32 (float32 + float32 case) is only supported on sm80
sm75_kernels = []
if len(alignment_constraints) > 0:
sm80_kernels = generate_tensor_op_common(
math_instructions, alignment_constraints, get_tile_descriptions, op_creator
)
else:
sm80_kernels = []
# TODO(masahi): For int8 kernels, The CUTLASS generator modifies the output tensor alignment
# after ops are created. Revisit how important this modification is.
# for op in operations:
# if op.tile_description.threadblock_shape[1] >= 128:
# op.C.alignment = 16
# else:
# op.C.alignment = 8
return sm75_kernels + sm80_kernels
GENERATOR_FUNC_TABLE = {
75: generate_sm75_tensor_op_1688,
80: generate_sm80_tensor_op_16816,
}
# (Epilogue functor name, no_beta_scaling)
EPILOGUE_MAP = {
"cutlass.dense": (EpilogueFunctor.LinearCombination, False),
"cutlass.dense_bias": (EpilogueFunctor.LinearCombinationBias, True),
"cutlass.dense_bias_relu": (EpilogueFunctor.LinearCombinationRelu, True),
"cutlass.dense_bias_gelu_fp16": (EpilogueFunctor.LinearCombinationGelu, False),
"cutlass.dense_bias_gelu_fp32": (EpilogueFunctor.LinearCombinationGelu, False),
"cutlass.batch_matmul": (EpilogueFunctor.LinearCombination, False),
"cutlass.conv2d_bias_hardswish": (EpilogueFunctor.LinearCombinationHardSwish, False),
"cutlass.conv2d_bias_silu": (EpilogueFunctor.LinearCombinationSilu, False),
"cutlass.conv2d_bias_sigmoid": (EpilogueFunctor.LinearCombinationSigmoid, False),
"cutlass.conv2d_bias_relu": (EpilogueFunctor.LinearCombinationRelu, True),
"cutlass.conv2d_bias": (EpilogueFunctor.LinearCombinationBias, True),
"cutlass.conv2d": (EpilogueFunctor.LinearCombination, False),
"cutlass.conv2d_transpose": (EpilogueFunctor.LinearCombination, False),
"cutlass.conv2d_backward_weight": (EpilogueFunctor.LinearCombination, False),
}
class ProfilerEngine:
"""Compile and run a given profiler executable."""
def __init__(self, cuda_arch, cutlass_path, binary_prefix):
self.cuda_arch = cuda_arch
self.binary_prefix = binary_prefix
self.cutlass = cutlass_path
self.cflags = "-I{cutlass}/include -I{cutlass}/tools/util/include -O3 -std=c++11".format(
cutlass=cutlass_path
)
self.cflags += " -DCUTLASS_ENABLE_TENSOR_CORE_MMA=1"
self.cflags += " -gencode=arch=compute_{arch},code=[sm_{arch},compute_{arch}]".format(
arch=cuda_arch
)
self.cflags += " -Xcompiler=-Wconversion -Xcompiler=-fno-strict-aliasing"
self.cmd = "nvcc {cflags} {src} -o {output}"
def _compile(self, op):
os.makedirs(self.binary_prefix, exist_ok=True)
opath = os.path.join(self.binary_prefix, op["name"])
if os.path.exists(opath):
return
fi = tempfile.NamedTemporaryFile("w", delete=False, prefix=self.binary_prefix, suffix=".cu")
fi.write(op["src"])
fi.close()
cmd = self.cmd.format(cflags=self.cflags, src=fi.name, output=opath)
logger.info("invoking compilation %s", cmd)
os.system(cmd)
os.unlink(fi.name)
def compile_all(self, ops, use_multiprocessing=False):
"""Compile all profiler executables."""
if use_multiprocessing:
pool = multiprocessing.Pool(multiprocessing.cpu_count())
pool.map(self._compile, ops)
else:
for op in ops:
self._compile(op)
def evaluate(self, op, args):
"""Run the profiler executable corresponding to op_name with args."""
op_name = op["name"]
opath = os.path.join(self.binary_prefix, op_name)
if not os.path.exists(opath):
self._compile(op)
if not os.path.exists(opath):
            # Bail out if compilation fails for whatever reason (e.g. static assert failure)
return float("inf")
cmd = [opath]
for arg in args:
cmd.append(str(arg))
try:
logger.info("invoking evaluation %s", cmd)
sp = subprocess.run(cmd, capture_output=True, check=True)
rt = float(sp.stdout)
if rt == 0.0:
# This seems to happen with split-k using invalid split-k-slices
rt = float("inf")
logger.info("%s, %f", op_name, rt)
except subprocess.CalledProcessError:
rt = float("inf")
return rt
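# A minimal usage sketch (illustrative only): `op` is assumed to be a kernel dict as
# produced by the generators above, carrying at least "name" and "src" entries; the
# paths are placeholders.
#
#   engine = ProfilerEngine(80, "/path/to/cutlass", "/tmp/cutlass_profilers")
#   engine.compile_all([op])
#   runtime = engine.evaluate(op, [1024, 768, 512])  # float("inf") if compile/run fails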
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/cutlass/library.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,line-too-long
"""Various type definitions to help instantiate CUTLASS kernels."""
import re
import enum
from enum import auto as enum_auto
class GeneratorTarget(enum.Enum):
Library = enum_auto()
class DataType(enum.Enum):
f16 = enum_auto()
f32 = enum_auto()
s8 = enum_auto()
u8 = enum_auto()
s32 = enum_auto()
ShortDataTypeNames = {
DataType.f16: "h",
DataType.f32: "s",
DataType.s32: "i",
}
DataTypeNames = {
DataType.f16: "f16",
DataType.f32: "f32",
DataType.s8: "s8",
DataType.u8: "u8",
DataType.s32: "s32",
}
DataTypeTag = {
DataType.f16: "cutlass::half_t",
DataType.f32: "float",
DataType.s8: "int8_t",
DataType.s32: "int32_t",
DataType.u8: "uint8_t",
}
DataTypeSize = {
DataType.f16: 16,
DataType.f32: 32,
DataType.u8: 8,
DataType.s8: 8,
DataType.s32: 32,
}
class MathOperation(enum.Enum):
multiply_add = enum_auto()
multiply_add_saturate = enum_auto()
multiply_add_fast_f32 = enum_auto()
MathOperationTag = {
MathOperation.multiply_add: "cutlass::arch::OpMultiplyAdd",
MathOperation.multiply_add_saturate: "cutlass::arch::OpMultiplyAddSaturate",
MathOperation.multiply_add_fast_f32: "cutlass::arch::OpMultiplyAddFastF32",
}
class LayoutType(enum.Enum):
ColumnMajor = enum_auto()
RowMajor = enum_auto()
TensorNHWC = enum_auto()
LayoutTag = {
LayoutType.ColumnMajor: "cutlass::layout::ColumnMajor",
LayoutType.RowMajor: "cutlass::layout::RowMajor",
LayoutType.TensorNHWC: "cutlass::layout::TensorNHWC",
}
TransposedLayout = {
LayoutType.ColumnMajor: LayoutType.RowMajor,
LayoutType.RowMajor: LayoutType.ColumnMajor,
LayoutType.TensorNHWC: LayoutType.TensorNHWC,
}
ShortLayoutTypeNames = {
LayoutType.ColumnMajor: "n",
LayoutType.RowMajor: "t",
LayoutType.TensorNHWC: "nhwc",
}
class OpcodeClass(enum.Enum):
Simt = enum_auto()
TensorOp = enum_auto()
WmmaTensorOp = enum_auto()
OpcodeClassNames = {
OpcodeClass.Simt: "simt",
OpcodeClass.TensorOp: "tensorop",
OpcodeClass.WmmaTensorOp: "wmma_tensorop",
}
OpcodeClassTag = {
OpcodeClass.Simt: "cutlass::arch::OpClassSimt",
OpcodeClass.TensorOp: "cutlass::arch::OpClassTensorOp",
OpcodeClass.WmmaTensorOp: "cutlass::arch::OpClassWmmaTensorOp",
}
class OperationKind(enum.Enum):
Gemm = enum_auto()
Conv2d = enum_auto()
OperationKindNames = {OperationKind.Gemm: "gemm", OperationKind.Conv2d: "conv2d"}
class Target(enum.Enum):
library = enum_auto()
def substitute_template(template, values):
"""Instantiate a kernel template using `values`."""
text = template
changed = True
while changed:
changed = False
for key, value in values.items():
regex = "\\$\\{%s\\}" % key
newtext = re.sub(regex, value, text)
if newtext != text:
changed = True
text = newtext
return text
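# Example (illustrative): substitution is applied repeatedly until the text stops
# changing, so values may themselves contain further placeholders.
#
#   substitute_template("${a} + ${b}", {"a": "x", "b": "${a}"})  # -> "x + x"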
class GemmKind(enum.Enum):
Gemm = enum_auto()
GemmKindNames = {
GemmKind.Gemm: "gemm",
}
class EpilogueFunctor(enum.Enum):
LinearCombination = enum_auto()
LinearCombinationRelu = enum_auto()
LinearCombinationBias = enum_auto()
LinearCombinationGelu = enum_auto()
LinearCombinationSigmoid = enum_auto()
LinearCombinationSilu = enum_auto()
LinearCombinationHardSwish = enum_auto()
LinearCombinationResidualBlock = enum_auto()
EpilogueFunctorTag = {
EpilogueFunctor.LinearCombination: "cutlass::epilogue::thread::LinearCombination",
EpilogueFunctor.LinearCombinationRelu: "cutlass::epilogue::thread::LinearCombinationRelu",
EpilogueFunctor.LinearCombinationBias: "cutlass::epilogue::thread::LinearCombination",
EpilogueFunctor.LinearCombinationGelu: "cutlass::epilogue::thread::LinearCombinationGELU",
EpilogueFunctor.LinearCombinationSigmoid: "cutlass::epilogue::thread::LinearCombinationSigmoid",
EpilogueFunctor.LinearCombinationSilu: "cutlass::epilogue::thread::LinearCombinationSilu",
EpilogueFunctor.LinearCombinationHardSwish: "cutlass::epilogue::thread::LinearCombinationHardSwish",
EpilogueFunctor.LinearCombinationResidualBlock: "cutlass::epilogue::thread::LinearCombinationResidualBlock",
}
class SwizzlingFunctor(enum.Enum):
Identity1 = enum_auto()
Identity2 = enum_auto()
Identity4 = enum_auto()
Identity8 = enum_auto()
Batched = enum_auto()
StridedDgradIdentity1 = enum_auto()
StridedDgradIdentity4 = enum_auto()
SwizzlingFunctorTag = {
SwizzlingFunctor.Identity1: "cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>",
SwizzlingFunctor.Identity2: "cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<2>",
SwizzlingFunctor.Identity4: "cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<4>",
SwizzlingFunctor.Identity8: "cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>",
SwizzlingFunctor.Batched: "cutlass::gemm::threadblock::GemmBatchedIdentityThreadblockSwizzle",
SwizzlingFunctor.StridedDgradIdentity1: "cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<1>",
SwizzlingFunctor.StridedDgradIdentity4: "cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<4>",
}
class ConvKind(enum.Enum):
Fprop = enum_auto()
Dgrad = enum_auto()
Wgrad = enum_auto()
ConvKindTag = {
ConvKind.Fprop: "cutlass::conv::Operator::kFprop",
ConvKind.Dgrad: "cutlass::conv::Operator::kDgrad",
ConvKind.Wgrad: "cutlass::conv::Operator::kWgrad",
}
ConvKindNames = {
ConvKind.Fprop: "fprop",
ConvKind.Dgrad: "dgrad",
ConvKind.Wgrad: "wgrad",
}
class StrideSupport(enum.Enum):
Strided = enum_auto()
Unity = enum_auto()
StrideSupportTag = {
StrideSupport.Strided: "cutlass::conv::StrideSupport::kStrided",
StrideSupport.Unity: "cutlass::conv::StrideSupport::kUnity",
}
StrideSupportNames = {
StrideSupport.Strided: "",
StrideSupport.Unity: "unity_stride",
}
class IteratorAlgorithm(enum.Enum):
Analytic = enum_auto()
Optimized = enum_auto()
IteratorAlgorithmTag = {
IteratorAlgorithm.Analytic: "cutlass::conv::IteratorAlgorithm::kAnalytic",
IteratorAlgorithm.Optimized: "cutlass::conv::IteratorAlgorithm::kOptimized",
}
IteratorAlgorithmNames = {
IteratorAlgorithm.Analytic: "analytic",
IteratorAlgorithm.Optimized: "optimized",
}
class MathInstruction:
"""Describe characteristics of a math instruction."""
def __init__(
self,
instruction_shape,
element_a,
element_b,
element_c,
element_accumulator,
opcode_class,
math_operation=MathOperation.multiply_add,
):
self.instruction_shape = instruction_shape
self.element_a = element_a
self.element_b = element_b
self.element_c = element_c
self.element_accumulator = element_accumulator
self.opcode_class = opcode_class
self.math_operation = math_operation
class TileDescription:
"""Describe characteristics of a GEMM tile."""
def __init__(
self, threadblock_shape, stages, warp_count, math_instruction, min_compute, max_compute
):
self.threadblock_shape = threadblock_shape
self.stages = stages
self.warp_count = warp_count
self.math_instruction = math_instruction
self.minimum_compute_capability = min_compute
self.maximum_compute_capability = max_compute
def procedural_name(self):
return "%dx%d_%dx%d" % (
self.threadblock_shape[0],
self.threadblock_shape[1],
self.threadblock_shape[2],
self.stages,
)
class TensorDescription:
def __init__(self, element, layout, alignment=1):
self.element = element
self.layout = layout
self.alignment = alignment
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/debugger/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/debugger/debug_executor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Graph debug runtime executes TVM debug packed functions."""
import logging
import os
import shutil
import struct
import tempfile
import tvm._ffi
from tvm._ffi.base import string_types
from tvm.contrib import graph_executor
from tvm.runtime.module import BenchmarkResult
from ...runtime.profiling import Report
from . import debug_result
_DUMP_ROOT_PREFIX = "tvmdbg_"
_DUMP_PATH_PREFIX = "_tvmdbg_"
def create(graph_json_str, libmod, device, dump_root=None):
"""Create a runtime executor module given a graph and module.
Parameters
----------
graph_json_str : str
The graph to be deployed in json format output by graph compiler.
The graph can contain operator(tvm_op) that points to the name
of PackedFunc in the libmod.
libmod : tvm.Module
The module of the corresponding function.
device : Device
The device to deploy the module, can be local or remote.
dump_root : str
        The folder in which the outputs should be kept.
        If None, a temporary folder under /tmp/tvmdbg<rand_string> is created and used for dumping.
Returns
-------
graph_module : GraphModuleDebug
Debug Runtime graph module that can be used to execute the graph.
"""
assert isinstance(graph_json_str, string_types)
try:
dev, num_rpc_dev, device_type_id = graph_executor.get_device(libmod, device)
if num_rpc_dev == len(dev):
fcreate = dev[0]._rpc_sess.get_function("tvm.graph_executor_debug.create")
else:
fcreate = tvm._ffi.get_global_func("tvm.graph_executor_debug.create")
except ValueError:
raise ValueError(
"Please set '(USE_PROFILER ON)' in " "config.cmake and rebuild TVM to enable debug mode"
)
func_obj = fcreate(graph_json_str, libmod, *device_type_id)
gmod = GraphModuleDebug(func_obj, dev, graph_json_str, dump_root)
# Automatically set params if they can be extracted from the libmod
try:
params = libmod["get_graph_params"]()
except (AttributeError, tvm.error.RPCError):
# Params can not be extracted from the libmod and must be set somewhere else manually
# Do not set params during RPC communication
pass
else:
gmod.set_input(**params)
return gmod
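# A minimal usage sketch (illustrative; `graph_json`, `lib` and `np_input` are assumed to
# come from a prior graph compilation, e.g. relay.build):
#
#   m = create(graph_json, lib, tvm.cpu(0), dump_root="/tmp/tvmdbg")
#   m.set_input("data", np_input)
#   m.run()        # dumps per-node outputs and a Chrome trace under dump_root
#   out = m.get_output(0).numpy()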
class GraphModuleDebug(graph_executor.GraphModule):
"""Graph debug runtime module.
This is a debug wrapper over the TVM runtime.
Runtime interfaces are wrapped with debug functionalities.
Manage the debug framework to format the debug data and
trigger the user interfaces.
Parameters
----------
module : Module
The internal tvm module that holds the actual graph functions.
device : Device
The device that this module is under.
graph_json_str : str or graph class
Content of graph json file in string format
dump_root : str
        The folder in which the outputs should be kept.
        If None, a temporary folder under /tmp/tvmdbg<rand_string> is created and used for dumping.
"""
def __init__(self, module, device, graph_json_str, dump_root):
self._dump_root = dump_root
self._dump_path = None
self._run_individual = module["run_individual"]
self._run_individual_node = module["run_individual_node"]
self._debug_get_output = module["debug_get_output"]
self._execute_node = module["execute_node"]
self._get_node_output = module["get_node_output"]
self._profile = module["profile"]
self._profile_rpc = module["profile_rpc"]
graph_executor.GraphModule.__init__(self, module)
self._create_debug_env(graph_json_str, device)
def _format_device(self, device):
return str(device[0]).upper().replace("(", ":").replace(")", "")
def _ensure_dir(self, directory):
"""Create a directory if not exists
Parameters
----------
directory : str
File path to create
"""
if not os.path.exists(directory):
os.makedirs(directory, 0o700)
def _get_dump_path(self, device):
"""Make the graph and tensor dump folder and return the path.
Parameters
----------
device : Device
The device that this module is under.
Returns
-------
path : str
Directory path where the graph and node outputs will be stored.
"""
# save to file
folder_name = _DUMP_PATH_PREFIX + "device_"
folder_name = folder_name + device.replace(":", "_")
path = os.path.join(self._dump_root, folder_name)
self._ensure_dir(path)
return path
def _remove_dump_root(self):
if os.path.isdir(self._dump_root):
shutil.rmtree(self._dump_root)
def _create_debug_env(self, graph_json, device):
"""Create UI wrapper framework to handle multiple UI frontends for tvmdbg
Parameters
----------
graph_json : json format
            json formatted graph containing the list of each node's name, shape and type.
device : Device
The device that this module is under.
"""
# make the dump folder if not given
if not self._dump_root:
self._dump_root = tempfile.mkdtemp(prefix=_DUMP_ROOT_PREFIX)
# format the device
device = self._format_device(device)
# updates the dumping directories
self._dump_path = self._get_dump_path(device)
# init the debug dumping environment
self.debug_datum = debug_result.DebugResult(graph_json, self._dump_path)
def _execute_next_node(self, node_index, output_index):
"""Execute node assuming all previous nodes has been executed.
Return the output of this node.
Parameters
----------
node_index : int
The node index
output_index: int
The node output index
Return
------
output_tensors : Array<NDarray>
Array of output tensors
"""
output_tensors = self._execute_next_node_get_output(node_index, output_index)
return output_tensors
def _run_per_layer(self):
"""Execute up to each node and each debug output will be
copied to the buffer.
"""
output_tensors = []
for i, node in enumerate(self.debug_datum.get_graph_nodes()):
self._execute_node(i)
num_outputs = self.debug_datum.get_graph_node_output_num(node)
for j in range(num_outputs):
logging.info(
"running node=%d, output_ind=%d, with node_name: %s", i, j, node["name"]
)
output_tensors.append(self._get_node_output(i, j))
self.debug_datum.update_output_tensors(output_tensors)
def _run_debug(
self,
number,
repeat,
min_repeat_ms,
limit_zero_time_iterations,
cooldown_interval_ms,
repeats_to_cooldown,
):
"""Execute the node specified with index will be executed.
Each debug output will be copied to the buffer
Time consumed for each execution will be set as debug output.
"""
# Get timing.
self.debug_datum._time_list = self.run_individual(
number=number,
repeat=repeat,
min_repeat_ms=min_repeat_ms,
limit_zero_time_iterations=limit_zero_time_iterations,
cooldown_interval_ms=cooldown_interval_ms,
repeats_to_cooldown=repeats_to_cooldown,
)
# Get outputs.
self._run_per_layer()
def debug_get_output(self, node, out=None):
"""Run graph up to node and get the output to out
Parameters
----------
node : int / str
The node index or name
out : NDArray
The output array container
"""
if isinstance(node, str):
node_index = None
for i, graph_node in enumerate(self.debug_datum.get_graph_nodes()):
if graph_node["name"] == node:
node_index = i
break
else:
raise AttributeError(f"Could not find a node named {node} in this graph.")
elif isinstance(node, int):
node_index = node
else:
            raise RuntimeError("Require node index or name only.")
self._debug_get_output(node_index, out)
# pylint: disable=arguments-differ
def run(
self,
number=10,
repeat=1,
min_repeat_ms=1,
limit_zero_time_iterations=100,
cooldown_interval_ms=0,
repeats_to_cooldown=1,
sort_by_time=True,
**input_dict,
):
"""Run forward execution of the graph with debug
Parameters
----------
number: int, optional
The number of times to run this function for taking average.
            These runs are counted as one `repeat` of measurement.
repeat: int, optional
The number of times to repeat the measurement.
In total, the function will be invoked (1 + number x repeat) times,
where the first one is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms: int, optional
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
the parameters `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., When the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
limit_zero_time_iterations: int, optional
The maximum number of repeats when measured time is equal to 0.
It helps to avoid hanging during measurements.
cooldown_interval_ms: int, optional
The cooldown interval in milliseconds between the number of repeats defined by
`repeats_to_cooldown`.
repeats_to_cooldown: int, optional
The number of repeats before the cooldown is activated.
sort_by_time: bool, optional
Whether to sort the debug output by time.
input_dict : dict of str to NDArray
            Map of input names to values to be fed to the graph.
"""
if input_dict:
self.set_input(**input_dict)
# Step 1. Execute the graph
self._run_debug(
number=number,
repeat=repeat,
min_repeat_ms=min_repeat_ms,
limit_zero_time_iterations=limit_zero_time_iterations,
cooldown_interval_ms=cooldown_interval_ms,
repeats_to_cooldown=repeats_to_cooldown,
)
# Step 2. Dump the output tensors to the dump folder
self.debug_datum.dump_output_tensor()
# Step 3. Dump the Chrome trace to the dump folder
self.debug_datum.dump_chrome_trace()
# Step 4. Display the collected information
self.debug_datum.display_debug_result(sort_by_time)
def run_individual(
self,
number,
repeat=1,
min_repeat_ms=0,
limit_zero_time_iterations=100,
cooldown_interval_ms=0,
repeats_to_cooldown=1,
):
"""Run each operation in the graph and get the time per op for all ops.
number: int
The number of times to run this function for taking average.
            These runs are counted as one `repeat` of measurement.
repeat: int, optional
The number of times to repeat the measurement.
In total, the function will be invoked (1 + number x repeat) times,
where the first one is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms: int, optional
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
the parameters `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., When the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
limit_zero_time_iterations: int, optional
The maximum number of repeats when measured time is equal to 0.
It helps to avoid hanging during measurements.
cooldown_interval_ms: int, optional
The cooldown interval in milliseconds between the number of repeats defined by
`repeats_to_cooldown`.
repeats_to_cooldown: int, optional
The number of repeats before the cooldown is activated.
Returns
-------
A 2-dimensional array where the dimensions are: the index of the operation and
the repeat of the measurement.
"""
res = self._run_individual(
number,
repeat,
min_repeat_ms,
limit_zero_time_iterations,
cooldown_interval_ms,
repeats_to_cooldown,
)
results = []
offset = 0
format_size = "@q"
(nodes_count,) = struct.unpack_from(format_size, res, offset)
offset += struct.calcsize(format_size)
format_data = "@" + repeat * "d"
for _ in range(0, nodes_count):
ret = struct.unpack_from(format_data, res, offset)
offset += struct.calcsize(format_data)
results.append([*ret])
return results
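        # Usage note (illustrative): the returned value is indexed as
        # results[node_index][repeat_index]; e.g. with numpy, np.mean(results[0])
        # gives the average time of the first node across repeats.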
def run_individual_node(
self,
index,
number=10,
repeat=1,
min_repeat_ms=0,
limit_zero_time_iterations=100,
cooldown_interval_ms=0,
repeats_to_cooldown=1,
):
"""Benchmark a single node in the serialized graph.
This does not do any data transfers and uses arrays already on the device.
Parameters
----------
index : int
The index of the node, see `self.debug_datum.get_graph_nodes`
number: int
The number of times to run this function for taking average.
            These runs are counted as one `repeat` of measurement.
repeat: int, optional
The number of times to repeat the measurement.
In total, the function will be invoked (1 + number x repeat) times,
where the first one is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms : int, optional
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
the parameters `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., When the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
limit_zero_time_iterations: int, optional
The maximum number of repeats when measured time is equal to 0.
It helps to avoid hanging during measurements.
cooldown_interval_ms: int, optional
The cooldown interval in milliseconds between the number of repeats defined by
`repeats_to_cooldown`.
repeats_to_cooldown: int, optional
The number of repeats before the cooldown is activated.
Returns
-------
A module BenchmarkResult
"""
# Results are returned as serialized strings which we deserialize
res = self._run_individual_node(
index,
number,
repeat,
min_repeat_ms,
limit_zero_time_iterations,
cooldown_interval_ms,
repeats_to_cooldown,
)
fmt = "@" + ("d" * repeat)
results = struct.unpack(fmt, res)
return BenchmarkResult(list(results))
def profile(self, collectors=None, **input_dict):
"""Run forward execution of the graph and collect overall and per-op
performance metrics.
Parameters
----------
collectors : Optional[Sequence[MetricCollector]]
Extra metrics to collect. If profiling over RPC, collectors must be `None`.
input_dict : dict of str to NDArray
            Map of input names to values to be fed to the graph.
Return
------
timing_results : str
Per-operator and whole graph timing results in a table format.
"""
if input_dict:
self.set_input(**input_dict)
if self.module.type_key == "rpc":
# We cannot serialize MetricCollectors over RPC
assert collectors is None, "Profiling with collectors is not supported over RPC"
return Report.from_json(self._profile_rpc())
return self._profile(collectors)
def exit(self):
"""Exits the dump folder and all its contents"""
self._remove_dump_root()
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/debugger/debug_result.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Graph debug results dumping class."""
import collections
import json
import os
import numpy as np
import tvm
GRAPH_DUMP_FILE_NAME = "_tvmdbg_graph_dump.json"
CHROME_TRACE_FILE_NAME = "_tvmdbg_execution_trace.json"
ChromeTraceEvent = collections.namedtuple("ChromeTraceEvent", ["ts", "tid", "pid", "name", "ph"])
class DebugResult(object):
"""Graph debug data module.
Data dump module manage all the debug data formatting.
Output data and input graphs are formatted and dumped to file.
Frontend read these data and graph for visualization.
Parameters
----------
graph_json : str
The graph to be deployed in json format output by graph compiler. Each operator (tvm_op)
in the graph will have a one to one mapping with the symbol in libmod which is used
to construct a "PackedFunc" .
dump_path : str
Output data path is read/provided from frontend
"""
def __init__(self, graph_json, dump_path):
self._dump_path = dump_path
self._output_tensor_list = []
self._time_list = []
json_obj = self._parse_graph(graph_json)
# dump the json information
self._dump_graph_json(json_obj)
def _parse_graph(self, graph_json):
"""Parse and extract the JSON graph and update the nodes, shapes and dltype.
Parameters
----------
graph_json : str or graph class
            The graph to be deployed in json format output by the graph compiler.
"""
json_obj = json.loads(graph_json)
self._nodes_list = json_obj["nodes"]
self._shapes_list = json_obj["attrs"]["shape"]
self._dtype_list = json_obj["attrs"]["dltype"]
self._update_graph_json()
return json_obj
def _update_graph_json(self):
"""update the nodes_list with name, shape and data type,
for temporarily storing the output.
"""
nodes_len = len(self._nodes_list)
for i in range(nodes_len):
node = self._nodes_list[i]
input_list = []
for input_node in node["inputs"]:
input_list.append(self._nodes_list[input_node[0]]["name"])
node["inputs"] = input_list
dtype = str("type: " + self._dtype_list[1][i])
if "attrs" not in node:
node["attrs"] = {}
node["op"] = "param"
else:
node["op"] = node["attrs"]["func_name"]
node["attrs"].update({"T": dtype})
node["shape"] = self._shapes_list[1][i]
def _cleanup_tensors(self):
"""Remove the tensor dump file (graph wont be removed)"""
for filename in os.listdir(self._dump_path):
if os.path.isfile(filename) and not filename.endswith(".json"):
os.remove(filename)
def get_graph_nodes(self):
"""Return the nodes list"""
return self._nodes_list
def get_graph_node_shapes(self):
"""Return the nodes shapes list"""
return self._shapes_list
def get_graph_node_output_num(self, node):
"""Return the number of outputs of a node"""
return 1 if node["op"] == "param" else int(node["attrs"]["num_outputs"])
def get_graph_node_dtypes(self):
"""Return the nodes dtype list"""
return self._dtype_list
def get_output_tensors(self):
"""Get the output tensors of each operation in numpy format"""
eid = 0
output_tensors = {}
for i, node in enumerate(self._nodes_list):
num_outputs = self.get_graph_node_output_num(node)
for j in range(num_outputs):
# the node name is not unique, so we need a consistent
# indexing based on the list ordering in the nodes
key = f"{node['name']}____topo-index:{i}____output-num:{j}"
output_tensors[key] = self._output_tensor_list[eid]
eid += 1
return output_tensors
def update_output_tensors(self, tensors):
"""Update output tensors list
Parameters
----------
tensors : list[NDArray]
"""
if not isinstance(tensors, list):
            raise AttributeError("tensors with incorrect type.")
for output_array in tensors:
self._output_tensor_list.append(output_array)
def dump_output_tensor(self):
"""Dump the outputs to a temporary folder, the tensors are in numpy format"""
# cleanup existing tensors before dumping
self._cleanup_tensors()
output_tensors = self.get_output_tensors()
with open(os.path.join(self._dump_path, "output_tensors.params"), "wb") as param_f:
param_f.write(save_tensors(output_tensors))
def dump_chrome_trace(self):
"""Dump the trace to the Chrome trace.json format."""
def s_to_us(t):
return t * 10**6
starting_times = np.zeros(len(self._time_list) + 1)
starting_times[1:] = np.cumsum([np.mean(times) for times in self._time_list])
def node_to_events(node, times, starting_time):
return [
ChromeTraceEvent(
ts=s_to_us(starting_time),
tid=1,
pid=1,
ph="B",
name=node["name"],
),
ChromeTraceEvent(
# Use start + duration instead of end to ensure precise timings.
ts=s_to_us(np.mean(times) + starting_time),
tid=1,
pid=1,
ph="E",
name=node["name"],
),
]
events = [
e
for (node, times, starting_time) in zip(
self._nodes_list, self._time_list, starting_times
)
for e in node_to_events(node, times, starting_time)
]
result = dict(displayTimeUnit="ns", traceEvents=[e._asdict() for e in events])
with open(os.path.join(self._dump_path, CHROME_TRACE_FILE_NAME), "w") as trace_f:
json.dump(result, trace_f)
def _dump_graph_json(self, graph):
"""Dump json formatted graph.
Parameters
----------
graph : json format
            json formatted graph containing the list of each node's
name, shape and type.
"""
graph_dump_file_name = GRAPH_DUMP_FILE_NAME
with open(os.path.join(self._dump_path, graph_dump_file_name), "w") as outfile:
json.dump(graph, outfile, indent=4, sort_keys=False)
def get_debug_result(self, sort_by_time=True):
"""Return the debugger result"""
header = [
"Node Name",
"Ops",
"Time(us)",
"Time(%)",
"Shape",
"Inputs",
"Outputs",
"Measurements(us)",
]
lines = [
"---------",
"---",
"--------",
"-------",
"-----",
"------",
"-------",
"----------------",
]
eid = 0
data = []
total_time = sum([np.mean(time) for time in self._time_list])
for node, time in zip(self._nodes_list, self._time_list):
time_mean = np.mean(time)
num_outputs = self.get_graph_node_output_num(node)
for j in range(num_outputs):
op = node["op"]
if node["op"] == "param":
eid += 1
continue
name = node["name"]
shape = str(self._output_tensor_list[eid].shape)
time_us = round(time_mean * 1e6, 3)
time_percent = round(((time_mean / total_time) * 100), 3)
inputs = str(node["attrs"]["num_inputs"])
outputs = str(node["attrs"]["num_outputs"])
measurements = str([round(repeat_data * 1e6, 3) for repeat_data in time])
node_data = [name, op, time_us, time_percent, shape, inputs, outputs, measurements]
data.append(node_data)
eid += 1
if sort_by_time:
# Sort on the basis of execution time. Prints the most expensive ops in the start.
data = sorted(data, key=lambda x: x[2], reverse=True)
# Insert a row for total time at the end.
rounded_total_time_us = round(total_time * 1e6, 3)
data.append(["Total_time", "-", rounded_total_time_us, "-", "-", "-", "-", "-", "-"])
fmt = ""
for i, _ in enumerate(header):
max_len = len(header[i])
for j, _ in enumerate(data):
item_len = len(str(data[j][i]))
if item_len > max_len:
max_len = item_len
fmt = fmt + "{:<" + str(max_len + 2) + "}"
log = [fmt.format(*header)]
log.append(fmt.format(*lines))
for row in data:
log.append(fmt.format(*row))
return "\n".join(log)
def display_debug_result(self, sort_by_time=True):
"""Displays the debugger result"""
print(self.get_debug_result(sort_by_time))
def save_tensors(params):
"""Save parameter dictionary to binary bytes.
The result binary bytes can be loaded by the
GraphModule with API "load_params".
Parameters
----------
params : dict of str to NDArray
The parameter dictionary.
Returns
-------
param_bytes: bytearray
Serialized parameters.
"""
_save_tensors = tvm.get_global_func("tvm.relay._save_param_dict")
return _save_tensors(params)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/debugger/debug_runtime.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Deprecated Python API for DebugExecutor."""
import warnings
from . import debug_executor
def create(*args, **kwargs):
warnings.warn(
"This function has been moved to tvm.contrib.graph_executor and will be removed "
"in the next TVM release"
)
return debug_executor.create(*args, **kwargs)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/dlpack.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Wrapping functions to bridge frameworks with DLPack support to TVM"""
from tvm.runtime import ndarray
def convert_func(tvm_func, tensor_type, to_dlpack_func):
"""Convert a tvm function into one that accepts a tensor from another
framework, provided the other framework supports DLPACK
Parameters
----------
tvm_func: Function
Built tvm function operating on arrays
tensor_type: Type
Type of the tensors of the target framework
to_dlpack_func: Function
Function to convert the source tensors to DLPACK
"""
assert callable(tvm_func)
def _wrapper(*args):
args = tuple(
ndarray.from_dlpack(to_dlpack_func(arg)) if isinstance(arg, tensor_type) else arg
for arg in args
)
return tvm_func(*args)
return _wrapper
def to_pytorch_func(tvm_func):
"""Convert a tvm function into one that accepts PyTorch tensors
Parameters
----------
tvm_func: Function
Built tvm function operating on arrays
Returns
-------
wrapped_func: Function
Wrapped tvm function that operates on PyTorch tensors
"""
# pylint: disable=import-outside-toplevel
import torch
import torch.utils.dlpack
return convert_func(tvm_func, torch.Tensor, torch.utils.dlpack.to_dlpack)
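# A minimal usage sketch (illustrative; assumes `f` is a function built with tvm.build
# that performs an elementwise add over three 1024-element arrays):
#
#   import torch
#   add = to_pytorch_func(f)
#   x, y, z = torch.rand(1024), torch.rand(1024), torch.empty(1024)
#   add(x, y, z)   # tensors are passed to TVM zero-copy through DLPack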
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/dnnl.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External function interface to BLAS libraries."""
import tvm
from tvm import te
from ..topi.nn.utils import get_pad_tuple
def matmul(lhs, rhs, transa=False, transb=False, **kwargs):
"""Create an extern op that compute matrix mult of A and rhs with CrhsLAS
This function serves as an example on how to call external libraries.
Parameters
----------
lhs: Tensor
The left matrix operand
rhs: Tensor
The right matrix operand
transa: bool
Whether transpose lhs
transb: bool
Whether transpose rhs
Returns
-------
C: Tensor
The result tensor.
"""
n = lhs.shape[1] if transa else lhs.shape[0]
m = rhs.shape[0] if transb else rhs.shape[1]
return te.extern(
(n, m),
[lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.dnnl.matmul", ins[0], ins[1], outs[0], transa, transb
),
name="C",
**kwargs,
)
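# A minimal usage sketch (illustrative; requires a TVM build with DNNL enabled so that
# the "tvm.contrib.dnnl.matmul" packed function is registered):
#
#   A = te.placeholder((128, 64), name="A")
#   B = te.placeholder((64, 256), name="B")
#   C = matmul(A, B)                  # (128, 256) result computed by DNNL
#   s = te.create_schedule(C.op)
#   f = tvm.build(s, [A, B, C], target="llvm")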
def dnnl_conv2d(
src,
weights,
stride,
padding,
dilation,
groups,
channel_last=False,
out_dtype="float32",
**kwargs,
):
"""Convolution operator in NCHW layout.
Parameters
----------
src : tvm.te.Tensor
4-D with shape [batch, in_channel, in_height, in_width]
weights : tvm.te.Tensor
4-D with shape [num_filter, in_channel, filter_height, filter_width]
stride : int or a list/tuple of two ints
Stride size, or [stride_height, stride_width]
padding : int or a list/tuple of 2 or 4 ints
padding size, or
[pad_height, pad_width] for 2 ints, or
[pad_top, pad_left, pad_bottom, pad_right] for 4 ints
dilation: int or a list/tuple of two ints
dilation size, or [dilation_height, dilation_width]
    groups: int
        number of groups for grouped convolution
    channel_last: bool
        choose whether the input/output data format is channel-last (NHWC) or
        plain (NCHW)
    out_dtype: str
        output datatype; currently only float32 is supported
Returns
-------
Output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
assert isinstance(stride, int) or len(stride) == 2
assert isinstance(dilation, int) or len(dilation) == 2
if isinstance(stride, int):
stride_h = stride_w = stride
else:
stride_h, stride_w = stride
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
else:
dilation_h, dilation_w = dilation
pre_cast = src.dtype == "float32"
post_cast = out_dtype == "float32"
if channel_last:
batch, in_height, in_width, _ = src.shape
kernel_h, kernel_w, _, num_filter = weights.shape
else:
batch, _, in_height, in_width = src.shape
num_filter, _, kernel_h, kernel_w = weights.shape
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_top, pad_left, pad_down, pad_right = get_pad_tuple(
padding, (dilated_kernel_h, dilated_kernel_w)
)
out_channel = num_filter
out_height = (in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1
out_width = (in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1
if channel_last:
out_shape = (batch, out_height, out_width, out_channel)
else:
out_shape = (batch, out_channel, out_height, out_width)
return te.extern(
out_shape,
[src, weights],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.dnnl.conv2d",
ins[0],
ins[1],
outs[0],
pad_top,
pad_down,
pad_left,
pad_right,
            stride_h,
            stride_w,
groups,
channel_last,
pre_cast,
post_cast,
),
name="C",
dtype=out_dtype,
**kwargs,
)
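# A minimal usage sketch (illustrative; NCHW layout, requires a TVM build with DNNL):
#
#   data = te.placeholder((1, 32, 56, 56), name="data")
#   kernel = te.placeholder((64, 32, 3, 3), name="kernel")
#   out = dnnl_conv2d(data, kernel, stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=1)
#   # `out` has shape (1, 64, 56, 56)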
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/download.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Helper utility for downloading"""
import logging
import os
from pathlib import Path
import shutil
import tempfile
import time
LOG = logging.getLogger("download")
def download(url, path, overwrite=False, size_compare=False, retries=3):
"""Downloads the file from the internet.
Set the input options correctly to overwrite or do the size comparison
Parameters
----------
url : str
Download url.
path : str
Local file path to save downloaded file.
overwrite : bool, optional
Whether to overwrite existing file, defaults to False.
size_compare : bool, optional
        Whether to do a size comparison to check the downloaded file, defaults
        to False.
retries: int, optional
        Number of times to retry the download, defaults to 3.
"""
# pylint: disable=import-outside-toplevel
import urllib.request as urllib2
path = Path(path).resolve()
if path.exists() and path.is_file() and not overwrite:
if size_compare:
import requests
file_size = path.stat().st_size
res_head = requests.head(url)
res_get = requests.get(url, stream=True)
if "Content-Length" not in res_head.headers:
res_get = urllib2.urlopen(url)
url_file_size = int(res_get.headers["Content-Length"])
if url_file_size != file_size:
LOG.warning("Existing file %s has incorrect size, downloading fresh copy", path)
download(url, path, overwrite=True, size_compare=False, retries=retries)
return
LOG.info("File %s exists, skipping.", path)
return
LOG.info("Downloading from url %s to %s", url, path)
# Stateful start time
start_time = time.time()
dirpath = path.parent
dirpath.mkdir(parents=True, exist_ok=True)
def _download_progress(count, block_size, total_size):
# pylint: disable=unused-argument
"""Show the download progress."""
if count == 0:
return
duration = time.time() - start_time
progress_bytes = int(count * block_size)
progress_megabytes = progress_bytes / (1024.0 * 1024)
speed_kbps = int(progress_bytes / (1024 * duration))
percent = min(int(count * block_size * 100 / total_size), 100)
# Temporarily suppress newlines on the output stream.
prev_terminator = logging.StreamHandler.terminator
logging.StreamHandler.terminator = ""
LOG.debug(
"\r...%d%%, %.2f MB, %d KB/s, %d seconds passed",
percent,
progress_megabytes,
speed_kbps,
duration,
)
logging.StreamHandler.terminator = prev_terminator
with tempfile.TemporaryDirectory() as tempdir:
tempdir = Path(tempdir)
download_loc = tempdir.joinpath(path.name)
for i_retry in range(retries):
# pylint: disable=broad-except
try:
urllib2.urlretrieve(url, download_loc, reporthook=_download_progress)
LOG.debug("")
try:
download_loc.rename(path)
except OSError:
# Prefer a move, but if the tempdir and final
# location are in different drives, fall back to a
# copy.
shutil.copy2(download_loc, path)
return
except Exception as err:
if i_retry == retries - 1:
raise err
LOG.warning(
"%s\nDownload attempt %d/%d failed, retrying.", repr(err), i_retry, retries
)
if "TEST_DATA_ROOT_PATH" in os.environ:
TEST_DATA_ROOT_PATH = Path(os.environ.get("TEST_DATA_ROOT_PATH"))
else:
TEST_DATA_ROOT_PATH = Path(Path("~").expanduser(), ".tvm_test_data")
TEST_DATA_ROOT_PATH.mkdir(parents=True, exist_ok=True)
def download_testdata(url, relpath, module=None, overwrite=False):
"""Downloads the test data from the internet.
Parameters
----------
url : str
Download url.
relpath : str
Relative file path.
module : Union[str, list, tuple], optional
Subdirectory paths under test data folder.
overwrite : bool, defaults to False
If True, will download a fresh copy of the file regardless of
the cache. If False, will only download the file if a cached
version is missing.
Returns
-------
abspath : str
Absolute file path of downloaded file
"""
global TEST_DATA_ROOT_PATH
if module is None:
module_path = ""
elif isinstance(module, str):
module_path = module
elif isinstance(module, (list, tuple)):
module_path = Path(*module)
else:
raise ValueError("Unsupported module: " + module)
abspath = Path(TEST_DATA_ROOT_PATH, module_path, relpath)
download(url, abspath, overwrite=overwrite, size_compare=False)
return str(abspath)
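# A minimal usage sketch (URL and paths are illustrative):
#
#   path = download_testdata(
#       "https://example.com/cat.png", "cat.png", module=["data", "images"]
#   )
#   # `path` points into ~/.tvm_test_data/data/images/ unless TEST_DATA_ROOT_PATH is set.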
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/emcc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Util to invoke emscripten compilers in the system."""
# pylint: disable=invalid-name
import subprocess
from tvm._ffi.base import py_str
from tvm._ffi.libinfo import find_lib_path
def create_tvmjs_wasm(output, objects, options=None, cc="emcc"):
"""Create wasm that is supposed to run with the tvmjs.
Parameters
----------
output : str
The target shared library.
objects : list
List of object files.
    options : list of str, optional
        Additional options to pass to the compiler.
    cc : str, optional
        The compiler command to invoke, "emcc" by default.
"""
cmd = [cc]
cmd += ["-O3"]
cmd += ["-std=c++17"]
cmd += ["--no-entry"]
cmd += ["-s", "ERROR_ON_UNDEFINED_SYMBOLS=0"]
cmd += ["-s", "STANDALONE_WASM=1"]
cmd += ["-s", "ALLOW_MEMORY_GROWTH=1"]
objects = [objects] if isinstance(objects, str) else objects
with_runtime = False
for obj in objects:
if obj.find("wasm_runtime.bc") != -1:
with_runtime = True
if not with_runtime:
objects += [find_lib_path("wasm_runtime.bc")[0]]
objects += [find_lib_path("tvmjs_support.bc")[0]]
objects += [find_lib_path("webgpu_runtime.bc")[0]]
cmd += ["-o", output]
cmd += objects
if options:
cmd += options
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Compilation error:\n"
msg += py_str(out)
raise RuntimeError(msg)
create_tvmjs_wasm.object_format = "bc"
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ethosu/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace for Arm(R) Ethos(TM)-U NPU contrib functionality"""
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ethosu/cascader/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The NPU cascader.
This component performs inter-operator scheduling to optimize
for both performance and memory usage on Arm(R) Ethos(TM)-U NPUs.
"""
from .stripe_config import StripeConfig
from .block_config import BlockConfig
from .propagator import Propagator
from .graph import (
PerformanceInfo,
Tensor,
Part,
TESubgraph,
CascaderGraph,
BufferMode,
register_matcher,
create_cascader_graph,
)
from .parts import InlinePart, EthosuPart
from .device_config import EthosuDeviceConfig
from .tensor_config import TensorConfigState, MemoryRegion, TensorConfig
from .plan import Plan
from .scheduler import apply_proposal, cascade, extract_memory_info
from .logging import Logging
from .cascader_options import CascaderOptions
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ethosu/cascader/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for the NPU cascader."""
import tvm._ffi
tvm._ffi._init_api("contrib.ethosu.cascader", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ethosu/cascader/block_config.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Block config to hold an output block shape and a corresponding input block shape"""
from typing import List
import tvm._ffi
from tvm.runtime import Object
from . import _ffi_api
@tvm._ffi.register_object("contrib.ethosu.cascader.BlockConfig")
class BlockConfig(Object):
"""BlockConfig class"""
def __init__(
self,
input_shape: List[int],
output_shape: List[int],
compute_cycles: int,
output_cycles: int,
):
self.__init_handle_by_constructor__(
_ffi_api.BlockConfig, input_shape, output_shape, compute_cycles, output_cycles
)
@property
def input_shape(self) -> List[int]:
return list(self._input_shape)
@property
def output_shape(self) -> List[int]:
return list(self._output_shape)
@property
def compute_cycles(self) -> int:
return int(self._compute_cycles)
@property
def output_cycles(self) -> int:
return int(self._output_cycles)
def __ge__(self, other: "BlockConfig"):
if len(self.output_shape) != len(other.output_shape):
return False
return all(a >= b for a, b in zip(self.output_shape, other.output_shape))
def __lt__(self, other: "BlockConfig"):
if len(self.output_shape) != len(other.output_shape):
return False
return other >= self
def __repr__(self) -> str:
return f"BlockConfig(output_shape={self.output_shape})"
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ethosu/cascader/cascader_options.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Object to hold options for the NPU cascader"""
import tvm._ffi
from tvm.runtime import Object
from . import _ffi_api
from .tensor_config import MemoryRegion
@tvm._ffi.register_object("contrib.ethosu.cascader.CascaderOptions")
class CascaderOptions(Object):
"""
A class to hold configuration options for the cascader.
Attributes
----------
cascade_region : MemoryRegion
The MemoryRegion to place cascading buffers into.
max_proposals : int
The maximum number of Proposals to generate.
stripe_factors : int
How many striping factors to try per axis.
max_plan_size : int
The maximum number of Parts in a Plan.
max_open_plans : int
The maximum number of open Plans to keep after culling.
max_closed_plans : int
        The maximum number of closed Plans to keep after culling.
always_copy_size : int
The maximum size of a Tensor that will always be copied into the cascade region.
disable_pareto_plans : bool
Disable pareto culling for Plans.
disable_pareto_proposals : bool
Disable pareto culling for Proposals.
enable_multi_dimensional_striping : bool
Enable striping in multiple dimensions simultaneously.
disable_block_culling : bool
Disable culling of block configs.
enable_striping : bool
A boolean option to enable striping
"""
def __init__(
self,
cascade_region: MemoryRegion,
max_proposals: int,
stripe_factors: int,
max_plan_size: int,
max_open_plans: int,
max_closed_plans: int,
always_copy_size: int,
disable_pareto_plans: bool = False,
disable_pareto_proposals: bool = False,
enable_multi_dimensional_striping: bool = False,
disable_block_culling: bool = True,
enable_striping: bool = False,
):
self.__init_handle_by_constructor__(
_ffi_api.CascaderOptions,
cascade_region,
max_proposals,
stripe_factors,
max_plan_size,
max_open_plans,
max_closed_plans,
always_copy_size,
disable_pareto_plans,
disable_pareto_proposals,
enable_multi_dimensional_striping,
disable_block_culling,
enable_striping,
)
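# Hedged usage sketch (illustrative, not part of the original module): `sram` stands in
# for a MemoryRegion describing the cascading working memory; its construction is defined
# in tensor_config.py and is not shown here. The integer values are arbitrary examples.
def _example_options(sram: MemoryRegion) -> "CascaderOptions":
    return CascaderOptions(
        cascade_region=sram,
        max_proposals=64,
        stripe_factors=4,
        max_plan_size=10,
        max_open_plans=8,
        max_closed_plans=32,
        always_copy_size=1024,
    )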
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ethosu/cascader/device_config.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
# pylint: disable=too-many-nested-blocks
"""Device config class to hold information about the target hardware"""
from typing import Tuple, List, Dict, Optional
from functools import reduce
import math
import numpy as np
import tvm
from . import BlockConfig
from . import StripeConfig
from . import Propagator
def _round_up(a: int, b: int) -> int:
"""Round up to a multiple of b"""
return ((a + b - 1) // b) * b
def _round_up_div(a: int, b: int) -> int:
"""Divide by b and round up to a multiple of b"""
return (a + b - 1) // b
class _Shape:
"""Helper class for dealing with Tensor shapes of different layouts"""
def __init__(self, shape: List[int], layout="NHWC"):
if layout == "NHCWB16":
self.height = int(shape[1])
self.width = int(shape[3])
self.depth = int(shape[2]) * int(shape[4])
else:
            # identity layout is NHWC but the shape does not always have rank 4
length = len(shape)
if length == 4:
self.height = int(shape[1])
self.width = int(shape[2])
self.depth = int(shape[3])
elif length == 3:
self.height = int(shape[0])
self.width = int(shape[1])
self.depth = int(shape[2])
elif length == 2:
self.height = int(shape[0])
self.width = int(shape[1])
self.depth = 1
elif length == 1:
self.height = int(shape[0])
self.width = 1
self.depth = 1
def round_up(self, other: "_Shape"):
self.height = _round_up(self.height, other.height)
self.width = _round_up(self.width, other.width)
self.depth = _round_up(self.depth, other.depth)
def area(self) -> int:
return self.height * self.width
def as_list(self):
return [1, self.height, self.width, self.depth]
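# Worked example (comment only, values are illustrative): for an NHCWB16 shape
# [1, 8, 3, 16, 16] the helper above reads height = shape[1] = 8, width = shape[3] = 16
# and depth = shape[2] * shape[4] = 48, so as_list() gives [1, 8, 16, 48] in NHWC terms.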
class EthosuDeviceConfig:
"""Arm(R) Ethos(TM)-U NPU config class"""
def __init__(self, device: str, disable_block_bulling: bool = False):
self._device = device
self._subkernel_limits = (8, 8)
self._output_cycles = (1, 2, 3, 4, 6)
self._split_depth = 16
self._max_block_shape = _Shape([1, 32, 64, 128])
self._bank_size_bytes = 1024
self._disable_block_culling = disable_block_bulling
if self._device == "ethos-u55-256":
self._micro_block = _Shape([1, 2, 2, 8])
self._input_micro_block = _Shape([1, 2, 2, 8])
self._delay_cycles = (2, 2)
self._activation_cycles = (0.25, 1)
self._output_units = 8
self._total_banks = 48
self._reserved_banks = 4
self._input_granularity = {1: 8, 2: 8, 4: 16}
self._accumulator_granularity = {4: 16, 5: 20}
self._lut_reserved = True
elif self._device == "ethos-u55-128":
self._micro_block = _Shape([1, 1, 2, 8])
self._input_micro_block = _Shape([1, 1, 2, 8])
self._delay_cycles = (2, 3)
self._activation_cycles = (0.5, 1)
self._output_units = 4
self._total_banks = 24
self._reserved_banks = 4
self._input_granularity = {1: 4, 2: 4, 4: 8}
self._accumulator_granularity = {4: 8, 5: 12}
self._lut_reserved = True
elif self._device == "ethos-u55-64":
self._micro_block = _Shape([1, 1, 1, 8])
self._input_micro_block = _Shape([1, 1, 1, 8])
self._delay_cycles = (2, 3)
self._activation_cycles = (1, 1)
self._output_units = 2
self._total_banks = 16
self._reserved_banks = 2
self._input_granularity = {1: 2, 2: 2, 4: 4}
self._accumulator_granularity = {4: 4, 5: 8}
self._lut_reserved = False
elif self._device == "ethos-u55-32":
self._micro_block = _Shape([1, 1, 1, 4])
self._input_micro_block = _Shape([1, 1, 1, 8])
self._delay_cycles = (3, 7)
self._activation_cycles = (1, 2)
self._output_units = 1
self._total_banks = 16
self._reserved_banks = 2
self._input_granularity = {1: 2, 2: 2, 4: 4}
self._accumulator_granularity = {4: 4, 5: 4}
self._lut_reserved = False
def _get_output_cycles(
self, op_type: str, op_str: str, ifm_dtype: str, ofm_dtype: str, activation: str
) -> float:
"""Estimate cycles per output element for an NPU operator
Parameters
----------
op_type : str
The NPU primitive operator
"ethosu_pooling"
op_str : str
The type of NPU operator.
"MAX"
ifm_dtype: str
Datatype of the Input Feature Map tensor (IFM)
ofm_dtype: str
Datatype of the Output Feature Map tensor (OFM)
activation : str
The activation function to use.
"NONE" - no activation function.
"CLIP" - clip the output between clip_min and clip_max.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
Returns
-------
float
The cycles per output element
"""
cycles = 0
bw_limit = 0
if op_type == "ethosu_pooling" and op_str == "MAX":
cycles = self._output_cycles[0]
elif op_type in ("ethosu_pooling", "ethosu_conv2d", "ethosu_depthwise_conv2d"):
cycles = self._output_cycles[1] if ifm_dtype == "int8" else self._output_cycles[2]
elif op_type == "ethosu_binary_elementwise":
# Binary Bandwidth Limitations
if ifm_dtype == "int8":
bw_limit = 0.125 if ofm_dtype == "int8" else 0.75
elif ifm_dtype == "int16":
bw_limit = 0.75 if ofm_dtype == "int16" else 1
else:
bw_limit = 1.5
if op_str in ("MIN", "MAX"):
cycles = self._output_cycles[1]
elif op_str == "MUL":
cycles = self._output_cycles[2]
if op_str in ("ADD", "SUB"):
if ofm_dtype == "int32":
cycles = (
self._output_cycles[2] if ifm_dtype == "int32" else self._output_cycles[3]
)
else:
cycles = self._output_cycles[4]
elif op_type == "ethosu_unary_elementwise":
# Unary Bandwidth Limitations
if ifm_dtype == "int16":
bw_limit = 0.25
elif ifm_dtype == "int32":
bw_limit = 1
if op_str == "CLZ":
cycles = self._output_cycles[1]
elif op_str in ("SHL", "SHR"):
cycles = self._output_cycles[2]
elif op_str in ("LRELU", "ABS"):
cycles = self._output_cycles[1]
if ifm_dtype == "int16":
bw_limit = 0.5
act_cycles = 0
if activation == "CLIP":
act_cycles = self._activation_cycles[0]
elif activation in ("LUT", "TANH", "SIGMOID"):
act_cycles = self._activation_cycles[1]
return max((cycles / self._output_units), act_cycles, bw_limit)
def _get_delay_cycles(self, op_type: str, ifm_dtype: str) -> int:
"""Get the number of delay cycles during a bubble
Parameters
----------
op_type : str
The NPU primitive operator
"ethosu_pooling"
ifm_dtype: str
Datatype of the Input Feature Map tensor (IFM)
Returns
----------
int
The amount of delay cycles
"""
if op_type in ("ethosu_conv2d", "ethosu_depthwise2d", "ethosu_pooling"):
if ifm_dtype == "int16":
return self._delay_cycles[1]
return self._delay_cycles[0]
return 0
def _get_weight_decoder_cycles(self, op_type: str) -> int:
"""Get cycle estimate for weight decoding
Parameters
----------
op_type: str
The NPU primitive operator
"ethosu_pooling"
Returns
----------
int
Estimated cycles for weight decoding
"""
if op_type in ("ethosu_conv2d", "ethosu_depthwise2d"):
return 32 * self._micro_block.depth // 8
return 0
def get_output_quantum(self, ofm_layout: str) -> Tuple[int]:
"""Get the atomic output volume
Parameters
----------
ofm_layout : str
The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
Returns
----------
Tuple[int]
The atomic output volume formatted to the ofm_layout parameter
"""
if ofm_layout == "NHCWB16":
return [
1,
self._micro_block.height,
1,
self._micro_block.width,
self._micro_block.depth,
]
return self._micro_block.as_list()
def _align(self, x: int, n: int) -> int:
return int(math.ceil(x / n) * n)
def _get_input_size(
self, output_size: int, kernel_stride: int, border: int, upscaling_factor: int
) -> int:
        # Round up so the derived input extent fully covers the upscaled output extent
        return int(math.ceil(((output_size - 1) * kernel_stride + border) / upscaling_factor))
def _get_dilated_kernel_size(self, kernel_size: int, dilation: int) -> int:
return (kernel_size - 1) * dilation + 1
def _get_input_block(
self,
output_block: _Shape,
input_shape: _Shape,
dtype: str,
op_type: str,
partkernel: bool,
stride_h: int,
stride_w: int,
dilated_kernel_h: int,
dilated_kernel_w: int,
upscaling_factor: int,
) -> _Shape:
height = self._get_input_size(
output_block.height,
stride_h,
min(dilated_kernel_h, self._subkernel_limits[0]),
upscaling_factor,
)
width = self._get_input_size(
output_block.width,
stride_w,
min(dilated_kernel_w, self._subkernel_limits[1]),
upscaling_factor,
)
if op_type == "ethosu_conv2d":
if dtype == "int8":
if partkernel:
depth = self._align(min(32, input_shape.depth), 8)
else:
depth = self._align(min(16, input_shape.depth), 8)
elif dtype == "int16":
depth = self._align(min(16, input_shape.depth), 4)
else:
depth = self._align(min(8, input_shape.depth), 2)
else:
depth = output_block.depth
return _Shape(
[
1,
self._align(height, self._micro_block.height),
self._align(width, self._micro_block.width),
depth,
]
)
def get_kernel_steps(
self,
op_type: str,
dilated_kernel_h: int,
dilated_kernel_w: int,
ifm_dtype: str,
partkernel: bool = False,
) -> List[int]:
"""Calculate the total number of subkernels and their sizes
Parameters
----------
op_type : str
The NPU primitive operator
"ethosu_pooling"
dilated_kernel_h: int
Height of dilated kernel
dilated_kernel_w: int
Width of dilated kernel
ifm_dtype: str
Datatype of the Input Feature Map tensor (IFM)
partkernel: bool
Flag showing whether part-kernel first traversal is used
Returns
----------
List[int]
List where each entry contains the amount of elements in one of the subkernels
"""
if op_type == "ethosu_binary_elementwise":
return [1]
subkernels = self._get_subkernels(dilated_kernel_h, dilated_kernel_w)
# Determine the number of kernel steps per subkernel
kernel_steps = []
for y, x in subkernels:
subkernel_elements = x * y
if op_type == "ethosu_conv2d" and partkernel:
# Part-kernel-first traversal conv2d
divisor = 4 if ifm_dtype == "int8" else 2
kernel_steps.append(int(_round_up_div(subkernel_elements, divisor)))
elif op_type == "ethosu_depthwise_conv2d":
kernel_steps.append(int(_round_up_div(subkernel_elements, 4)))
else:
# Depth-first traversal conv2d or pooling
kernel_steps.append(int(subkernel_elements))
return kernel_steps
def _get_subkernels(self, dilated_kernel_h: int, dilated_kernel_w: int):
num_subkernels_y = _round_up_div(dilated_kernel_h, self._subkernel_limits[0])
num_subkernels_x = _round_up_div(dilated_kernel_w, self._subkernel_limits[1])
subkernels_y = [
min((dilated_kernel_h - i * self._subkernel_limits[0]), self._subkernel_limits[0])
for i in range(num_subkernels_y)
]
subkernels_x = [
min((dilated_kernel_w - i * self._subkernel_limits[1]), self._subkernel_limits[1])
for i in range(num_subkernels_x)
]
subkernels = []
for y in subkernels_y:
for x in subkernels_x:
subkernels.append((y, x))
return subkernels
def _get_accumulator_width(self, op_type: str, ifm_dtype: str):
if ifm_dtype == "int16" and op_type != "ethosu_pooling":
return 5
return 4
def is_partkernel(
self, op_type: str, ifm_channels: int, ifm_dtype: str, kernel_elements: int
) -> bool:
"""Determine which block traversal strategy has better DPU utilization
Parameters
----------
op_type: str
The NPU primitive operator
"ethosu_pooling"
ifm_channels: int
Number of input channels
ifm_dtype: str
Datatype of the Input Feature Map tensor (IFM)
kernel_elements: int
Total number of elements in the kernel
Returns
----------
bool
True if partkernel first has best DPU utilization
"""
if op_type != "ethosu_conv2d":
return False
depth_first_utilization = ifm_channels / _round_up(
ifm_channels, 32 if ifm_dtype == "int8" else 16
)
part_kernel_first_utilization = (ifm_channels / _round_up(ifm_channels, 8)) * (
kernel_elements / _round_up(kernel_elements, 4 if ifm_dtype == "int8" else 2)
)
return part_kernel_first_utilization > depth_first_utilization or ifm_channels <= 8
def _get_input_banks(self, input_block_shape, input_bytewidth):
input_bytes = input_block_shape.area() * self._align(
input_block_shape.depth * input_bytewidth, 8
)
input_banks = _round_up_div(input_bytes, self._bank_size_bytes) * 2
input_banks = _round_up(input_banks, self._input_granularity[input_bytewidth])
return input_banks
def _get_accumulator_banks(self, output_block_shape, acc_bytewidth):
acc_depth = _round_up(output_block_shape.depth, 8)
acc_bytes = output_block_shape.area() * self._align(acc_depth, 8) * acc_bytewidth
acc_banks = _round_up_div(acc_bytes, self._bank_size_bytes) * 2
acc_banks = _round_up(acc_banks, self._accumulator_granularity[acc_bytewidth])
return acc_banks
@staticmethod
def _create_layout_block(nhwc_block_config, layout):
"""A helper function to convert to brick layout"""
if layout == "NHCWB16":
return [
nhwc_block_config[0],
nhwc_block_config[1],
1 + ((nhwc_block_config[3] - 1) // 16),
nhwc_block_config[2],
16,
]
# else it could only be NHWC
return nhwc_block_config
def get_elementwise_block_config(
self,
ifm_propagator: Propagator,
ifm2_propagator: Optional[Propagator],
op_attrs: Dict,
ofm_shape: List[int],
output_layout: str,
input_layout: str,
input2_layout: Optional[str],
ifm_dtype: str,
ofm_dtype: str,
) -> List[BlockConfig]:
"""Get a suitable block config for an elementwise operator
Parameters
----------
ifm_propagator: Propagator,
The propagator containing the data dependencies between input and output
ifm2_propagator: Propagator,
The propagator containing the data dependencies between input2 and output
op_attrs: Dict,
Dictionary containing operator attributes
ofm_shape: List[int],
Shape of the output tensor
output_layout: str,
The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
input_layout: str,
The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
input2_layout: str,
The layout of the Input2 Feature Map tensor. Can be "NHWC" or "NHCWB16".
ifm_dtype: str,
Datatype of the Input Feature Map tensor (IFM)
ofm_dtype: str,
Datatype of the Output Feature Map tensor (OFM)
Returns
----------
List[BlockConfig]
List containing a single suitable block config
"""
block_config = []
output_shape = [int(a) for a in ofm_shape]
op_type = op_attrs.get("op")
op_str = op_attrs.get("op_str")
activation = op_attrs.get("activation", "NONE")
input_bytewidth = 1 if ifm_dtype == "int8" else 2 if ifm_dtype == "int16" else 4
banks_available = self._total_banks - self._reserved_banks
if activation == "LUT" and not self._lut_reserved:
banks_available -= 2
# Handle user-forced block config
options = tvm.transform.PassContext.current().config.get("relay.ext.ethos-u.options", None)
if options and options.dev_force_block_config:
block_config = [int(v) for v in options.dev_force_block_config.split("x")]
assert len(block_config) == 3
if output_layout == "NHWC":
block_shape = [output_shape[0], block_config[0], block_config[1], block_config[2]]
else:
block_shape = [
output_shape[0],
block_config[0],
1 + ((block_config[2] - 1) // 16),
block_config[1],
16,
]
output_cycles = self._get_output_cycles(
op_type, op_str, ifm_dtype, ofm_dtype, activation
)
output_cycles *= reduce(lambda a, b: a * b, block_shape, 1)
output_cycles = int(math.ceil(output_cycles))
return [BlockConfig(block_shape, block_shape, 0, output_cycles)]
# Split the block in half until it fits into SHRAM
max_height, max_width, max_depth = self._max_block_shape.as_list()[1:]
if output_layout == "NHCWB16":
output_height = output_shape[1]
output_width = output_shape[3]
output_channels = output_shape[2] * 16
else:
output_height = output_shape[1]
output_width = output_shape[2]
output_channels = output_shape[3]
output_nhwc_block = [
1,
_round_up(min(output_height, max_height), self._micro_block.height),
_round_up(min(output_width, max_width), self._micro_block.width),
_round_up(min(output_channels, max_depth), self._micro_block.depth),
]
output_block = self._create_layout_block(output_nhwc_block, output_layout)
split_order = (a for a in [1, 2, 3])
split_axis = next(split_order)
offset = [0] * len(output_block)
stripes = [1] * len(output_block)
order = [1, 2, 4, 3, 0] if output_layout == "NHCWB16" else [1, 2, 3, 4]
while True:
# Create stripe config for output block
output_stripe_config = StripeConfig(
output_block, output_block, output_block, order, stripes, offset
)
# Propagate the output to obtain the two input blocks
input_block = _Shape(ifm_propagator.propagate(output_stripe_config).shape, input_layout)
if ifm2_propagator:
input2_block = _Shape(
ifm2_propagator.propagate(output_stripe_config).shape, input2_layout
)
else:
# Unary elementwise
input2_block = input_block
input_block.round_up(self._input_micro_block)
input2_block.round_up(self._input_micro_block)
# Banks required for input block
input_banks = self._get_input_banks(input_block, input_bytewidth)
# Banks required for input2 block
input2_banks = self._get_input_banks(input2_block, input_bytewidth)
# Check whether or not both IFMs fit into SHRAM
if (input_banks + input2_banks) <= banks_available:
output_cycles = self._get_output_cycles(
op_type, op_str, ifm_dtype, ofm_dtype, activation
)
output_cycles *= reduce(lambda a, b: a * b, output_block, 1)
output_cycles = int(math.ceil(output_cycles))
block_config.append(
BlockConfig(input_block.as_list(), output_block, 0, output_cycles)
)
break
if output_nhwc_block[split_axis] == self._micro_block.as_list()[split_axis]:
split_axis = next(split_order)
output_nhwc_block[split_axis] = _round_up(
_round_up_div(output_nhwc_block[split_axis], 2),
self._micro_block.as_list()[split_axis],
)
output_block = self._create_layout_block(output_nhwc_block, output_layout)
return block_config
def _get_subkernel_propagator(
self, op_attrs, ifm_propagator, input_layout, output_layout, depth
):
op_type = op_attrs.get("op")
stride_h = int(op_attrs.get("stride_h", 1))
stride_w = int(op_attrs.get("stride_w", 1))
transform = ifm_propagator.transform
if op_type != "ethosu_identity":
if input_layout == "NHCWB16":
transform[1][-1] = min(transform[1][-1], self._subkernel_limits[0] - stride_h)
transform[3][-1] = min(transform[3][-1], self._subkernel_limits[1] - stride_w)
else:
transform[1][-1] = min(transform[1][-1], self._subkernel_limits[0] - stride_h)
transform[2][-1] = min(transform[2][-1], self._subkernel_limits[1] - stride_w)
if op_type in ("ethosu_pooling", "ethosu_depthwise_conv2d"):
if output_layout == "NHCWB16" and input_layout == "NHWC":
transform[3][-1] = depth
elif output_layout == "NHCWB16" and input_layout == "NHCWB16":
transform[2][-1] = 1 + ((depth - 1) // 16)
return Propagator(transform, ifm_propagator.offset)
def get_valid_block_configs(
self,
ifm_propagator: Propagator,
op_attrs: Dict,
ofm_shape: List[int],
ofm_channels: int,
ifm_channels: int,
output_layout: str,
input_layout: str,
ifm_dtype: str,
ofm_dtype: str,
kernel_h: int = 1,
kernel_w: int = 1,
) -> List[BlockConfig]:
"""Get all of the valid block configs
Parameters
----------
ifm_propagator: Propagator,
The propagator containing the data dependencies between input and output
op_attrs: Dict,
Dictionary containing operator attributes
ofm_shape: List[int],
Shape of the output tensor
ofm_channels: int,
Number of output channels
ifm_channels: int,
Number of input channels
output_layout: str,
The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
input_layout: str,
The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
ifm_dtype: str,
Datatype of the Input Feature Map tensor (IFM)
ofm_dtype: str,
Datatype of the Output Feature Map tensor (OFM)
kernel_h: int,
Height of kernel
        kernel_w: int
            Width of kernel
Returns
----------
List[BlockConfig]
List containing all of the valid block configs
"""
valid_block_configs = []
op_type = op_attrs.get("op")
op_str = op_attrs.get("op_str")
activation = op_attrs.get("activation", "NONE")
upscaling_factor = 1 if op_attrs.get("upscale", "NONE") == "NONE" else 2
if output_layout == "NHCWB16":
output_shape = _Shape([1, ofm_shape[1], ofm_shape[3], ofm_channels])
else:
output_shape = _Shape(ofm_shape)
# Define search space
max_height = min(output_shape.height, self._max_block_shape.height)
min_height = max(self._micro_block.height, upscaling_factor)
max_width = min(output_shape.width, self._max_block_shape.width)
min_width = max(self._micro_block.width, upscaling_factor)
max_depth = min(ofm_channels, self._max_block_shape.depth)
min_depth = max(self._micro_block.depth, upscaling_factor)
heights = range(min_height, max_height + min_height, min_height)
widths = range(min_width, max_width + min_width, min_width)
depths = range(min_depth, max_depth + min_depth, min_depth)
# Handle user-forced block config
options = tvm.transform.PassContext.current().config.get("relay.ext.ethos-u.options", None)
forced = False
if options and options.dev_force_block_config:
block_config = [int(v) for v in options.dev_force_block_config.split("x")]
assert len(block_config) == 3
heights = [block_config[0]]
widths = [block_config[1]]
depths = [block_config[2]]
forced = True
input_bytewidth = 1 if ifm_dtype == "int8" else 2
acc_bytewidth = self._get_accumulator_width(op_type, ifm_dtype)
banks_available = self._total_banks - self._reserved_banks
if activation == "LUT" and not self._lut_reserved:
banks_available -= 2
# Input block depth has additional limitations for operators that require full input depth
input_block_depth = 0
partkernel = self.is_partkernel(op_type, ifm_channels, ifm_dtype, kernel_h * kernel_w)
if op_type == "ethosu_conv2d":
if partkernel:
input_block_depth = min(ifm_channels, 16)
else:
input_block_depth = min(ifm_channels, 32)
for depth in reversed(depths):
if (depth < output_shape.depth) and (depth % self._split_depth != 0) and not forced:
                # Block depth must either cover the full output depth or be a multiple of the split depth
continue
subkernel_propagator = self._get_subkernel_propagator(
op_attrs, ifm_propagator, input_layout, output_layout, depth
)
for width in reversed(widths):
for height in reversed(heights):
if output_layout == "NHCWB16":
output_block = (
1,
height,
1 + ((depth - 1) // 16),
width,
16,
)
order = [1, 2, 4, 3, 0]
else:
output_block = (1, height, width, depth)
order = [1, 2, 3, 4]
offset = [0] * len(output_block)
stripes = [1] * len(output_block)
block_stripe_config = StripeConfig(
output_block,
output_block,
output_block,
order,
stripes,
offset,
)
# Propagate output block
input_block = subkernel_propagator.propagate(block_stripe_config)
input_block_shape = _Shape(input_block.shape, input_layout)
input_block_shape.round_up(self._input_micro_block)
output_block_shape = _Shape(output_block, output_layout)
if op_type == "ethosu_conv2d":
input_block_shape.depth = input_block_depth
# Banks required for input block
input_banks = self._get_input_banks(input_block_shape, input_bytewidth)
# Banks required for accumulation
acc_banks = self._get_accumulator_banks(output_block_shape, acc_bytewidth)
if (input_banks + acc_banks) <= banks_available:
output_cycles = self._get_output_cycles(
op_type, op_str, ifm_dtype, ofm_dtype, activation
)
output_cycles *= np.prod(output_block).tolist()
output_cycles = int(math.ceil(output_cycles))
compute_cycles = self._estimate_compute_cycles_per_block(
op_type,
output_block_shape,
input_block_shape,
kernel_h,
kernel_w,
ifm_channels,
"int8",
partkernel,
)
block_config = BlockConfig(
input_block_shape.as_list(), output_block, compute_cycles, output_cycles
)
if self._disable_block_culling:
# Block culling disabled - add all block configs that fit
valid_block_configs.append(block_config)
else:
# Add block config only if it's not dominated by an existing block.
# A block config is dominated by another if its output_shape is greater
# or equal in every dimension and strictly greater in at least one
# dimension.
dominated = False
for valid_block in valid_block_configs:
if block_config < valid_block:
dominated = True
break
if not dominated:
valid_block_configs.append(block_config)
# Every consecutive block in the innermost loop will be dominated by
# this one so break
break
return valid_block_configs
def _estimate_compute_cycles_per_block(
self,
op_type: str,
block_shape: _Shape,
input_block_shape: _Shape,
kernel_h: int,
kernel_w: int,
input_channels: int,
ifm_dtype: str,
partkernel: bool = False,
) -> Tuple[int, int]:
# Calculate the amount of micro blocks per block, per axis
num_quantum_x = _round_up_div(block_shape.width, self._micro_block.width)
num_quantum_y = _round_up_div(block_shape.height, self._micro_block.height)
num_quantum_z = _round_up_div(block_shape.depth, self._micro_block.depth)
num_quantum_xy = num_quantum_x * num_quantum_y
kernel_steps = self.get_kernel_steps(op_type, kernel_h, kernel_w, ifm_dtype, partkernel)
wd_cycles = self._get_weight_decoder_cycles(op_type)
delay_cycles = self._get_delay_cycles(op_type, ifm_dtype)
cycle_quantum = 4
compute_cycles = 0
for subkernel_steps in kernel_steps:
subkernel_cycles = 1 if op_type == "ethosu_pooling" else subkernel_steps
compute_cycles += (
max(wd_cycles, cycle_quantum * num_quantum_xy) * subkernel_cycles * num_quantum_z
)
if num_quantum_xy == 1:
if num_quantum_z == 1:
compute_cycles += delay_cycles * subkernel_steps
elif subkernel_steps > 1:
compute_cycles += delay_cycles * (subkernel_steps - 1) * num_quantum_z
if partkernel:
compute_cycles *= _round_up_div(input_block_shape.depth, 8)
if op_type == "ethosu_conv2d":
compute_cycles *= _round_up_div(input_channels, input_block_shape.depth)
return compute_cycles
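# Hedged usage sketch (illustrative, not part of the original module): "ethos-u55-256" is
# one of the accelerator variants handled in EthosuDeviceConfig.__init__ above; the other
# values are arbitrary example inputs.
def _example_device_config_queries():
    device_config = EthosuDeviceConfig("ethos-u55-256")
    # The atomic output volume follows the 1x2x2x8 micro block of the 256 MAC variant.
    nhwc_quantum = device_config.get_output_quantum("NHWC")  # [1, 2, 2, 8]
    # Shallow int8 inputs favour part-kernel-first traversal for conv2d.
    partkernel = device_config.is_partkernel("ethosu_conv2d", 3, "int8", 9)
    return nhwc_quantum, partkernel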
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ethosu/cascader/graph.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Graph objects to define compute graphs for the NPU cascader."""
from typing import List, Dict
from enum import IntEnum
from collections import namedtuple
import numpy as np
import tvm._ffi
from tvm import te
from tvm.runtime import Object
from .stripe_config import StripeConfig
from .device_config import EthosuDeviceConfig
from . import _ffi_api
# A global store to register matching functions
REGISTERED_MATCHERS = []
TESubgraph = namedtuple("TESubgraph", ["input_tensors", "output_tensor"])
class BufferMode(IntEnum):
RECOMPUTE = 0
ROLLING = 1
@tvm._ffi.register_object("contrib.ethosu.cascader.PerformanceInfo")
class PerformanceInfo(Object):
"""PerformanceInfo class"""
@property
def compute_cycles(self):
return self._compute_cycles
@property
def read_bytes(self):
return list(self._read_bytes)
@property
def write_bytes(self):
return self._write_bytes
@property
def block_config(self):
return self._block_config
@tvm._ffi.register_object("contrib.ethosu.cascader.Tensor")
class Tensor(Object):
"""Tensor class"""
def __init__(self, shape, dtype, is_constant=False, compression_ratio=1):
self.__init_handle_by_constructor__(
_ffi_api.Tensor, shape, dtype, is_constant, compression_ratio
)
def add_producer(self, part):
_ffi_api.TensorAddProducer(self, part)
def add_consumer(self, part):
_ffi_api.TensorAddConsumer(self, part)
@property
def producers(self):
return list(self._producers)
@property
def consumers(self):
return list(self._consumers)
@property
def shape(self):
return list(self._shape)
@property
def dtype(self):
return self._dtype
@property
def is_constant(self):
return self._is_constant
@property
def compression_ratio(self):
return self._compression_ratio
@property
def size(self):
return self._size
class Part(Object):
"""Part base class"""
def set_input(self, index: int, tensor: Tensor):
_ffi_api.PartSetInput(self, index, tensor)
def set_output(self, tensor: Tensor):
_ffi_api.PartSetOutput(self, tensor)
def calculate_input_stripe_configs(
self, output_stripe_config: StripeConfig
) -> List[StripeConfig]:
return list(_ffi_api.PartCalculateInputStripeConfigs(self, output_stripe_config))
def get_stripe_align_hint(self) -> List[int]:
return list(_ffi_api.PartGetStripeAlignHint(self))
def get_performance_info(
self, stripe_config: StripeConfig, buffer_mode: BufferMode
) -> PerformanceInfo:
return _ffi_api.PartGetPerformanceInfo(self, stripe_config, buffer_mode)
@property
def input_tensors(self):
return list(self._input_tensors)
@property
def output_tensor(self):
return self._output_tensor
@property
def propagators(self):
return list(self._propagators)
@property
def in_line(self):
return self._in_line
@property
def subgraph(self):
return TESubgraph(list(self._te_input_tensors), self._te_output_tensor)
@tvm._ffi.register_object("contrib.ethosu.cascader.CascaderGraph")
class CascaderGraph(Object):
"""A class to describe a graph of Parts and Tensors used by the cascader.
This class describes a graph consisting of two object types: Tensors and Parts.
It defines a topological ordering on the graph such that each Part and Tensor has a
position in the ordering. This ordering is used by the Plan and Proposal generation
algorithms. It is also the ordering the Parts are expected to be executed in.
In addition to defining an ordering, the Parts and Tensors are also all given unique
IDs which they can be referred to by."""
def __init__(self, input_tensors: List[Tensor], output_tensors: List[Tensor]):
self.__init_handle_by_constructor__(_ffi_api.CascaderGraph, input_tensors, output_tensors)
def get_part_id(self, part: Part) -> int:
return _ffi_api.CascaderGraphGetPartID(self, part)
def get_tensor_id(self, tensor: Tensor) -> int:
return _ffi_api.CascaderGraphGetTensorID(self, tensor)
@property
def input_tensors(self):
return list(self._input_tensors)
@property
def output_tensors(self):
return list(self._output_tensors)
@property
def tensor_order(self):
return list(self._tensor_order)
@property
def part_order(self):
return list(self._part_order)
def register_matcher(matcher):
"""Register a match function to the frontend.
A match function takes a te.Tensor and checks whether it matches
a known operator/operator sequence. If it does, it returns a Part
which models the behaviour of that operator sequence. Otherwise,
it returns None.
"""
REGISTERED_MATCHERS.append(matcher)
return matcher
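# Hedged sketch (illustrative, not part of the original module): a matcher takes the
# te.Tensor that produces a value plus the device config, and either returns a Part
# modelling the producing operator or None so other matchers can try. Real matchers are
# registered with @register_matcher at import time; the placeholder below only shows the
# expected signature and is deliberately not registered.
def _example_matcher(tensor: te.Tensor, device_config: EthosuDeviceConfig):
    # A real implementation would inspect tensor.op and construct an
    # InlinePart/EthosuPart; returning None means "no match".
    return None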
def create_cascader_graph(
te_graph: TESubgraph, const_dict: Dict[int, np.ndarray], device_config: EthosuDeviceConfig
) -> CascaderGraph:
"""Create a CascaderGraph from a Tensor Expression graph and constant dictionary.
Parameters
----------
te_graph : TESubgraph
The Tensor Expression graph.
const_dict : Dict[int, np.ndarray]
The constant dictionary.
device_config : EthosuDeviceConfig
Target device configuration.
Returns
-------
CascaderGraph
The CascaderGraph.
"""
tensor_map = {}
def _visit_tensor(tensor):
if tensor not in tensor_map:
is_const = False
# Logic to determine if the tensor is constant
if tensor in list(te_graph.inputs):
i = list(te_graph.inputs).index(tensor)
if i in const_dict:
is_const = True
# TODO(@mbaret): Calculate the compression ratio
plan_tensor = Tensor(
tensor.shape,
tensor.dtype,
is_constant=is_const,
)
tensor_map[tensor] = plan_tensor
if isinstance(tensor.op, te.PlaceholderOp) or tensor in te_graph.inputs:
return
input_tensors = []
# Check whether any of the registered matchers match the current tensor
for matcher in REGISTERED_MATCHERS:
part = matcher(tensor, device_config)
if part:
input_tensors = part.subgraph.input_tensors
break
assert part is not None, f"The tensor {tensor} doesn't match any part."
part.set_output(plan_tensor)
plan_tensor.add_producer(part)
for i, input_tensor in enumerate(input_tensors):
_visit_tensor(input_tensor)
part.set_input(i, tensor_map[input_tensor])
tensor_map[input_tensor].add_consumer(part)
for output in te_graph.outputs:
_visit_tensor(output)
input_tensors = []
for t in te_graph.inputs:
# This is needed because sometimes there are orphaned constants
if t in tensor_map:
input_tensors.append(tensor_map[t])
output_tensors = [tensor_map[t] for t in te_graph.outputs]
return CascaderGraph(input_tensors, output_tensors)
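# Hedged usage sketch (illustrative only; needs a TVM build with the cascader FFI):
# a constant weight tensor with an assumed 2:1 compression ratio. Shape and dtype are
# arbitrary example values.
def _example_constant_tensor():
    weights = Tensor([3, 3, 32, 8], "int8", is_constant=True, compression_ratio=0.5)
    return weights.shape, weights.dtype, weights.is_constant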
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ethosu/cascader/logging.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A class to hold logging information about the cascader"""
from typing import Tuple
import datetime
import json
import os
import math
class Logging:
"""Cascader logging class"""
def __init__(self):
self.min_memory_usage = 0
self.max_memory_usage = 0
self.min_cycles = 0
self.max_cycles = 0
self.selected_proposal_idx = -1
self.proposals = {}
self.cascader_runtime = 0
def add_proposal(self, idx: int, memory_usage: int, cycles: int):
self.proposals[idx] = {"memory_usage": memory_usage, "cycles": cycles}
def get_extreme_points(self) -> Tuple[int, int, int, int]:
min_cycles, min_mem_usage = math.inf, math.inf
max_cycles, max_mem_usage = 0, 0
for proposal in self.proposals.values():
min_mem_usage = min(proposal["memory_usage"], min_mem_usage)
max_mem_usage = max(proposal["memory_usage"], max_mem_usage)
min_cycles = min(proposal["cycles"], min_cycles)
max_cycles = max(proposal["cycles"], max_cycles)
return min_mem_usage, max_mem_usage, min_cycles, max_cycles
def dump_json(self):
min_mem_usage, max_mem_usage, min_cycles, max_cycles = self.get_extreme_points()
with open(os.getcwd() + "/cascader_log.json", "w") as json_file:
print(
json.dumps(
{
"date": f"{datetime.datetime.now()}",
"cascader_runtime": self.cascader_runtime,
"min_cycles": min_cycles,
"max_cycles": max_cycles,
"min_memory_usage": min_mem_usage,
"max_memory_usage": max_mem_usage,
"selected_proposal": self.selected_proposal_idx,
"proposals": self.proposals,
},
indent=2,
),
file=json_file,
)
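# Hedged usage sketch (illustrative, not part of the original module): record two
# hypothetical proposals and read back the extreme points; calling dump_json afterwards
# would also write cascader_log.json into the current working directory.
def _example_logging():
    log = Logging()
    log.add_proposal(0, memory_usage=2048, cycles=1000)
    log.add_proposal(1, memory_usage=4096, cycles=700)
    log.selected_proposal_idx = 1
    # Returns (min_memory_usage, max_memory_usage, min_cycles, max_cycles),
    # i.e. (2048, 4096, 700, 1000) for the values above.
    return log.get_extreme_points()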
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ethosu/cascader/pareto.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Pareto optimisation functions for the NPU cascader."""
from typing import List
from tvm import Object
from . import _ffi_api
from .plan import Plan
def _get_pareto_frontier(costs: List[List[float]]) -> List[bool]:
for i, cost in enumerate(costs):
for j, value in enumerate(cost):
costs[i][j] = float(value)
return [bool(v) for v in _ffi_api.GetParetoFrontier(costs)]
def _thin_vector(vec: List[Object], max_size: int) -> List[Object]:
return list(_ffi_api.ThinVector(vec, max_size))
def _pareto_cull_plans(
plans: List[Plan], max_plans: int, disable_pareto_metric: bool
) -> List[Plan]:
return list(_ffi_api.ParetoCullPlans(plans, max_plans, disable_pareto_metric))
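# Hedged sketch (illustrative only; needs the cascader FFI): each inner list is a cost
# vector and the returned booleans flag which vectors lie on the Pareto frontier.
# The numbers below are arbitrary example values.
def _example_pareto_frontier():
    costs = [[1.0, 5.0], [2.0, 3.0], [4.0, 4.0]]
    return _get_pareto_frontier(costs)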
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ethosu/cascader/parts.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Parts used by the NPU cascader."""
from typing import List
import tvm._ffi
from .propagator import Propagator
from .graph import Part, TESubgraph
from .block_config import BlockConfig
from .stripe_config import StripeConfig
from . import _ffi_api
@tvm._ffi.register_object("contrib.ethosu.cascader.InlinePart")
class InlinePart(Part):
"""InlinePart class"""
def __init__(
self,
te_subgraph: TESubgraph,
propagators: List[Propagator],
):
self.__init_handle_by_constructor__(
_ffi_api.InlinePart,
te_subgraph.input_tensors,
te_subgraph.output_tensor,
propagators,
)
@tvm._ffi.register_object("contrib.ethosu.cascader.EthosuPart")
class EthosuPart(Part):
"""A class to describe a Part to be executed on an Arm(R) Ethos(TM)-U NPU.
EthosuParts must be provided with an output quantum and the cycles taken to
compute an output quantum which depend on the operator the NPU is computing."""
def __init__(
self,
te_subgraph: TESubgraph,
propagators: List[Propagator],
output_quantum: List[int],
subkernels: int,
valid_block_configs: List[BlockConfig],
weight_tensor_idx: int = -1,
):
self.__init_handle_by_constructor__(
_ffi_api.EthosuPart,
te_subgraph.input_tensors,
te_subgraph.output_tensor,
propagators,
output_quantum,
subkernels,
valid_block_configs,
weight_tensor_idx,
)
def get_block_config(self, stripe_config: StripeConfig) -> BlockConfig:
return _ffi_api.EthosuPartGetBlockConfig(self, stripe_config)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ethosu/cascader/plan.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Plan class to hold subgraph scheduling information."""
from typing import Dict, FrozenSet
import tvm._ffi
from tvm.runtime import Object
from . import _ffi_api
from .graph import Tensor, Part
from .tensor_config import TensorConfig, MemoryRegion
@tvm._ffi.register_object("contrib.ethosu.cascader.Plan")
class Plan(Object):
"""
A class which describes how to schedule a subgraph of Parts together.
A Plan takes the form of a subgraph of connected Parts (recorded in part_group) with
TensorConfigs for all of the required Tensors (recorded in tensor_configs). This information
can be used to produce a Tensor Expression schedule with inter-operator scheduling. A Plan is
    necessarily single-output such that all non-output Parts are scheduled with 'compute_at'
    into the scope of the output Part. This is what achieves the technique referred to as
    'cascading'. A Plan also has an interior memory region which specifies the region of memory
    into which all of the Plan's intermediate buffers should be allocated.
    Additionally, a Plan contains some other information used during the Plan generation and
    selection algorithms. Both the memory and cycles required to run the Plan are accounted for so
    that Plans can be ranked and Pareto-culled on these metrics. Furthermore, the TensorConfigs
    which are 'open' are recorded, indicating that these are valid points to merge with another Plan.
A Plan can only be turned into a schedule if it has no 'open' TensorConfigs - at which point
the Plan is said to be 'closed'.
Attributes
----------
tensor_configs : Dict[Tensor, TensorConfig]
The TensorConfigs specified by the Plan.
open_configs : FrozenSet[TensorConfig]
The TensorConfigs which are 'open' meaning they are a Plan input/output but have
'interior' state.
output_config : TensorConfig
The TensorConfig of the Plan's output tensor.
part_group : FrozenSet[Part]
The Parts which are covered by the Plan.
interior_region : MemoryRegion
The MemoryRegion in which to store 'interior' Plan buffers.
memory_usage : int
The interior memory used by the Plan in bytes.
cycles : int
The cycles taken to execute the Plan.
"""
def __init__(
self,
tensor_configs: Dict[Tensor, TensorConfig],
open_configs: FrozenSet[TensorConfig],
output_config: TensorConfig,
part_group: FrozenSet[Part],
interior_region: MemoryRegion,
memory_usage: int,
cycles: int,
):
self.__init_handle_by_constructor__(
_ffi_api.Plan,
list(tensor_configs.values()),
list(open_configs),
output_config,
list(part_group),
interior_region,
memory_usage,
cycles,
)
def merge(self, other):
"""
        Merge two Plans which share an 'open' TensorConfig.
The current Plan is referred to as the 'upper Plan' and the other Plan as the 'lower
Plan'. The 'open' output config of the upper Plan must be an 'open' input config of the
lower Plan. The Tensor referenced by these configs is the Tensor on which the two Plans
will be merged. The merge process does the following:
        The tensor config maps will be merged, with TensorConfigs from the upper Plan taking
        priority. The open configs will be merged, with the TensorConfigs over which the Plans are
        being merged removed. The output config will be that of the lower Plan. The part groups
will be merged. The interior region is necessarily the same for both the upper and lower
Plan. The cycles and memory usage will be summed.
Parameters
----------
other : Plan
The Plan to merge with.
Return
------
Plan
The merged Plan.
"""
return _ffi_api.PlanMerge(self, other)
@property
def tensor_configs(self):
"""The TensorConfigs specified by the Plan."""
tensor_configs = {}
for config in self._tensor_configs:
tensor_configs[config.tensor] = config
return tensor_configs
@property
def open_configs(self):
"""
The TensorConfigs which are 'open' meaning they are a Plan input/output but have
'interior' state.
"""
return frozenset(self._open_configs)
@property
def output_config(self):
"""The TensorConfig of the Plan's output tensor."""
return self._output_config
@property
def part_group(self):
"""The Parts which are covered by the Plan."""
return frozenset(self._part_group)
@property
def interior_region(self):
"""The MemoryRegion in which to store 'interior' Plan buffers."""
return self._interior_region
@property
def memory_usage(self):
"""The interior memory used by the Plan in bytes."""
return self._memory_usage
@property
def cycles(self):
"""The cycles taken to execute the Plan."""
return self._cycles
def __repr__(self):
return (
f"Plan(tensor_configs={self.tensor_configs}, "
f"open_configs={self.open_configs}, "
f"output_config={self.output_config}, "
f"part_group={self.part_group}, "
f"interior_region={self.interior_region.name}, "
f"memory_usage={self.memory_usage}, "
f"cycles={self.cycles}, "
)
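# Hedged sketch (illustrative, not part of the original module): merging two Plans that
# share an 'open' TensorConfig. `upper` and `lower` stand in for Plans produced by the
# plan generator (see plan_generator.py); constructing them by hand needs TensorConfig
# objects and is not shown here.
def _example_merge(upper: Plan, lower: Plan) -> Plan:
    merged = upper.merge(lower)
    # The merged Plan sums the memory usage and cycles of the two halves.
    return merged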
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ethosu/cascader/plan_generator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Algorithms to generate Plans for a CascaderGraph."""
from typing import List, Dict, Tuple
from tvm.contrib.ethosu.cascader.tensor_config import MemoryRegion, TensorConfig
from . import _ffi_api
from .cascader_options import CascaderOptions
from .plan import Plan
from .stripe_config import StripeConfig
from .graph import CascaderGraph, Part, Tensor
def _generate_output_stripe_configs(
part: Part, stripe_factors: int, enable_striping: bool, multi_dimensional: bool
) -> List[StripeConfig]:
return list(
_ffi_api.GenerateOutputStripeConfigs(
part, stripe_factors, enable_striping, multi_dimensional
)
)
def _generate_single_plans(
part: Part,
output_stripe_configs: List[StripeConfig],
home_map: Dict[Tensor, List[MemoryRegion]],
cascade_region: MemoryRegion,
) -> List[Plan]:
return list(_ffi_api.GenerateSinglePlans(part, output_stripe_configs, home_map, cascade_region))
def _generate_graph_plans(
graph: CascaderGraph,
home_map: Dict[Tensor, List[MemoryRegion]],
options: CascaderOptions,
):
return _ffi_api.GenerateGraphPlans(
graph,
home_map,
options,
)
def get_copy_cycles_hint(tensor_config: TensorConfig) -> Tuple[int, int]:
"""
Returns a hint estimating the number of cycles for the copy
specified by tensor_config.
Parameters
----------
tensor_config : TensorConfig
The tensor configuration to estimate.
Returns
-------
mem2mem_cycles : int
Total estimated cycles.
initial_mem2mem_cycles : int
Estimated cycles for the first block.
"""
return _ffi_api.GetCopyCyclesHint(tensor_config)
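# Hedged sketch (illustrative, not part of the original module): the stripe configs
# generated for a Part's output are the starting point for single-Part Plans. `part` and
# `sram` stand in for objects built elsewhere (see graph.py and tensor_config.py); the
# stripe_factors value is an arbitrary example.
def _example_single_plans(part: Part, sram: MemoryRegion) -> List[Plan]:
    stripe_configs = _generate_output_stripe_configs(
        part, stripe_factors=4, enable_striping=True, multi_dimensional=False
    )
    home_map = {part.output_tensor: [sram]}
    return _generate_single_plans(part, stripe_configs, home_map, sram)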
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ethosu/cascader/propagator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Propagator class."""
# pylint: disable=invalid-name
import tvm._ffi
from tvm.runtime import Object
from . import _ffi_api
@tvm._ffi.register_object("contrib.ethosu.cascader.Propagator")
class Propagator(Object):
"""Propagator class"""
def __init__(self, transform, offset):
float_transform = list([list(float(v) for v in row) for row in transform])
self.__init_handle_by_constructor__(_ffi_api.Propagator, float_transform, offset)
def propagate(self, stripe_config):
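        """Propagate a StripeConfig through the Propagator.
        Applies the Propagator's transform matrix and offset to the given
        StripeConfig and returns the resulting StripeConfig.
        """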
return _ffi_api.PropagatorPropagate(self, stripe_config)
@property
def transform(self):
"""Get the transform matrix"""
new_matrix = []
for row in self._transform:
new_row = []
for v in row:
new_row.append(v.value)
new_matrix.append(new_row)
return new_matrix
@property
def offset(self):
"""Get the offset matrix"""
new_vec = []
for v in self._offset:
new_vec.append(v.value)
return new_vec
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ethosu/cascader/proposal.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Proposal class to hold graph scheduling information."""
from typing import Dict, FrozenSet, List
import tvm._ffi
from tvm.contrib.ethosu.cascader.plan import Plan
from tvm.runtime import Object
from . import _ffi_api
from .graph import Tensor, Part, CascaderGraph
from .tensor_config import TensorConfig, MemoryRegion
@tvm._ffi.register_object("contrib.ethosu.cascader.Proposal")
class Proposal(Object):
"""A class which describes how to schedule a CascaderGraph as a series of disjoint Plans.
Attributes
----------
graph : CascaderGraph
The CascaderGraph to which the Proposal applies.
part_group : FrozenSet[Part]
The Parts which are covered by the Proposal.
plans : List[Plan]
The Plans used in the Proposal.
input_tensor_configs : Dict[Tensor, TensorConfig]
The TensorConfigs indexed by Tensor in the Proposal which aren't produced by a Plan.
cascade_region : MemoryRegion
The MemoryRegion where cascading buffers should be homed.
memory_usage : int
The memory required to execute the Proposal in the cascading MemoryRegion.
cycles : int
The estimated cycles taken to execute the Proposal.
"""
def __init__(
self,
graph: CascaderGraph,
part_group: FrozenSet[Part],
plans: List[Plan],
input_tensor_configs: Dict[Tensor, TensorConfig],
cascade_region: MemoryRegion,
memory_usage: Dict[MemoryRegion, int],
cycles: int,
):
self.__init_handle_by_constructor__(
_ffi_api.Proposal,
graph,
list(part_group),
plans,
input_tensor_configs,
cascade_region,
memory_usage,
cycles,
)
@property
def graph(self) -> CascaderGraph:
"""The CascaderGraph to which the Proposal applies."""
return self._graph
@property
def part_group(self) -> FrozenSet[Part]:
"""The Parts which are covered by the Proposal."""
return frozenset(self._part_group)
@property
def plans(self) -> List[Plan]:
"""The Plans used in the Proposal."""
return list(self._plans)
@property
def input_tensor_configs(self) -> Dict[Tensor, TensorConfig]:
"""The TensorConfigs indexed by Tensor in the Proposal which aren't produced by a Plan."""
return dict(self._input_tensor_configs)
@property
def cascade_region(self) -> MemoryRegion:
"""The MemoryRegion where cascading buffers should be homed."""
return self._cascade_region
@property
def memory_usage(self) -> int:
"""The memory required to execute the Proposal in the cascading MemoryRegion."""
return int(self._memory_usage)
@property
def cycles(self) -> int:
"""The estimated cycles taken to execute the Proposal."""
return int(self._cycles)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ethosu/cascader/proposal_generator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Algorithms to generate Proposals for a Graph."""
from typing import List, Dict
from . import _ffi_api
from .cascader_options import CascaderOptions
from .proposal import Proposal
from .tensor_config import MemoryRegion
from .graph import CascaderGraph, Tensor
def generate_proposals(
graph: CascaderGraph,
    home_map: Dict[Tensor, List[MemoryRegion]],
options: CascaderOptions,
) -> List[Proposal]:
"""Generate Pareto optimal Proposals for a CascaderGraph.
This algorithm takes a top-down dynamic programming approach to determining how
to optimally combine Plans into Proposals.
Parameters
----------
graph : CascaderGraph
The CascaderGraph to generate Proposals for.
    home_map : Dict[Tensor, List[MemoryRegion]]
The Tensor homing map defining valid memory homes for Tensors.
options : CascaderOptions
The configuration options with which to run the generator.
Returns
    -------
List[Proposal]
A list of Pareto optimal Proposals.
"""
return list(
_ffi_api.GenerateProposals(
graph,
home_map,
options,
)
)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ethosu/cascader/scheduler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Scheduler for cascader which converts Proposals into Schedules."""
from typing import Tuple, List, Dict, DefaultDict
from collections import defaultdict
import time
import numpy as np
import tvm
from tvm import te
from tvm import tir
from tvm import PoolInfo
from .cascader_options import CascaderOptions
from .graph import CascaderGraph, Part, Tensor, TESubgraph
from .parts import EthosuPart
from .tensor_config import MemoryRegion
from .proposal import Proposal
from .proposal_generator import generate_proposals
from .plan_generator import get_copy_cycles_hint
from .graph import create_cascader_graph
from .device_config import EthosuDeviceConfig
from .logging import Logging
def tile_nd(
sch: te.Schedule, tensor: te.Tensor, tile: Tuple[int, ...]
) -> Tuple[List[tir.IterVar], List[tir.IterVar]]:
"""Scheduling utility to perform N-dimensional tiling.
Parameters
----------
sch : te.Schedule
The schedule to apply the tiling to.
tensor : te.Tensor
The tensor to apply the tiling to.
tile : Tuple[int, ...]
The N-dimensional tile size
Returns
-------
outer_indices : List[tir.IterVar]
The outer iteration variables.
inner_indices : List[tir.IterVar]
The inner iteration variables.
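    Examples
    --------
    .. code-block:: python
        # Illustrative sketch only (not part of the library): tile a 2D tensor
        # into 4x4 stripes using standard TVM TE APIs.
        a = te.placeholder((16, 16), name="a")
        out = te.compute((16, 16), lambda i, j: a[i, j] * 2, name="out")
        sch = te.create_schedule(out.op)
        outer_ivs, inner_ivs = tile_nd(sch, out, (4, 4))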
"""
outer_indices = []
inner_indices = []
for i, size in enumerate(tile):
outer, inner = sch[tensor].split(tensor.op.axis[i], size)
outer_indices.append(outer)
inner_indices.append(inner)
sch[tensor].reorder(*outer_indices, *inner_indices)
return outer_indices, inner_indices
def stripe_part(
part: Part, stripe_shape: Tuple[int, ...], sch: te.Schedule
) -> Tuple[te.Stage, tir.IterVar]:
"""Apply a striping schedule to the TE subgraph represented by a Part."""
te_subgraph = part.subgraph
te_output_tensor = te_subgraph.output_tensor
outer_indices, _ = tile_nd(sch, te_output_tensor, stripe_shape)
g = sch.create_group(
outputs=te_output_tensor.op.input_tensors,
inputs=te_subgraph.input_tensors,
include_inputs=False,
)
g.compute_at(sch[te_output_tensor], outer_indices[-1])
for axis in outer_indices:
sch[te_output_tensor].unroll(axis)
return sch[te_output_tensor], outer_indices[-1]
def cascade_part(
part: Part, stripe_stage: te.Stage, stripe_axis: tir.IterVar, sch: te.Schedule
) -> None:
"""Schedule a Part into a cascade indicated by a stripe Stage."""
te_subgraph = part.subgraph
g = sch.create_group(
outputs=te_subgraph.output_tensor, inputs=te_subgraph.input_tensors, include_inputs=False
)
g.compute_at(stripe_stage, stripe_axis)
def update_readers(part: Part, readers: DefaultDict[te.Tensor, List[te.Tensor]]) -> None:
"""
Update a dictionary which stores the te.Tensors that need to be read in
order to produce a given te.Tensor.
"""
visited = set()
def _visit(tensor):
if tensor not in visited and tensor not in part.subgraph.input_tensors:
visited.add(tensor)
for input_tensor in tensor.op.input_tensors:
readers[input_tensor].append(tensor)
_visit(input_tensor)
_visit(part.subgraph.output_tensor)
def apply_proposal(proposal: Proposal, sch: te.Schedule) -> None:
"""Apply a Proposal to a Schedule, converting all the Plans into TE scheduling instructions.
Note that the Schedule is mutated in-place.
Parameters
----------
proposal : Proposal
The Proposal to apply to the Schedule.
sch : te.Schedule
        The Schedule to apply the Proposal to.
"""
for plan in proposal.plans:
for part in plan.part_group:
if isinstance(part, EthosuPart):
tensor_config = plan.tensor_configs[part.output_tensor]
stripe_config = tensor_config.stripe_configs[0]
buffer_mode = tensor_config.buffer_mode
block_config = part.get_block_config(stripe_config)
compute_cycles = part.get_performance_info(
stripe_config, buffer_mode
).compute_cycles
iv = part.subgraph.output_tensor.op.axis[0]
block_shape = block_config.output_shape
if len(block_shape) == 4:
height, width, depth = block_shape[1:]
else:
height = block_shape[1]
width = block_shape[3]
depth = block_shape[2] * block_shape[4]
sch[part.subgraph.output_tensor].pragma(iv, "block_config_height", height)
sch[part.subgraph.output_tensor].pragma(iv, "block_config_width", width)
sch[part.subgraph.output_tensor].pragma(iv, "block_config_depth", depth)
# Attach AttrStmt directly to npu op so it isn't removed by ReplaceOperators
npu_op = part.subgraph.output_tensor.op.input_tensors[0].op.input_tensors[0]
# Force the pragma to interpret the compute cycles as an int64 value
compute_cycles_int64_cast = tvm.tir.IntImm("int64", compute_cycles)
sch[npu_op].pragma(
npu_op.op.axis[0], "compute_cycles_hint", compute_cycles_int64_cast
)
output_tensor_config = plan.output_config
output_tensor = output_tensor_config.tensor
output_part = output_tensor.producers[0]
if output_part.in_line:
continue
stripe_config = output_tensor_config.stripe_configs[0]
stripe_shape = [int(x) for x in stripe_config.shape]
stripe_stage, stripe_axis = stripe_part(output_part, stripe_shape, sch)
copy_te_tensors = []
compute_cycles_hints = []
readers = defaultdict(list)
for part in plan.part_group:
if part != output_part:
cascade_part(part, stripe_stage, stripe_axis, sch)
update_readers(part, readers)
for i, input_tensor in enumerate(part.input_tensors):
tensor_config = plan.tensor_configs[input_tensor]
if tensor_config.home_region != tensor_config.copy_region:
copy_te_tensors.append(part.subgraph.input_tensors[i])
compute_cycles_hint, _ = get_copy_cycles_hint(tensor_config)
compute_cycles_hints.append(compute_cycles_hint)
for te_tensor, compute_cycles_hint in zip(copy_te_tensors, compute_cycles_hints):
copy_stage = sch.cache_read(te_tensor, "global", readers[te_tensor])
sch[copy_stage].pragma(
copy_stage.op.axis[0], "compute_cycles_hint", compute_cycles_hint
)
sch[copy_stage].compute_at(stripe_stage, stripe_axis)
def create_home_map(
graph: CascaderGraph,
io_region: MemoryRegion,
constant_region: MemoryRegion,
working_regions: List[MemoryRegion],
) -> Dict[Tensor, List[MemoryRegion]]:
"""Create a map between Tensors and the MemoryRegions they can be homed in."""
home_map = {}
for tensor in graph.tensor_order:
if tensor.is_constant:
home_map[tensor] = [constant_region]
elif tensor in graph.input_tensors or tensor in graph.output_tensors:
home_map[tensor] = [io_region]
else:
home_map[tensor] = working_regions
return home_map
def choose_proposal(
proposals: List[Proposal], cascade_region: MemoryRegion, select_proposal_idx: int
):
"""Choose the best performing Proposal that doesn't overflow the cascade region."""
if select_proposal_idx != -1:
# Manually select proposal based on index, take modulus the total number of proposals to
# ensure that some proposal is always selected.
proposal_choice = proposals[select_proposal_idx % len(proposals)]
else:
proposal_choice = proposals[0]
for proposal in reversed(proposals):
if proposal.memory_usage < cascade_region.size:
proposal_choice = proposal
break
return proposal_choice
def extract_memory_info(memory_pool: PoolInfo, memory_pressure: int) -> MemoryRegion:
"Create a MemoryRegion based on the info in the memory pool"
size = int(memory_pool.size_hint_bytes - memory_pressure)
read_bandwidth = int(memory_pool.read_bandwidth_bytes_per_cycle)
write_bandwidth = int(memory_pool.write_bandwidth_bytes_per_cycle)
for param in (size, read_bandwidth, write_bandwidth):
assert param != -1, f"{param} needs to be specified for the cascader."
name_to_burst_length = {
target.kind.name: burst for target, burst in memory_pool.target_burst_bytes.items()
}
try:
burst_length = int(name_to_burst_length["ethos-u"])
except KeyError:
burst_length = 1
return MemoryRegion(
name=memory_pool.pool_name,
size=size,
read_bandwidth=read_bandwidth,
write_bandwidth=write_bandwidth,
read_latency=int(memory_pool.read_latency_cycles),
write_latency=int(memory_pool.write_latency_cycles),
burst_length=burst_length,
)
def cascade(
sch: te.Schedule,
te_graph: TESubgraph,
const_dict: Dict[int, np.ndarray],
options: CascaderOptions,
io_region: MemoryRegion,
constant_region: MemoryRegion,
working_regions: List[MemoryRegion],
device_config: EthosuDeviceConfig,
) -> None:
"""Schedule a Tensor Expression graph using the technique of 'cascading'.
'Cascading' is a technique whereby operations are split into smaller
dependent tiles ('stripes') which can then execute in an interleaved
fashion. This allows for operations to execute together rather than
sequentially which can reduce intermediate memory requirements and in
certain cases improve performance.
For more detail on 'cascading' as well as how it is implemented, refer to
the RFC here: https://github.com/apache/tvm-rfcs/pull/37.
Parameters
----------
sch : te.Schedule
The Schedule to apply the cascading to.
te_graph : TESubgraph
The Tensor Expression graph from which the Schedule was created.
const_dict : Dict[int, np.ndarray]
A dictionary mapping input index to constant data if that input is
to be a constant.
options : CascaderOptions
Configuration options for the cascading scheduler.
io_region : MemoryRegion
The MemoryRegion in which input/output tensors should reside.
constant_region : MemoryRegion
The MemoryRegion in which constants should reside.
working_regions : List[MemoryRegion]
The MemoryRegions in which intermediate working tensors can reside. The
        cascading scheduler will select which MemoryRegion to use per tensor.
device_config : EthosuDeviceConfig
Target device configuration.
"""
tvmc_options = tvm.transform.PassContext.current().config.get("relay.ext.ethos-u.options", None)
log = Logging() if tvmc_options and tvmc_options.dev_cascader_logging else None
select_proposal_idx = (
int(tvmc_options.dev_select_proposal_idx)
if tvmc_options and tvmc_options.dev_select_proposal_idx
else -1
)
if log:
start = time.time()
assert options.cascade_region in working_regions
# First convert the Tensor Expression graph into a CascaderGraph
casc_graph = create_cascader_graph(te_graph, const_dict, device_config)
# Then create a mapping between Tensors and their possible memory homes
home_map = create_home_map(casc_graph, io_region, constant_region, working_regions)
# Generate Proposals for Pareto-optimal ways to cascade the CascaderGraph
proposals = generate_proposals(casc_graph, home_map, options)
# Select the best Proposal subject to the memory constraints
proposal_choice = choose_proposal(proposals, options.cascade_region, select_proposal_idx)
if log:
for idx, proposal in enumerate(proposals):
log.add_proposal(idx, proposal.memory_usage, proposal.cycles)
if proposal == proposal_choice:
log.selected_proposal_idx = idx
log.cascader_runtime = time.time() - start
log.dump_json()
# Apply the selected Proposal to the Tensor Expression Schedule
apply_proposal(proposal_choice, sch)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ethosu/cascader/stripe_config.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Stripe config class to hold tensor striping information."""
# pylint: disable=invalid-name
import tvm._ffi
from tvm.runtime import Object
from . import _ffi_api
@tvm._ffi.register_object("contrib.ethosu.cascader.StripeConfig")
class StripeConfig(Object):
"""StripeConfig class"""
def __init__(self, shape, extent, strides, order, stripes, offset):
strides = list([float(v) for v in strides])
self.__init_handle_by_constructor__(
_ffi_api.StripeConfig, shape, extent, strides, order, stripes, offset
)
@property
def shape(self):
return list(self._shape)
@property
def extent(self):
return list(self._extent)
@property
def strides(self):
return list([float(v.value) for v in self._strides])
@property
def order(self):
return list(self._order)
@property
def stripes(self):
return list(self._stripes)
@property
def offset(self):
return list(self._offset)
def __hash__(self):
return self._hash
def __eq__(self, other):
return _ffi_api.StripeConfigEqual(self, other)
def __repr__(self):
return (
f"StripeConfig(shape={self.shape}, "
f"extent={self.extent}, "
f"strides={self.strides}, "
f"order={self.order}, "
f"stripes={self.stripes}, "
f"offset={self.offset}"
)
def count_stripes(stripe_config: StripeConfig, enable_sliding_window: bool = False):
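    """Count how many stripes of each shape are produced by a StripeConfig.
    Parameters
    ----------
    stripe_config : StripeConfig
        The StripeConfig to count stripes for.
    enable_sliding_window : bool, optional
        Whether to count stripes assuming a sliding window (rolling buffer)
        access pattern.
    Returns
    -------
    Dict[Tuple[int, ...], int]
        A map from stripe shape (a tuple of ints) to the number of stripes of
        that shape.
    """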
stripe_counts = dict(_ffi_api.CountStripes(stripe_config, enable_sliding_window))
# Some code to 'de-TVM' the data types and make them pure Python
clean_stripe_counts = dict()
for stripe, count in stripe_counts.items():
clean_stripe = tuple([int(v) for v in stripe])
clean_count = int(count)
clean_stripe_counts[clean_stripe] = clean_count
return clean_stripe_counts
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ethosu/cascader/tensor_config.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tensor config class to hold tensor scheduling information."""
from typing import List, Union
from enum import IntEnum
import tvm._ffi
from tvm.runtime import Object
from . import _ffi_api
from .stripe_config import StripeConfig
from .graph import Tensor, BufferMode
class TensorConfigState(IntEnum):
"""
The 'state' of a TensorConfig as used in the Plan generation algorithm.
BOUNDARY - Should describe a Plan input/output Tensor.
INTERIOR - Should describe an intermediate Tensor in a 'closed' Plan.
"""
BOUNDARY = 0
INTERIOR = 1
@tvm._ffi.register_object("contrib.ethosu.cascader.MemoryRegion")
class MemoryRegion(Object):
"""
MemoryRegion class to store information about device memories.
Attributes
----------
name : str
The name of the region.
size : int
The size of the region.
read_bandwidth : int
The read bandwidth of the region in bytes per cycle.
write_bandwidth : int
The write bandwidth of the region in bytes per cycle.
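    Examples
    --------
    .. code-block:: python
        # Illustrative sketch only: a 1 MiB on-chip region with 16 bytes/cycle of
        # read and write bandwidth (the name and sizes are assumptions).
        sram = MemoryRegion(name="SRAM", size=2**20, read_bandwidth=16, write_bandwidth=16)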
"""
def __init__(
self,
name: str,
size: int,
read_bandwidth: int,
write_bandwidth: int,
read_latency: int = 0,
write_latency: int = 0,
burst_length: int = 1,
):
self.__init_handle_by_constructor__(
_ffi_api.MemoryRegion,
name,
size,
read_bandwidth,
write_bandwidth,
read_latency,
write_latency,
burst_length,
)
@tvm._ffi.register_object("contrib.ethosu.cascader.TensorConfig")
class TensorConfig(Object):
"""
A class which describes how to realize a Tensor.
The TensorConfig describes both how a Tensor is scheduled (the order in which it's
    produced/consumed) and how it's allocated in memory (which region it should reside in
and whether it should be copied).
Attributes
----------
tensor : Tensor
The Tensor the config applies to.
home_region : MemoryRegion
The region where the tensor is allocated.
state : TensorConfigState
The state of the TensorConfig.
The TensorConfigState is only used as part of the Plan generation algorithm. For a Plan
to be 'closed' (and therefore not subject to any further merging), all the TensorConfigs
that describe Plan input or output Tensors must be in the 'BOUNDARY' state with the rest
being 'INTERIOR'. If any of the input or output tensors are described by an 'INTERIOR'
TensorConfig, then the Plan is 'open' and should be merged with other 'open' Plans until
the result becomes 'closed'.
buffer_mode : BufferMode
The mode in which the buffer should be realized.
There are multiple buffering strategies by which a tensor may be realized (computed).
These affect the amount of recomputation necessary as well as the size of buffer required
to store the tensor. See 'BufferMode' for a description of the allowable buffering modes.
    stripe_configs : List[StripeConfig]
The StripeConfigs with which to compute the tensor.
The StripeConfigs determine the order in which the elements of the tensor should be
computed, including potentially computing them multiple times (recompute). Multiple
StripeConfigs are used over just a single StripeConfig for the case where the tensor is
consumed by two different Parts executing themselves with different StripeConfigs. In this
case, there is a StripeConfig per consumer of the tensor.
copy_tensor : bool, optional
Whether to copy the tensor.
While a tensor will originally reside in its home region, the TensorConfig may optionally
specify that the tensor should be copied (according to the StripeConfigs) into another
MemoryRegion. As an example for where this may be used, if a weights tensor initially
resides in slow Flash memory then necessarily the home region will be Flash. However, if
the weights values are used multiple times by a Part, it may be more performant to choose
to copy the weights into a faster memory like SRAM.
copy_region : Union[MemoryRegion, None], optional
The region to copy the tensor to.
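    Examples
    --------
    .. code-block:: python
        # Illustrative sketch only (the tensor, regions and stripe config are
        # assumptions): a boundary tensor homed in DRAM but copied into SRAM.
        config = TensorConfig(
            tensor=tensor,
            home_region=dram,
            state=TensorConfigState.BOUNDARY,
            buffer_mode=BufferMode.RECOMPUTE,
            stripe_configs=[stripe_config],
            copy_tensor=True,
            copy_region=sram,
        )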
"""
def __init__(
self,
tensor: Tensor,
home_region: MemoryRegion,
state: TensorConfigState,
buffer_mode: BufferMode,
stripe_configs: List[StripeConfig],
copy_tensor: bool = False,
copy_region: Union[MemoryRegion, None] = None,
):
if copy_region is None:
copy_region = home_region
self.__init_handle_by_constructor__(
_ffi_api.TensorConfig,
tensor,
home_region,
state,
buffer_mode,
stripe_configs,
copy_tensor,
copy_region,
)
def get_buffer_size(self):
"""
The size of the buffer needed for the TensorConfig.
The size of buffer necessary to store a tensor being produced using the TensorConfig is
not necessarily just the size of the tensor. In Plans, a tensor may be being produced and
consumed in 'stripes' which are smaller than the full tensor. Therefore, the buffer
necessary to store the tensor may only need to be as large as the stripe. The precise size
of the buffer will depend both on the BufferMode and StripeConfigs (as well as, of course,
the Tensor).
"""
return _ffi_api.TensorConfigGetBufferSize(self)
@property
def tensor(self):
"""The Tensor the config applies to."""
return self._tensor
@property
def home_region(self):
"""The region where the tensor is allocated."""
return self._home_region
@property
def state(self):
"""The state of the TensorConfig."""
return TensorConfigState(self._state)
@property
def buffer_mode(self):
"""The mode in which the buffer should be realized."""
return BufferMode(self._buffer_mode)
@property
def stripe_configs(self):
"""The StripeConfigs with which to compute the tensor."""
return list(self._stripe_configs)
@property
def copy_tensor(self):
"""Whether to copy the tensor."""
return bool(self._copy_tensor)
@property
def copy_region(self):
"""The region to copy the tensor to."""
return self._copy_region
def __hash__(self):
return self._hash
def __eq__(self, other):
return _ffi_api.TensorConfigEqual(self, other)
def __repr__(self):
return (
f"TensorConfig(tensor={self.tensor}, "
f"home_region={self.home_region.name}, "
f"state={self.state.name}, "
f"buffer_mode={self.buffer_mode.name}, "
f"stripe_configs={self.stripe_configs}, "
f"copy_tensor={self.copy_tensor}, "
f"copy_region={self.copy_region.name}"
)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/graph_executor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Minimum graph executor that executes graph containing TVM PackedFunc."""
import numpy as np
import tvm._ffi
from tvm.rpc import _ffi_api as _rpc_ffi_api
from tvm.rpc import base as rpc_base
from tvm._ffi.base import string_types
from tvm._ffi.runtime_ctypes import Device
def create(graph_json_str, libmod, device):
"""Create a runtime executor module given a graph and module.
Parameters
----------
graph_json_str : str
        The graph to be deployed in JSON format, as output by the graph compiler.
        The graph can contain operator (tvm_op) nodes that point to the name
        of a PackedFunc in the libmod.
libmod : tvm.runtime.Module
The module of the corresponding function
device : Device or list of Device
The device to deploy the module. It can be local or remote when there
is only one Device. Otherwise, the first device in the list will
        be used for this purpose. All devices should be given for heterogeneous
execution.
Returns
-------
graph_module : GraphModule
Runtime graph module that can be used to execute the graph.
Note
----
See also :py:class:`tvm.contrib.graph_executor.GraphModule`
for examples to directly construct a GraphModule from an exported
relay compiled library.
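    Examples
    --------
    .. code-block:: python
        # Illustrative sketch only (mod, params and the target are assumptions):
        # build a Relay module and create a graph executor from its JSON graph.
        lib = relay.build(mod, target="llvm", params=params)
        dev = tvm.cpu(0)
        gmod = create(lib.get_graph_json(), lib.get_lib(), dev)
        gmod.set_input(**lib.get_params())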
"""
assert isinstance(graph_json_str, string_types)
dev, num_rpc_dev, device_type_id = get_device(libmod, device)
if num_rpc_dev == len(dev):
fcreate = dev[0]._rpc_sess.get_function("tvm.graph_executor.create")
else:
fcreate = tvm._ffi.get_global_func("tvm.graph_executor.create")
return GraphModule(fcreate(graph_json_str, libmod, *device_type_id))
def get_device(libmod, device):
"""Parse and validate all the device(s).
Parameters
----------
libmod : tvm.runtime.Module
The module of the corresponding function
device : Device or list of Device
Returns
-------
    device : list of Device
        The parsed and validated devices.
    num_rpc_dev : int
        The number of RPC devices.
    device_type_id : list of int
        The device type and device id of each device, flattened into a single list.
"""
if isinstance(device, Device):
device = [device]
elif not isinstance(device, (list, tuple)):
raise ValueError("dev has to be the type of Device or a list of Device")
for cur_dev in device:
if not isinstance(cur_dev, Device):
raise ValueError("dev has to be the type of Device or a list of Device")
# device_type_id[0], device_type_id[1] are used as the primary/fallback
# device type and id. All other ones are used as device for
# heterogeneous execution.
num_rpc_dev = 0
device_type_id = []
for cur_dev in device:
device_type = cur_dev.device_type
if device_type >= rpc_base.RPC_SESS_MASK:
assert libmod.type_key == "rpc"
assert _rpc_ffi_api.SessTableIndex(libmod) == cur_dev._rpc_sess._tbl_index
num_rpc_dev += 1
device_type = cur_dev.device_type % rpc_base.RPC_SESS_MASK
device_type_id.append(device_type)
device_type_id.append(cur_dev.device_id)
if 0 < num_rpc_dev < len(device):
raise ValueError("Either all or none of the devices should be rpc.")
return device, num_rpc_dev, device_type_id
class GraphModule(object):
"""Wrapper runtime module.
This is a thin wrapper of the underlying TVM module.
    You can also directly call set_input, run, and get_output
    of the underlying module functions.
Parameters
----------
module : tvm.runtime.Module
The internal tvm module that holds the actual graph functions.
Attributes
----------
module : tvm.runtime.Module
The internal tvm module that holds the actual graph functions.
Examples
--------
.. code-block:: python
import tvm
from tvm import relay
from tvm.contrib import graph_executor
# build the library using graph executor
lib = relay.build(...)
lib.export_library("compiled_lib.so")
# load it back as a runtime
lib: tvm.runtime.Module = tvm.runtime.load_module("compiled_lib.so")
# Call the library factory function for default and create
# a new runtime.Module, wrap with graph module.
gmod = graph_executor.GraphModule(lib["default"](dev))
# use the graph module.
gmod.set_input("x", data)
gmod.run()
"""
def __init__(self, module):
self.module = module
self._set_input = module["set_input"]
self._run = module["run"]
self._get_output = module["get_output"]
self._get_input = module["get_input"]
self._get_num_outputs = module["get_num_outputs"]
self._get_input_index = module["get_input_index"]
self._get_input_info = module["get_input_info"]
self._get_num_inputs = module["get_num_inputs"]
self._load_params = module["load_params"]
self._share_params = module["share_params"]
def set_input(self, key=None, value=None, **params):
"""Set inputs to the module via kwargs
Parameters
----------
key : int or str
The input key
        value : the input value
            The input value to be set.
params : dict of str to NDArray
Additional arguments
"""
if key is not None:
v = self._get_input(key)
if v is None:
raise RuntimeError("Could not find '%s' in graph's inputs" % key)
v.copyfrom(value)
if params:
# upload big arrays first to avoid memory issue in rpc mode
keys = list(params.keys())
keys.sort(key=lambda x: -np.prod(params[x].shape))
for k in keys:
# TODO(zhiics) Skip the weights for submodule in a better way.
# We should use ConstLoaderModule for initialization and remove
# params from set_input
val = self._get_input(k)
if val:
self._get_input(k).copyfrom(params[k])
def run(self, **input_dict):
"""Run forward execution of the graph
Parameters
----------
input_dict: dict of str to NDArray
            Dictionary of input name to input value to be fed to the graph.
"""
if input_dict:
self.set_input(**input_dict)
self._run()
def get_num_outputs(self):
"""Get the number of outputs from the graph
Returns
-------
count : int
The number of outputs.
"""
return self._get_num_outputs()
def get_num_inputs(self):
"""Get the number of inputs to the graph
Returns
-------
count : int
The number of inputs.
"""
return self._get_num_inputs()
def get_input(self, index, out=None):
"""Get index-th input to out
Parameters
----------
index : int
The input index
out : NDArray
The output array container
"""
if out:
self._get_input(index).copyto(out)
return out
return self._get_input(index)
def get_input_index(self, name):
"""Get inputs index via input name.
Parameters
----------
name : str
The input key name
Returns
-------
index: int
The input index. -1 will be returned if the given input name is not found.
"""
return self._get_input_index(name)
def get_input_info(self):
"""Return the 'shape' and 'dtype' dictionaries of the graph.
.. note::
We can't simply get the input tensors from a TVM graph
because weight tensors are treated equivalently. Therefore, to
find the input tensors we look at the 'arg_nodes' in the graph
(which are either weights or inputs) and check which ones don't
appear in the params (where the weights are stored). These nodes
are therefore inferred to be input tensors.
Returns
-------
shape_dict : Map
Shape dictionary - {input_name: tuple}.
dtype_dict : Map
dtype dictionary - {input_name: dtype}.
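        Examples
        --------
        .. code-block:: python
            # Illustrative sketch only (`gmod` is an assumed GraphModule instance).
            shape_dict, dtype_dict = gmod.get_input_info()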
"""
input_info = self._get_input_info()
assert "shape" in input_info
shape_dict = input_info["shape"]
assert "dtype" in input_info
dtype_dict = input_info["dtype"]
return shape_dict, dtype_dict
def get_output(self, index, out=None):
"""Get index-th output to out
Parameters
----------
index : int
The output index
out : NDArray
The output array container
"""
if out:
self._get_output(index, out)
return out
return self._get_output(index)
def debug_get_output(self, node, out):
"""Run graph up to node and get the output to out
Parameters
----------
node : int / str
The node index or name
out : NDArray
The output array container
"""
raise NotImplementedError("Please use debugger.debug_executor as graph_executor instead.")
def load_params(self, params_bytes):
"""Load parameters from serialized byte array of parameter dict.
Parameters
----------
params_bytes : bytearray
The serialized parameter dict.
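        Examples
        --------
        .. code-block:: python
            # Illustrative sketch only; assumes tvm.runtime.save_param_dict is
            # available and `params` is a dict of str to NDArray.
            from tvm.runtime import save_param_dict
            gmod.load_params(save_param_dict(params))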
"""
self._load_params(bytearray(params_bytes))
def share_params(self, other, params_bytes):
"""Share parameters from pre-existing GraphExecutor instance.
Parameters
----------
other: GraphExecutor
The parent GraphExecutor from which this instance should share
            its parameters.
params_bytes : bytearray
The serialized parameter dict (used only for the parameter names).
"""
self._share_params(other.module, bytearray(params_bytes))
def __getitem__(self, key):
"""Get internal module function
Parameters
----------
key : str
The key to the module.
"""
return self.module[key]
def benchmark(
self,
device,
func_name="run",
repeat=5,
number=5,
min_repeat_ms=None,
limit_zero_time_iterations=100,
end_to_end=False,
cooldown_interval_ms=0,
repeats_to_cooldown=1,
**kwargs,
):
"""Calculate runtime of a function by repeatedly calling it.
Use this function to get an accurate measurement of the runtime of a function. The function
is run multiple times in order to account for variability in measurements, processor speed
or other external factors. Mean, median, standard deviation, min and max runtime are all
reported. On GPUs, CUDA and ROCm specifically, special on-device timers are used so that
        synchronization and data transfer operations are not counted towards the runtime. This allows
for fair comparison of runtimes across different functions and models. The `end_to_end` flag
switches this behavior to include data transfer operations in the runtime.
The benchmarking loop looks approximately like so:
.. code-block:: python
for r in range(repeat):
time_start = now()
for n in range(number):
func_name()
time_end = now()
total_times.append((time_end - time_start)/number)
Parameters
----------
func_name : str
The function to benchmark. This is ignored if `end_to_end` is true.
repeat : int
Number of times to run the outer loop of the timing code (see above). The output will
contain `repeat` number of datapoints.
number : int
Number of times to run the inner loop of the timing code. This inner loop is run in
between the timer starting and stopping. In order to amortize any timing overhead,
            `number` should be increased when the runtime of the function is small (less than 1/10
of a millisecond).
min_repeat_ms : Optional[int]
If set, the inner loop will be run until it takes longer than `min_repeat_ms`
milliseconds. This can be used to ensure that the function is run enough to get an
accurate measurement.
limit_zero_time_iterations : Optional[int]
The maximum number of repeats when measured time is equal to 0.
It helps to avoid hanging during measurements.
end_to_end : bool
If set, include time to transfer input tensors to the device and time to transfer
returned tensors in the total runtime. This will give accurate timings for end to end
workloads.
cooldown_interval_ms: Optional[int]
The cooldown interval in milliseconds between the number of repeats defined by
`repeats_to_cooldown`.
repeats_to_cooldown: Optional[int]
The number of repeats before the cooldown is activated.
kwargs : Dict[str, Object]
Named arguments to the function. These are cached before running timing code, so that
data transfer costs are not counted in the runtime.
Returns
-------
timing_results : BenchmarkResult
Runtimes of the function. Use `.mean` to access the mean runtime, use `.results` to
access the individual runtimes (in seconds).
"""
min_repeat_ms = 0 if min_repeat_ms is None else min_repeat_ms
if end_to_end:
# Have to unpack kwargs into a single list
args = []
for k, v in kwargs.items():
args.append(k)
args.append(v)
return self.module.time_evaluator(
"run_from_inputs",
device,
repeat=repeat,
number=number,
min_repeat_ms=min_repeat_ms,
limit_zero_time_iterations=limit_zero_time_iterations,
)(device.device_type % rpc_base.RPC_SESS_MASK, device.device_id, *args)
if kwargs:
self.set_input(**kwargs)
return self.module.time_evaluator(
func_name,
device,
repeat=repeat,
number=number,
min_repeat_ms=min_repeat_ms,
limit_zero_time_iterations=limit_zero_time_iterations,
cooldown_interval_ms=cooldown_interval_ms,
repeats_to_cooldown=repeats_to_cooldown,
)()
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/graph_runtime.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Deprecated Python API for GraphExecutor."""
import warnings
from . import graph_executor
def create(*args, **kwargs):
warnings.warn(
"This function has been moved to tvm.contrib.graph_executor and will be removed "
"in the next TVM release"
)
return graph_executor.create(*args, **kwargs)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/hexagon/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hexagon APIs."""
from .tools import *
from .transform import *
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/hexagon/_ci_env_check.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hexagon environment checks for CI usage
These may be required by either tvm.testing or
tvm.contrib.hexagon.pytest_plugin, and are separated here to avoid a
circular dependency.
"""
import os
import tvm
ANDROID_SERIAL_NUMBER = "ANDROID_SERIAL_NUMBER"
HEXAGON_TOOLCHAIN = "HEXAGON_TOOLCHAIN"
def _compile_time_check():
"""Return True if compile-time support for Hexagon is present, otherwise
error string.
    Designed for use as the ``compile_time_check`` argument to
`tvm.testing.Feature`.
"""
if (
tvm.testing.utils._cmake_flag_enabled("USE_LLVM")
and tvm.target.codegen.llvm_version_major() < 7
):
return "Hexagon requires LLVM 7 or later"
if "HEXAGON_TOOLCHAIN" not in os.environ:
return f"Missing environment variable {HEXAGON_TOOLCHAIN}."
return True
def _run_time_check():
"""Return True if run-time support for Hexagon is present, otherwise
error string.
    Designed for use as the ``run_time_check`` argument to
`tvm.testing.Feature`.
"""
if ANDROID_SERIAL_NUMBER not in os.environ:
return f"Missing environment variable {ANDROID_SERIAL_NUMBER}."
return True
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/hexagon/build.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines top-level glue functions for building Hexagon."""
import abc
import datetime
import logging
import multiprocessing as mp
import os
import pathlib
import signal
import socket
import stat
import random
import string
import subprocess
import tempfile
from typing import Union
from tvm.contrib.hexagon.hexagon_profiler import HexagonProfiler
from ..._ffi import libinfo
from .session import Session
from .tools import HEXAGON_SIMULATOR_NAME
HEXAGON_RPC_LIB_DIR = os.environ.get("HEXAGON_RPC_LIB_DIR")
ANDROID_BASH_FILE_NAME = "android_bash.sh"
HEXAGON_REMOTE_DEVICE_KEY = "hexagon-dev"
def _check_call_verbose(cmd, **kwargs) -> None:
"""
Similar to subprocess.check_call(cmd), but if the exit code is non-zero
then the raised Exception's message provides more detail, including
the stdout/stderr provided by the subprocess.
"""
try:
subprocess.run(
cmd,
check=True,
encoding="UTF-8",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs,
)
except subprocess.CalledProcessError as err:
error_msg = f"{err}\nstdout:\n{err.stdout}\nstderr:\n{err.stderr}"
raise Exception(error_msg)
def _get_hexagon_rpc_lib_dir() -> pathlib.Path:
"""Find the Hexagon API binaries.
Returns
-------
pathlib.Path :
The path to the Hexagon API directory.
"""
global HEXAGON_RPC_LIB_DIR
if HEXAGON_RPC_LIB_DIR is None:
for path in libinfo.find_lib_path():
rpc_dir = os.path.join(os.path.dirname(path), "hexagon_api_output")
if os.path.isdir(rpc_dir):
HEXAGON_RPC_LIB_DIR = rpc_dir
break
else:
raise RuntimeError("hexagon_api binaries not found, please define HEXAGON_RPC_LIB_DIR")
return pathlib.Path(HEXAGON_RPC_LIB_DIR)
def _get_test_directory_name() -> str:
"""Generate a time-stamped name for use as a test directory name."""
date_str = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
random_str = "".join(random.choice(string.ascii_lowercase) for _ in range(10))
return f"{date_str}-{random_str}"
class HexagonLauncherRPC(metaclass=abc.ABCMeta):
"""Base class for RPC-based launchers.
This is an abstract class intended to be a base class for specific
implementations of RPC launchers. There are two public methods that
each launcher needs to implement:
- start_server
    - stop_server
and two "private" methods used in setting up the environment:
- _copy_to_remote
- _create_remote_directory
    The basic flow of interaction with the launcher is:
    .. code-block:: python
        launcher = HexagonLauncher(...)
        launcher.start_server()
        with launcher.create_session() as session:
            # Do something with the session
        launcher.stop_server()
Parameters
----------
rpc_info : dict
Description of the RPC setup. Recognized keys:
"rpc_tracker_host" : str name of the host running the tracker (default "0.0.0.0")
"rpc_tracker_port" : int port number of the tracker (default: 9190)
"rpc_server_port" : int port number for the RPC server to use (default 7070)
"workspace_base" : str name of base test directory (default ".")
    workspace : str or pathlib.Path
        The server's remote working directory. If this directory does not
        exist, it will be created. If it does exist, the server must have
        write permissions to it.
If this parameter is None, a subdirectory in the `workspace_base`
directory will be created, otherwise the `workspace_base` is not
used.
"""
def __init__(
self, rpc_info: dict, workspace: Union[str, pathlib.Path] = None, serial_number: str = None
):
self._rpc_info = {
"rpc_tracker_host": "0.0.0.0",
"rpc_tracker_port": 9190,
"rpc_server_port": 7070,
"workspace_base": ".",
}
self._rpc_info.update(rpc_info)
self._workspace = self._create_workspace(workspace)
self._serial_number = serial_number
@abc.abstractmethod
def start_server(self):
"""Start the RPC server"""
...
@abc.abstractmethod
def stop_server(self):
"""Stop the RPC server"""
...
@abc.abstractmethod
def cleanup_directory(self):
"""Cleanup working directory"""
...
@abc.abstractmethod
def _copy_to_remote(
self, local_path: Union[str, pathlib.Path], remote_path: Union[str, pathlib.Path]
):
"""Copy a local file to a remote location.
Parameters
----------
local_path : str or pathlib.Path
Path to the local file.
remote_path : str or pathlib.Path
Path to the remote file (to be written).
"""
...
@abc.abstractmethod
def _create_remote_directory(self, remote_path: Union[str, pathlib.Path]) -> pathlib.Path:
"""Create a directory in the remote location.
Parameters
----------
remote_path : str or pathlib.Path
Name of the directory to be created.
Returns
-------
pathlib.Path :
Absolute path of the remote workspace.
"""
...
def _create_workspace(self, workspace: Union[str, pathlib.Path]) -> pathlib.Path:
"""Create a working directory for the server.
Parameters
----------
workspace : str or pathlib.Path or NoneType
Name of the directory to create. If None, a new name is constructed
using workspace_base.
Returns
-------
pathlib.Path :
Created workspace.
"""
if not workspace:
base_dir = self._rpc_info["workspace_base"]
workspace = os.path.join(base_dir, _get_test_directory_name())
return self._create_remote_directory(workspace)
@abc.abstractmethod
def get_profile_output(
self,
hex_profiler: HexagonProfiler,
session: Session,
) -> str:
"""Extract profile output.
Parameters
----------
hex_profiler : HexagonProfiler
HexagonProfiler object that contains the profiling related information.
session : Session
Remote session. The session must be established (via __enter__)
prior to calling this function.
Returns
-------
profile_data : str
Path of the profiling data file
"""
...
def create_session(self, session_name: str = "hexagon-rpc") -> Session:
"""Create an RPC session.
Parameters
----------
session_name : str
RPC session name.
Returns
-------
Session :
The session object.
"""
hexagon_session_kw = {
"remote_workspace": self._workspace,
"rpc_tracker": (self._rpc_info["rpc_tracker_host"], self._rpc_info["rpc_tracker_port"]),
"rpc_server_key": self._rpc_info["device_key"],
"serial_number": self._serial_number,
"session_name": session_name,
}
return Session(**hexagon_session_kw)
def is_simulator(self):
return self._serial_number == HEXAGON_SIMULATOR_NAME
class HexagonLauncherAndroid(HexagonLauncherRPC):
"""Hexagon Launcher for Android."""
ANDROID_HEXAGON_TEST_BASE_DIR = pathlib.Path("/data/local/tmp/hexagon_test")
ANDROID_HEXAGON_RPC_FILES = [
"libhexagon_rpc_skel.so",
"libtvm_runtime.so",
"tvm_rpc_android",
]
def __init__(
self,
serial_number: str,
rpc_info: dict,
workspace: Union[str, pathlib.Path] = None,
hexagon_debug: bool = False,
clear_logcat: bool = False,
sysmon_profile: bool = False,
farf_config: str = "0x1e",
):
"""Configure a new HexagonLauncherAndroid
Parameters
----------
serial_number : str
Android device serial number.
rpc_info : dict
Same as in HexagonLauncherRPC, except if the "workspace_base"
key is not present or is None, ANDROID_HEXAGON_TEST_BASE_DIR
is used as the base directory.
workspace : str or pathlib.Path, optional
Test workspace path on android.
hexagon_debug: bool, optional
Should the server run debug options.
clear_logcat: bool, optional
Should the server clear logcat before running.
sysmon_profile: bool, optional
Should the server run sysmon profiler in the background.
farf_config: str, optional
Configuration string for runtime log level filtering.
Use farf_config_from_python_log_level to generate a bitmask
string from a Python logging level (e.g., logging.INFO)
"""
if not rpc_info.get("workspace_base"):
rpc_info["workspace_base"] = self.ANDROID_HEXAGON_TEST_BASE_DIR
self._serial_number = serial_number
assert self._serial_number != "", "Android serial number is not set."
adb_socket = rpc_info["adb_server_socket"] if rpc_info["adb_server_socket"] else "tcp:5037"
self._adb_device_sub_cmd = ["adb", "-L", adb_socket, "-s", self._serial_number]
self.forwarded_ports_ = []
self._hexagon_debug = hexagon_debug
self._clear_logcat = clear_logcat
self._sysmon_profile = sysmon_profile
self._sysmon_process = None
self._farf_config = farf_config
rpc_info["device_key"] = HEXAGON_REMOTE_DEVICE_KEY + "." + self._serial_number
super(HexagonLauncherAndroid, self).__init__(rpc_info, workspace, self._serial_number)
def _copy_to_remote(
self, local_path: Union[str, pathlib.Path], remote_path: Union[str, pathlib.Path]
):
"""Abstract method implementation. See description in HexagonLauncherRPC."""
_check_call_verbose(self._adb_device_sub_cmd + ["push", str(local_path), str(remote_path)])
def _create_remote_directory(self, remote_path: Union[str, pathlib.Path]) -> pathlib.Path:
"""Abstract method implementation. See description in HexagonLauncherRPC."""
_check_call_verbose(self._adb_device_sub_cmd + ["shell", "mkdir", "-p", str(remote_path)])
return pathlib.Path(remote_path)
def _copy_binaries(self):
"""Upload Android server binaries."""
# Create bash script
with open(_get_hexagon_rpc_lib_dir() / f"{ANDROID_BASH_FILE_NAME}.template", "r") as src_f:
with tempfile.TemporaryDirectory() as temp_dir:
android_bash_script_path = pathlib.Path(temp_dir) / ANDROID_BASH_FILE_NAME
with open(android_bash_script_path, "w") as dest_f:
for line in src_f.readlines():
if "<RPC_TRACKER_HOST>" in line:
line = line.replace(
"<RPC_TRACKER_HOST>", str(self._rpc_info["rpc_tracker_host"])
)
if "<RPC_TRACKER_PORT>" in line:
line = line.replace(
"<RPC_TRACKER_PORT>", str(self._rpc_info["rpc_tracker_port"])
)
if "<HEXAGON_REMOTE_DEVICE_KEY>" in line:
line = line.replace(
"<HEXAGON_REMOTE_DEVICE_KEY>", self._rpc_info["device_key"]
)
if "<RPC_SERVER_PORT>" in line:
line = line.replace(
"<RPC_SERVER_PORT>", str(self._rpc_info["rpc_server_port"])
)
if "<FARF_CONFIG>" in line:
line = line.replace("<FARF_CONFIG>", str(self._farf_config))
dest_f.write(line)
# Make shell script executable
android_bash_stat = os.stat(android_bash_script_path)
os.chmod(android_bash_script_path, android_bash_stat.st_mode | stat.S_IEXEC)
self._copy_to_remote(
android_bash_script_path, self._workspace / android_bash_script_path.name
)
# Push files
lib_dir = _get_hexagon_rpc_lib_dir()
for item in self.ANDROID_HEXAGON_RPC_FILES:
self._copy_to_remote(lib_dir / item, self._workspace / item)
def _process_forwarded_ports(self):
forwarded_ports = subprocess.check_output(self._adb_device_sub_cmd + ["forward", "--list"])
existing_forwards = []
for forward in str(forwarded_ports).split("\\n"):
entry = forward.split()
if len(entry) == 3:
_, local, _ = entry
existing_forwards.append(int(local.strip("tcp:")))
return existing_forwards
def _forward_ports(self, rpc_server_port, existing_forwards):
# Enable port forward for RPC server. We forward the first ten open ports
# starting from the rpc_server_port
port = rpc_server_port
while len(self.forwarded_ports_) < 10:
if port not in existing_forwards and not _is_port_in_use(port):
_check_call_verbose(
self._adb_device_sub_cmd + ["forward", f"tcp:{port}", f"tcp:{port}"]
)
self.forwarded_ports_.append(port)
port += 1
def _reverse_ports(self, rpc_tracker_port):
_check_call_verbose(
self._adb_device_sub_cmd
+ ["reverse", f"tcp:{rpc_tracker_port}", f"tcp:{rpc_tracker_port}"]
)
def _run_server_script(self):
"""Setup the ADB connection and execute the server script."""
# Collect any existing adb port forwarding to avoid duplication
# with another running process
existing_forwards = self._process_forwarded_ports()
# Enable port reverse for RPC tracker
rpc_tracker_port = self._rpc_info["rpc_tracker_port"]
rpc_server_port = self._rpc_info["rpc_server_port"]
self._reverse_ports(rpc_tracker_port)
self._forward_ports(rpc_server_port, existing_forwards)
# Run server and connect to tracker
subprocess.Popen(
self._adb_device_sub_cmd
+ ["shell", f"cd {self._workspace} && ./{ANDROID_BASH_FILE_NAME}"],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
def _cleanup_port_forwarding(self):
        # Remove pre-defined forward/reverse rules
rpc_tracker_port = self._rpc_info["rpc_tracker_port"]
_check_call_verbose(
self._adb_device_sub_cmd + ["reverse", "--remove", f"tcp:{rpc_tracker_port}"]
)
for port in self.forwarded_ports_:
_check_call_verbose(self._adb_device_sub_cmd + ["forward", "--remove", f"tcp:{port}"])
def _terminate_remote(self):
        # Send interrupt to main and child processes
subprocess.Popen(
self._adb_device_sub_cmd
+ ["shell", f"pkill -l sigint -P `cat {self._workspace}/rpc_pid.txt`"]
)
subprocess.Popen(
self._adb_device_sub_cmd
+ ["shell", f"kill -s sigint `cat {self._workspace}/rpc_pid.txt`"]
)
# Wait for the processes to shut down cleanly after receiving the interrupt
subprocess.Popen(self._adb_device_sub_cmd + ["shell", "sleep", "0.1s"])
# Kill process children
subprocess.Popen(
self._adb_device_sub_cmd + ["shell", f"pkill -P `cat {self._workspace}/rpc_pid.txt`"]
)
# Kill main process
subprocess.Popen(
self._adb_device_sub_cmd + ["shell", f"kill `cat {self._workspace}/rpc_pid.txt`"]
)
def cleanup_directory(self):
"""Abstract method implementation. See description in HexagonLauncherRPC."""
subprocess.Popen(self._adb_device_sub_cmd + ["shell", f"rm -rf {self._workspace}"])
def _start_sysmon(self):
hexagon_sdk_root = os.environ.get("HEXAGON_SDK_ROOT", default="")
subprocess.call(
self._adb_device_sub_cmd
+ ["push", f"{hexagon_sdk_root}/tools/utils/sysmon/sysMonApp", "/data/local/tmp/"]
)
sysmon_process = subprocess.Popen(
self._adb_device_sub_cmd
+ [
"shell",
"/data/local/tmp/sysMonApp profiler --debugLevel 0 --samplePeriod 1 --q6 cdsp",
],
stdin=subprocess.PIPE,
)
return sysmon_process
def _stop_sysmon(self):
if self._sysmon_process is not None:
self._sysmon_process.communicate(input=b"\n")
self._sysmon_process = None
def _retrieve_sysmon(self):
pathlib.Path("./sysmon_output/").mkdir(exist_ok=True)
subprocess.call(
self._adb_device_sub_cmd + ["pull", "/sdcard/sysmon_cdsp.bin", "./sysmon_output/"]
)
subprocess.call(self._adb_device_sub_cmd + ["root"])
hexagon_sdk_root = os.environ.get("HEXAGON_SDK_ROOT", default="")
subprocess.call(
f"{hexagon_sdk_root}/tools/utils/sysmon/parser_linux_v2/HTML_Parser/sysmon_parser "
+ "./sysmon_output/sysmon_cdsp.bin --outdir ./sysmon_output/",
shell=True,
)
def _clear_debug_logs(self):
subprocess.call(self._adb_device_sub_cmd + ["shell", "logcat", "-c"])
def _retrieve_debug_logs(self):
run_start_time = subprocess.check_output(
self._adb_device_sub_cmd
+ [
"shell",
"stat",
f"{self._workspace}/android_bash.sh | grep 'Change' | grep -oe '[0-9].*'",
]
)
run_start_time = run_start_time[:-1].decode("UTF-8")
subprocess.call(
self._adb_device_sub_cmd
+ [
"shell",
"logcat",
"-t",
f'"{run_start_time}"',
"-f",
f"{self._workspace}/logcat.txt",
]
)
subprocess.call(self._adb_device_sub_cmd + ["pull", f"{self._workspace}/logcat.txt", "."])
def _print_cdsp_logs(self):
crash_count = 0
context_lines = 0
print_buffer = ""
try:
with open("./logcat.txt", "r") as f:
for line in f:
if "Process on cDSP CRASHED" in line:
if crash_count <= 5:
print(print_buffer, "\n")
context_lines = 40
print_buffer = ""
crash_count += 1
if context_lines > 0 and "platform_qdi_driver" in line:
context_lines -= 1
print_buffer += line[80:]
if crash_count <= 5:
print(print_buffer, "\n")
print(
f"There were {crash_count} crashes on the cDSP during execution... "
+ "Crash printing is limited to the first 5."
)
except FileNotFoundError:
print("Unable to parse logcat file.")
def start_server(self):
"""Abstract method implementation. See description in HexagonLauncherRPC."""
self._copy_binaries()
if self._sysmon_profile:
self._sysmon_process = self._start_sysmon()
self._run_server_script()
if self._clear_logcat:
self._clear_debug_logs()
def stop_server(self):
"""Abstract method implementation. See description in HexagonLauncherRPC."""
if self._sysmon_profile and self._sysmon_process is not None:
self._stop_sysmon()
self._retrieve_sysmon()
if self._hexagon_debug:
self._retrieve_debug_logs()
self._print_cdsp_logs()
self._cleanup_port_forwarding()
self._terminate_remote()
if not self._hexagon_debug:
self.cleanup_directory()
def get_profile_output(
self,
hex_profiler: HexagonProfiler,
session: Session,
):
"""Abstract method implementation. See description in HexagonLauncherRPC."""
profile_data = ""
if hex_profiler.is_lwp_enabled():
temp_dir = hex_profiler.get_temp_dir()
remote_path = hex_profiler.get_remote_path()
if not temp_dir:
raise RuntimeError("tempdir not passed")
fname = "lwp.json"
out_path = os.path.join(remote_path, fname)
profile_data = temp_dir.relpath(fname)
ret = session.get_profile_output(hex_profiler.get_mode(), fname)
if ret:
subprocess.check_call(self._adb_device_sub_cmd + ["pull", out_path, profile_data])
else:
raise RuntimeError("Error generating profile output")
elif hex_profiler.profiling_mode == "etm":
hex_profiler.pull_files_for_etm_processing(self._workspace)
else:
raise RuntimeError("Profiling not enabled")
return profile_data
class HexagonLauncherSimulator(HexagonLauncherRPC):
"""Hexagon Launcher for Hexagon simulator."""
SIMULATOR_HEXAGON_RPC_FILES = ["tvm_rpc_x86", "libhexagon_rpc_sim.so"]
def __init__(self, rpc_info: dict, workspace: Union[str, pathlib.Path] = None):
"""Configure a new HexagonLauncherSimulator
Parameters are same as for HexagonLauncherRPC.
"""
self._toolchain = os.environ.get("HEXAGON_TOOLCHAIN")
if not self._toolchain:
raise RuntimeError("Please set HEXAGON_TOOLCHAIN env variable")
self._serial_number = HEXAGON_SIMULATOR_NAME
super(HexagonLauncherSimulator, self).__init__(rpc_info, workspace, self._serial_number)
def _copy_to_remote(
self, local_path: Union[str, pathlib.Path], remote_path: Union[str, pathlib.Path]
):
"""Abstract method implementation. See description in HexagonLauncherRPC."""
_check_call_verbose(["cp", str(local_path), str(remote_path)])
def _create_remote_directory(self, remote_path: Union[str, pathlib.Path]) -> pathlib.Path:
"""Abstract method implementation. See description in HexagonLauncherRPC."""
_check_call_verbose(["mkdir", "-p", str(remote_path)])
return pathlib.Path(os.path.abspath(remote_path))
def _copy_libcxx(self, dest_dir: Union[str, pathlib.Path]):
"""Copy libc++ libraries to the remote workspace."""
# Copy the v68 versions, since we don't have target information.
# The v68 ones should work everywhere on v68+.
lib_dir = os.path.join(self._toolchain, "target/hexagon/lib/v68/G0/pic")
libcxx_files = []
for entry in os.scandir(lib_dir):
if entry.is_dir() or entry.name.find(".so") == -1:
continue
if entry.name.startswith("libc++"):
libcxx_files.append(entry.name)
# Use tar to preserve the symbolic links. Libc++ libraries use the
# typical .so versioning, so that libc++.so may be a symlink to
# something else. Also, shared libraries using libc++ could be
# directly linked against some version, e.g. libc++.so.1, so make
# sure that all files are copied over. The preservation of symbolic
# links is to save disk space.
tar_in = f"tar -cf - -C {lib_dir} " + " ".join(libcxx_files)
tar_out = f"tar -xf - -C {str(dest_dir)}"
_check_call_verbose(tar_in + " | " + tar_out, shell=True)
def start_server(self):
"""Abstract method implementation. See description in HexagonLauncherRPC."""
# Copy binaries
lib_dir = _get_hexagon_rpc_lib_dir()
for item in self.SIMULATOR_HEXAGON_RPC_FILES:
self._copy_to_remote(lib_dir / item, self._workspace / item)
# Copy libc++ from the toolchain to the workspace
self._copy_libcxx(self._workspace)
self._rpc_info["device_key"] = HEXAGON_REMOTE_DEVICE_KEY + "." + str(os.getpid())
rpc_tracker_host = self._rpc_info["rpc_tracker_host"]
rpc_tracker_port = self._rpc_info["rpc_tracker_port"]
rpc_server_port = self._rpc_info["rpc_server_port"]
device_key = self._rpc_info["device_key"]
server_exe = os.path.join(".", "tvm_rpc_x86")
args = [
"server",
f"--tracker={rpc_tracker_host}:{rpc_tracker_port}",
f"--port={rpc_server_port}",
f"--key={device_key}",
"--timeout=0",
]
# pylint: disable=unused-argument
def _terminate_handler(self, signum, *rest):
# Terminate the Popen'ed (sub)process.
os.kill(self._subprocess_pid, signal.SIGTERM)
def _start(self):
# This function will be running in a new process. It will start the RPC
# (x86) server as a subprocess of itself.
log_out = self._workspace / "stdout.txt"
log_err = self._workspace / "stderr.txt"
# Intercept the TERM signal so we can also terminate the subprocess.
signal.signal(signal.SIGTERM, lambda *a: _terminate_handler(self, *a))
with open(log_out, "w") as out, open(log_err, "w") as err:
p = subprocess.Popen(
[server_exe, *args], stdout=out, stderr=err, cwd=self._workspace
)
# Insert the pid of the subprocess in the self object.
self._subprocess_pid = p.pid
p.wait()
self._server_process = mp.Process(target=lambda *a: _start(self, *a))
self._server_process.start()
def cleanup_directory(self):
"""Abstract method implementation. See description in HexagonLauncherRPC."""
def stop_server(self):
"""Abstract method implementation. See description in HexagonLauncherRPC."""
self._server_process.terminate()
def get_profile_output(
self,
hex_profiler: HexagonProfiler,
session: Session,
):
"""Abstract method implementation. See description in HexagonLauncherRPC."""
profile_data = ""
if hex_profiler.is_lwp_enabled():
fname = "lwp.json"
profile_data = f"{self._workspace}/{fname}"
ret = session.get_profile_output(hex_profiler.get_mode(), fname)
if not ret:
raise RuntimeError("Error generating profile output")
elif hex_profiler.profiling_mode == "etm":
raise RuntimeError("ETM Profiling not supported on the simulator")
else:
raise RuntimeError("Profiling not enabled")
return profile_data
# https://stackoverflow.com/a/52872579/2689797
def _is_port_in_use(port: int) -> bool:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(("localhost", port)) == 0
def farf_config_from_python_log_level(level) -> str:
"""Generates a FARF configuration string enabling logging at the specified level
Parameters
----------
level : str or int
Minimum level to log at. Must be a known Python logging level or string
(e.g., logging.INFO or "INFO")
"""
# Runtime log levels can be selectively enabled by computing a bitmask
# corresponding to the levels you want to enable. These get forwarded to
# logcat by the DSP RPC daemon. The bits for each level are:
# 0x01 - Hexagon LOW / TVM DEBUG / Python DEBUG
# 0x02 - Hexagon MEDIUM / TVM INFO / Python INFO
# 0x04 - Hexagon HIGH / TVM WARN / Python WARNING
# 0x08 - Hexagon ERROR / TVM ERROR / Python ERROR
# 0x10 - Hexagon FATAL / TVM FATAL / Python CRITICAL
# Runtime logging can also be filtered on filenames by appending a
# comma-separated list of filenames. For more information, see
# the Hexagon SDK documentation.
if level in (logging.DEBUG, "DEBUG"):
return "0x1F"
if level in (logging.INFO, "INFO"):
return "0x1E"
if level in (logging.WARNING, "WARNING"):
return "0x1C"
if level in (logging.ERROR, "ERROR"):
return "0x18"
if level in (logging.CRITICAL, "CRITICAL"):
return "0x10"
raise ValueError("Argument must be a known Python logging level or string")
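# Illustrative example (derived from the bit layout documented above, not part
# of the original module): the returned string is a bitmask of the enabled
# levels. logging.INFO enables INFO and everything above it:
#   0x02 | 0x04 | 0x08 | 0x10 == 0x1E
# so farf_config_from_python_log_level(logging.INFO) == "0x1E", and
# farf_config_from_python_log_level("ERROR") == "0x18" (ERROR | FATAL only).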
# pylint: disable=invalid-name
def HexagonLauncher(
serial_number: str,
rpc_info: dict,
workspace: Union[str, pathlib.Path] = None,
hexagon_debug: bool = False,
clear_logcat: bool = False,
sysmon_profile: bool = False,
farf_config: str = farf_config_from_python_log_level(logging.INFO),
):
"""Creates a HexagonLauncher"""
if serial_number == HEXAGON_SIMULATOR_NAME:
return HexagonLauncherSimulator(rpc_info, workspace)
return HexagonLauncherAndroid(
serial_number, rpc_info, workspace, hexagon_debug, clear_logcat, sysmon_profile, farf_config
)
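# Minimal usage sketch (hypothetical values, not part of the original module):
# the rpc_info dictionary is expected to carry the tracker and server ports
# used throughout the launchers above.
#
#   rpc_info = {
#       "rpc_tracker_host": "0.0.0.0",
#       "rpc_tracker_port": 9190,
#       "rpc_server_port": 7070,
#       "adb_server_socket": "tcp:5037",
#   }
#   launcher = HexagonLauncher(serial_number="simulator", rpc_info=rpc_info)
#   launcher.start_server()
#   ...  # create sessions, run workloads
#   launcher.stop_server()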
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/hexagon/hexagon_profiler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Define HexagonProfiler class to enable profiling for Hexagon"""
import os
import subprocess
import typing
from tvm.ir.transform import PassContext
from tvm.contrib.hexagon.profiling.process_lwp_data import process_lwp_output
from tvm.relay.backend.executor_factory import ExecutorFactoryModule
from tvm.driver.build_module import OperatorModule
from tvm.contrib import utils
class HexagonProfiler:
"""Hexagon Profiler"""
def __init__(
self,
dso_binary: str,
module: typing.Union[ExecutorFactoryModule, OperatorModule],
hexagon_server_process,
enable_debug,
):
"""Configure HexagonProfiler"""
# Save test .so to process profiling data
self._temp_dir = utils.tempdir(keep_for_debug=enable_debug)
self._dso_binary_path = self._temp_dir.relpath(dso_binary)
if isinstance(module, OperatorModule):
module.save(self._dso_binary_path)
else:
module.get_lib().save(self._dso_binary_path)
self._android_serial_number = os.environ.get("ANDROID_SERIAL_NUMBER")
self._remote_path = ""
self._logcat_path = ""
self._profiling_mode = None
config = PassContext.current().config
if self._android_serial_number is None:
raise RuntimeError("ANDROID_SERIAL_NUMBER must be set for profiling")
if ("tir.instrument_lwp", True) in config.items():
# Set profiling mode
self._profiling_mode = "lwp"
if self._android_serial_number != "simulator":
# Clear the logcat buffer and create a child process to redirect logcat output
# into a file.
launcher = hexagon_server_process["launcher"]
subprocess.check_call(launcher._adb_device_sub_cmd + ["logcat", "-c"])
self._logcat_path = self._temp_dir.relpath("logcat.log")
self._fo = open(self._logcat_path, "w")
self._proc = subprocess.Popen(
launcher._adb_device_sub_cmd + ["logcat"], stdout=self._fo
)
# Get the remote workspace on the device from where the lwp data needs to be copied.
self._remote_path = launcher._workspace
if self._profiling_mode is None:
raise RuntimeError("Profiling mode was not set or was not a valid one.")
def get_mode(self):
return self._profiling_mode
def is_lwp_enabled(self):
return self._profiling_mode == "lwp"
def get_temp_dir(self):
return self._temp_dir
def get_remote_path(self):
return self._remote_path
def get_profile_output(self, hexagon_launcher, hexagon_session):
"""Get runtime profiling data"""
prof_out = hexagon_launcher.get_profile_output(self, hexagon_session)
print("lwp json can be found at -- ", prof_out)
# Process lightweight profiling output into an easily readable csv file
# The post-processing requires following parameters:
# 1) Path of the binary file
# 2) android_serial_number
# 3) Path of the lwp json file (lwp.json) which gets created in the current directory
# 4) Path to the run log depending on the environment:
# a) For on-device runs:
# Use logcat output as the run log
# b) For simulator runs:
# Use "stdout.txt" as the run log. There is no need to specify the full path to
# "stdout.txt" as it will be inferred based on 'prof_out' location.
# 5) lwp processed output file - "lwp.csv"
#
lwp_csv = self._temp_dir.relpath("lwp.csv")
if self._android_serial_number == "simulator":
process_lwp_output(
self._dso_binary_path, self._android_serial_number, prof_out, "stdout.txt", lwp_csv
)
else:
# For on-device run
self._proc.kill() # End the child process for logcat
self._fo.close()
if os.path.exists(self._logcat_path):
process_lwp_output(
self._dso_binary_path,
self._android_serial_number,
prof_out,
self._logcat_path,
lwp_csv,
)
else:
raise RuntimeError("Error processing lwp output - missing logcat file")
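# Minimal usage sketch (assumed workflow, not part of the original module):
# lightweight profiling must be enabled through the PassContext config before
# the module is built, otherwise __init__ above raises RuntimeError.
#
#   with tvm.transform.PassContext(config={"tir.instrument_lwp": True}):
#       ...  # build the module to be profiled
#   profiler = HexagonProfiler(dso_binary, module, hexagon_server_process,
#                              enable_debug=False)
#   profiler.get_profile_output(hexagon_launcher, hexagon_session)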
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/hexagon/meta_schedule.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Meta schedule tuning utilities for Hexagon."""
import os
import tempfile
from typing import Callable, List, Optional
from tvm.contrib.popen_pool import PopenPoolExecutor
from tvm.meta_schedule.utils import cpu_count, derived_object
from tvm.meta_schedule.builder import LocalBuilder
from tvm.meta_schedule.runner import (
EvaluatorConfig,
RunnerInput,
RunnerFuture,
PyRunner,
)
from tvm.meta_schedule.runner.rpc_runner import (
default_alloc_argument,
default_run_evaluator,
RPCRunnerFuture,
)
from .build import HexagonLauncherRPC
from .tools import export_module
@derived_object
class HexagonRPCRunner(PyRunner):
"""RPCRunner for Hexagon. See the documentation of RPCRunner for more details."""
def __init__(
self,
hexagon_launcher: HexagonLauncherRPC,
evaluator_config: Optional[EvaluatorConfig] = None,
cooldown_sec: float = 0.0,
alloc_repeat: int = 1,
max_workers: Optional[int] = None,
initializer: Optional[Callable[[], None]] = None,
):
"""
Parameters
----------
hexagon_launcher : HexagonLauncherRPC
The RPC launcher for Hexagon. It is needed for creating hexagon.Session
object inside the worker function.
evaluator_config: EvaluatorConfig
The evaluator configuration.
cooldown_sec: float
The cooldown in seconds.
alloc_repeat: int
The number of times to random fill the allocation.
max_workers: Optional[int] = None
The maximum number of connections. Defaults to number of logical CPU cores.
initializer: Optional[Callable[[], None]]
The initializer function.
"""
super().__init__()
self.hexagon_launcher = hexagon_launcher
self.evaluator_config = EvaluatorConfig._normalized(evaluator_config)
self.cooldown_sec = cooldown_sec
self.alloc_repeat = alloc_repeat
if max_workers is None:
max_workers = cpu_count(logical=True)
self.pool = PopenPoolExecutor(
max_workers=max_workers,
timeout=100,
initializer=initializer,
)
def run(self, runner_inputs: List[RunnerInput]) -> List[RunnerFuture]:
results = []
for runner_input in runner_inputs:
future = RPCRunnerFuture(
future=self.pool.submit(
_worker_func,
self.hexagon_launcher,
self.evaluator_config,
self.alloc_repeat,
str(runner_input.artifact_path),
tuple(arg_info.as_json() for arg_info in runner_input.args_info),
),
timeout_sec=100,
)
results.append(future)
return results
def _worker_func(hexagon_launcher, evaluator_config, alloc_repeat, artifact_path, args_info):
with hexagon_launcher.create_session() as session:
device = session.device
_, remote_path = os.path.split(artifact_path)
uploaded = session.upload(artifact_path, remote_path)
rt_mod = session.load_module(uploaded)
repeated_args = default_alloc_argument(
session,
device,
args_info,
alloc_repeat,
)
costs = default_run_evaluator(
session,
rt_mod,
device,
evaluator_config,
repeated_args,
)
return costs
def get_hexagon_local_builder():
"""Return Hexagon-compatible Builder for meta schedule."""
def export_func(mod):
binary_path = export_module(mod, tempfile.mkdtemp())
return str(binary_path)
return LocalBuilder(f_export=export_func)
def get_hexagon_rpc_runner(
hexagon_launcher: HexagonLauncherRPC, number=3, repeat=1, min_repeat_ms=100
):
"""Return Hexagon-compatible RPC Runner for meta schedule.
Parameters
----------
hexagon_launcher : HexagonLauncherRPC
The RPC launcher for Hexagon.
number: int
The number of times to run this function for taking average.
We call these runs as one `repeat` of measurement.
repeat: int
The number of times to repeat the measurement.
In total, the function will be invoked (1 + number x repeat) times,
where the first one is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms: int
Minimum repeat time in ms. if the execution latency is too short,
increase the number of runs to the given time (in ms) to reduce the measurement error.
"""
evaluator_config = EvaluatorConfig(
number=number,
repeat=repeat,
min_repeat_ms=min_repeat_ms,
enable_cpu_cache_flush=False,
)
return HexagonRPCRunner(
hexagon_launcher,
evaluator_config,
)
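# Usage sketch (hypothetical setup, not part of the original module): with the
# defaults above (number=3, repeat=1), each measured candidate is invoked
# 1 + number x repeat = 4 times, the first run being a discarded warm-up.
#
#   builder = get_hexagon_local_builder()
#   runner = get_hexagon_rpc_runner(hexagon_launcher, number=3, repeat=1)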
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/hexagon/profiling/process_lwp_data.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import csv
import subprocess
import argparse
import os
from re import search, compile
from collections import OrderedDict
ENABLE_DEBUG = False
"""
Process lightweight profiling output and generate a CSV file with processor
cycles for the instrumented functions and loops.
Please note that some assumptions have been made while processing
the lightweight profiling output. They are as follows:
1) We don't expect profiled functions to call another profiled function.
This constraint can be relaxed if needed but it simplifies the processing
significantly without introducing any limitations for our use case.
2) For now, it's also assumed that every unique section (loop) ID has the same start
and end offset, which will not be true when a loop gets unrolled, since unrolling
creates multiple profiling sections with the same ID. The current
implementation doesn't handle this case.
"""
def get_func_info(model_so):
"""Get all the .text sections along with their start and end offset values"""
hexagon_nm_path = os.environ["HEXAGON_TOOLCHAIN"] + "/bin/hexagon-nm"
out = subprocess.Popen(
[hexagon_nm_path, "--print-size", model_so],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
stdo, stde = out.communicate()
stdo = stdo.decode("utf-8")
func_info = []
for line in stdo.split("\n"):
info = {}
if search(" (T|t) ", line):  # If .text section
parts = line.split(" ")
assert len(parts) == 4
info["start"] = int(parts[0], base=16)
info["end"] = int(parts[0], base=16) + int(parts[1], base=16)
info["name"] = parts[3]
func_info.append(info)
# Sort the entries in the increasing order of the start offset value.
func_info = sorted(func_info, key=lambda d: d["start"])
if ENABLE_DEBUG:
print("func_info :\n ")
for f in func_info:
print(f)
return func_info
def find_func(func_info, offset):
"""For a given offset, find the function it belongs to."""
fidx = 0
lidx = len(func_info) - 1
while fidx <= lidx:
midx = (fidx + lidx) // 2
ms = func_info[midx]["start"]
me = func_info[midx]["end"]
if fidx == lidx:
assert (
offset >= ms and offset <= me
), f"Couldn't find a function for this offset: {offset}"
return fidx
else:
if offset > me:
fidx = midx + 1
elif offset < ms:
lidx = midx - 1
else:
return midx
assert False, "Possible mismatch between model .so and LWP data"
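# Worked example (illustrative only): with func_info sorted by start offset,
# e.g. [{"start": 0x100, "end": 0x1ff, ...}, {"start": 0x200, "end": 0x2ff, ...}],
# find_func(func_info, 0x250) narrows the binary search to index 1 because
# 0x250 lies between that entry's start and end offsets.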
def accumulate_cycles(overall_cycles, func_cycles, func_name):
"""Accumulate function cycles"""
acc_cycles = overall_cycles[func_name]
for id in func_cycles:
assert id in acc_cycles, f"id [{id}] missing in the existing function record"
assert (
acc_cycles[id]["start"] == func_cycles[id]["start"]
), "Offset value doesn't match with the existing function record."
acc_cycles[id]["cycles"] += func_cycles[id]["cycles"]
acc_cycles[id]["count"] += func_cycles[id]["count"]
overall_cycles.update({func_name: acc_cycles})
return overall_cycles
def adjust_per_loop_counts(overall_cycles, data):
"""
Use execution count and the number of entries recorded for each function/loop
to compute the overall cycles spent on them.
"""
for func in overall_cycles:
func_cycles = overall_cycles[func]
for id in func_cycles:
exec_count = data["loop_counts"][id]
rec_count = func_cycles[id]["count"]
assert exec_count != 0, "Execution count should have been non-zero."
assert rec_count != 0, "Entry count should have been non-zero."
exec_cycles = ((int(func_cycles[id]["cycles"])) * exec_count) // rec_count
func_cycles[id]["cycles"] = exec_cycles
func_cycles[id]["count"] = exec_count
overall_cycles.update({func: OrderedDict(sorted(func_cycles.items()))})
return overall_cycles
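# Worked example (illustrative only): if a loop was recorded rec_count=2 times
# with a total of 1000 cycles, but its loop counter reports exec_count=10
# actual executions, the adjusted total is 1000 * 10 // 2 = 5000 cycles, and
# the stored count becomes 10.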
def create_csv_report(overall_cycles, fname):
"""Create csv report"""
header = [
"function name",
"loop/function id",
"loop depth",
"start offset",
"end offset",
"pcycles",
"parent count",
]
with open(fname, "w") as f:
writer = csv.writer(f)
writer.writerow(header)
for func in overall_cycles:
func_cycles = overall_cycles[func]
data = []
root = -1
outer_most = -1
for key, value in func_cycles.items():
if value["parent"] == -1:
assert root == -1, "Can't have multiple root nodes."
root = key
data.append(func)
data.append(key)
if value["parent"] == -1:
data.append("-") # Total cycles over all invocations of this function.
elif value["parent"] == root:
data.append(0)
outer_most = key
else:
if outer_most > -1:
data.append(key - outer_most)
else:
data.append(key - value["parent"])
data.append(hex(value["start"]))
data.append(hex(value["end"]))
data.append(value["cycles"])
data.append(value["count"])
writer.writerow(data)
data.clear()
def process_data(data, func_info, so_ld_addr):
"""Process data"""
# Keep an ordered list of loop IDs as they are being visited. This is used
# to match entry and exit pairs. Once the function/loop is processed, it's
# removed from the list.
ordered_visited_list = []
# Store information regarding visited nodes as they are being processed. Once
# the function/loop is processed, it's removed from the set.
visited_set = {}
# Dictionary to store cycles for the entire model which is grouped into functions.
overall_cycles = {}
func_cycles = {}
func_idx = -1
func_name = ""
prev_func_name = ""
func_start = 0
func_end = 0
save_data = False
# Iterate over all the entries in the LWP data file and process them
# to construct a report.
for entry in data["entries"]:
id = entry["id"]
offset = entry["ret"] - so_ld_addr
# Recorded return address should fall within the function begin and end
# offsets. If not, find the function it belongs to.
if offset < func_start or offset > func_end:
prev_func_name = func_name
if ENABLE_DEBUG:
print("offset : ", offset)
print("id : ", id)
func_idx = find_func(func_info, offset)
func_name = func_info[func_idx]["name"]
func_start = func_info[func_idx]["start"]
func_end = func_info[func_idx]["end"]
if ENABLE_DEBUG:
print("func_name : ", func_name)
if save_data:
# overall_cycles = save_func_cycles(prev_func_name, overall_cycles, func_cycles, ordered_visited_list)
# Done processing the previous function, copy its info into 'overall_cycles'.
if prev_func_name not in overall_cycles:
overall_cycles[prev_func_name] = func_cycles.copy()
else:
# Accumulate cycles into existing function entry.
overall_cycles = accumulate_cycles(overall_cycles, func_cycles, prev_func_name)
# We don't allow for fused operators (functions) calling another operator.
if ENABLE_DEBUG:
print("ordered_visited_list : ", ordered_visited_list)
assert len(ordered_visited_list) == 0, (
f"\nDone processing function [{prev_func_name}] but ordered_visited_list not empty.\n"
f"\t Possible reasons -- \n"
f"\t\t1) Mismatch between model .so and json file.\n"
f"\t\t2) LWP buffer may have overflowed resulting into missing entries!"
)
func_cycles.clear()
save_data = True
if id not in visited_set: # Found 'entry' record
visited_info = {"func_idx": func_idx, "ret": offset, "cyc": entry["cyc"]}
visited_set[id] = visited_info
ordered_visited_list.append(id)
else: # Found 'exit' record
# This should be the last entry in the ordered_visited_list. If not, error out.
assert ordered_visited_list[-1] == id, (
"Problem with LWP output - Interleaved handler calls found."
f"Loop [{ordered_visited_list[-1]}] hasn't exited yet."
)
ordered_visited_list.pop()
entry_node = visited_set.pop(id)
assert (
entry_node["func_idx"] == func_idx
), f'Error - Found under a different function name : {entry_node["func_idx"]}'
cycles = entry["cyc"] - entry_node["cyc"]
parent = -1
if ordered_visited_list:
parent = int(ordered_visited_list[-1])
if id in func_cycles:
fcycles = func_cycles[id]
fcycles["cycles"] += cycles
fcycles["count"] += 1
func_cycles[id] = fcycles
else:
func_cycles[id] = {
"cycles": cycles,
"start": entry_node["ret"],
"end": offset,
"parent": parent,
"count": 1,
}
# Done processing the previous function, copy its info into 'overall_cycles'.
if func_name not in overall_cycles:
overall_cycles[func_name] = func_cycles.copy()
else:
# Accumulate cycles into existing function entry.
overall_cycles = accumulate_cycles(overall_cycles, func_cycles, func_name)
# We don't allow for fused operators (functions) calling another operator.
if ENABLE_DEBUG:
print("ordered_visited_list : ", ordered_visited_list)
assert len(ordered_visited_list) == 0, (
f"\nDone processing function [{prev_func_name}] but ordered_visited_list not empty.\n"
f"\t Possible reasons -- \n"
f"\t\t1) Mismatch between model .so and json file.\n"
f"\t\t2) LWP buffer may have overflowed resulting into missing entries!" % prev_func_name
)
overall_cycles = adjust_per_loop_counts(overall_cycles, data)
return overall_cycles
def get_load_addr(serial_number: str, lwp_json: str, run_log: str):
"""Get load address of the binary file"""
if serial_number == "simulator":
basedir = os.path.dirname(lwp_json)
if run_log is None:
run_log = os.path.join(basedir, "stdout.txt")
else:
# If a directory name is specified for the simulator run_log
# (stdout.txt), it must be the same directory as that of lwp_json.
run_log_dir = os.path.dirname(run_log)
assert (
run_log_dir == "" or run_log_dir == basedir
), f"stdout.txt and {os.path.basename(lwp_json)} must be in the same directory"
run_log = os.path.join(basedir, os.path.basename(run_log))
# To extract load address for the simulator run
pattern = compile(r"Model.*: (\w+):")
else:
# To extract load address for on-device run
pattern = compile(r"Model.*: (\w+)")
with open(run_log, "r") as f:
lines = f.read()
a = pattern.search(lines)
load_addr = int(a.group(1), 16)
if ENABLE_DEBUG:
print("load_addr : ", load_addr)
return load_addr
def process_lwp_output(
binary_path: str,
serial_number: str,
lwp_json: str,
run_log: str,
lwp_out: str,
enable_debug: bool = False,
):
"""Process lightweight profiling data"""
# Enable debug messages
global ENABLE_DEBUG
ENABLE_DEBUG = enable_debug
# Get load address for the binary
load_addr = get_load_addr(serial_number, lwp_json, run_log)
# Opening JSON file
with open(lwp_json, "r") as f:
# Returns JSON object as a dictionary
data = json.load(f)
# Get function names, and their start and end offsets from the model .so
func_info = get_func_info(binary_path)
# Get the load address for model .so.
so_ld_addr = load_addr
# Process profiling data to construct a CSV report.
overall_cycles = process_data(data, func_info, so_ld_addr)
create_csv_report(overall_cycles, lwp_out)
print("lwp processed output written to -- ", lwp_out)
print("[NOTE: Use '--hexagon-debug' to keep the temp directory]")
def get_args():
"""Add commandline arguments to run the script manually if needed"""
parser = argparse.ArgumentParser()
parser.add_argument("--lwp-json", help="LWP json file", required=True)
parser.add_argument("--serial-num", help="device-id/simulator", required=True)
parser.add_argument("--test-so", help="Test shared library", required=True)
parser.add_argument(
"--run-log",
help="Logcat file for on-device run and stdout.txt for simulator run",
required=True,
)
parser.add_argument("--lwp-out", help="LWP output file name", required=True)
parser.add_argument(
"--debug",
help="Enable debug output from the script",
dest="debug",
action="store_true",
required=False,
)
parser.set_defaults(debug=False)
args = parser.parse_args()
global ENABLE_DEBUG
ENABLE_DEBUG = args.debug
return args
if __name__ == "__main__":
args = get_args()
process_lwp_output(
args.test_so, args.serial_num, args.lwp_json, args.run_log, args.lwp_out, args.debug
)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/hexagon/pytest_plugin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,redefined-outer-name
""" Hexagon testing fixtures used to deduce testing argument
values from testing parameters """
import os
import random
from typing import Optional, Union
import pytest
import tvm
import tvm.rpc.tracker
from tvm.contrib.hexagon.build import HexagonLauncher, HexagonLauncherRPC
from tvm.contrib.hexagon.session import Session
from tvm.contrib.hexagon.tools import HEXAGON_SIMULATOR_NAME
HEXAGON_TOOLCHAIN = "HEXAGON_TOOLCHAIN"
TVM_TRACKER_HOST = "TVM_TRACKER_HOST"
TVM_TRACKER_PORT = "TVM_TRACKER_PORT"
ANDROID_REMOTE_DIR = "ANDROID_REMOTE_DIR"
ANDROID_SERIAL_NUMBER = "ANDROID_SERIAL_NUMBER"
ADB_SERVER_SOCKET = "ADB_SERVER_SOCKET"
RNG_SEEDED = False
HEXAGON_AOT_LLVM_TARGET = (
"llvm -keys=hexagon "
"-mattr=+hvxv68,+hvx-length128b,+hvx-qfloat,-hvx-ieee-fp "
"-mcpu=hexagonv68 -mtriple=hexagon"
)
@tvm.testing.fixture
def shape_nhwc(batch, in_channel, in_size):
return (batch, in_size, in_size, in_channel)
def _compose(args, decs):
"""Helper to apply multiple markers"""
if len(args) > 0:
func = args[0]
for dec in reversed(decs):
func = dec(func)
return func
return decs
requires_hexagon_toolchain = tvm.testing.requires_hexagon(support_required="compile-only")
def android_serial_number() -> Optional[str]:
"""Return the android serial number"""
serial = os.getenv(ANDROID_SERIAL_NUMBER, default="")
# Setting ANDROID_SERIAL_NUMBER to an empty string should be
# equivalent to having it unset.
if not serial.strip():
return None
# Split android serial numbers into a list
serial = serial.split(",")
return serial
# NOTE on server ports:
# These tests use different port numbers for the RPC server.
# The reason is that an RPC session cannot be gracefully closed without
# triggering TIME_WAIT state on the server socket. This prevents another
# server from binding to the same port until the wait time elapses.
LISTEN_PORT_MIN = 6000 # Avoid hitting well-known Android debug ports
LISTEN_PORT_MAX = 9000 # Below the search range end (port_end=9199) of RPC server
PREVIOUS_PORT = None
def get_free_port() -> int:
"""Return the next port that is available to listen on"""
global PREVIOUS_PORT
global RNG_SEEDED
if tvm.testing.utils.IS_IN_CI and not RNG_SEEDED:
random.seed(0)
RNG_SEEDED = True
if PREVIOUS_PORT is None:
port = random.randint(LISTEN_PORT_MIN, LISTEN_PORT_MAX)
else:
port = PREVIOUS_PORT + 1
if port > LISTEN_PORT_MAX:
port = LISTEN_PORT_MIN
while tvm.contrib.hexagon.build._is_port_in_use(port):
port = port + 1 if port < LISTEN_PORT_MAX else LISTEN_PORT_MIN
PREVIOUS_PORT = port
return port
@pytest.fixture(scope="session")
def _tracker_info() -> Union[str, int]:
env_tracker_host = os.getenv(TVM_TRACKER_HOST, default="")
env_tracker_port = os.getenv(TVM_TRACKER_PORT, default="")
if env_tracker_host or env_tracker_port:
# A tracker is already running, and we should connect to it
# when running tests.
assert env_tracker_host, "TVM_TRACKER_PORT is defined, but TVM_TRACKER_HOST is not"
assert env_tracker_port, "TVM_TRACKER_HOST is defined, but TVM_TRACKER_PORT is not"
env_tracker_port = int(env_tracker_port)
try:
tvm.rpc.connect_tracker(env_tracker_host, env_tracker_port)
except RuntimeError as exc:
message = (
"Could not connect to external tracker "
"specified by $TVM_TRACKER_HOST and $TVM_TRACKER_PORT "
f"({env_tracker_host}:{env_tracker_port})"
)
raise RuntimeError(message) from exc
yield (env_tracker_host, env_tracker_port)
else:
# No tracker is provided to the tests, so we should start one
# for the tests to use.
tracker = tvm.rpc.tracker.Tracker("127.0.0.1", get_free_port())
try:
yield (tracker.host, tracker.port)
finally:
tracker.terminate()
@pytest.fixture(scope="session")
def tvm_tracker_host(_tracker_info) -> str:
host, _ = _tracker_info
return host
@pytest.fixture(scope="session")
def tvm_tracker_port(_tracker_info) -> int:
_, port = _tracker_info
return port
@pytest.fixture(scope="session")
def rpc_server_port_for_session() -> int:
return get_free_port()
@pytest.fixture()
def rpc_server_port() -> int:
return get_free_port()
@pytest.fixture(scope="session")
def adb_server_socket() -> str:
return os.getenv(ADB_SERVER_SOCKET, default="tcp:5037")
@pytest.fixture(scope="session")
def hexagon_server_process(
request,
rpc_server_port_for_session,
adb_server_socket,
skip_rpc,
hexagon_debug,
sysmon_profile,
clear_logcat,
) -> HexagonLauncherRPC:
"""Initials and returns hexagon launcher if ANDROID_SERIAL_NUMBER is defined.
This launcher is started only once per test session.
"""
android_serial_num = android_serial_number()
if android_serial_num is None:
pytest.skip("ANDROID_SERIAL_NUMBER is not set.")
if android_serial_num == [HEXAGON_SIMULATOR_NAME]:
yield None
else:
# Requesting these fixtures sets up a local tracker, if one
# hasn't been provided to us. Delaying the evaluation of
# these fixtures avoids starting a tracker unless necessary.
tvm_tracker_host = request.getfixturevalue("tvm_tracker_host")
tvm_tracker_port = request.getfixturevalue("tvm_tracker_port")
rpc_info = {
"rpc_tracker_host": tvm_tracker_host,
"rpc_tracker_port": tvm_tracker_port,
"rpc_server_port": rpc_server_port_for_session,
"adb_server_socket": adb_server_socket,
}
workerinput = getattr(request.config, "workerinput", None)
if workerinput is None: # single-process execution
device_adr = read_device_list()[0]
else: # running in a subprocess here
device_adr = workerinput["device_adr"]
launcher = HexagonLauncher(
serial_number=device_adr,
rpc_info=rpc_info,
hexagon_debug=hexagon_debug,
sysmon_profile=sysmon_profile,
clear_logcat=clear_logcat,
)
try:
if not skip_rpc:
launcher.start_server()
yield {"launcher": launcher, "device_adr": device_adr}
finally:
if not skip_rpc:
launcher.stop_server()
def read_device_list():
return android_serial_number()
def pytest_configure(config):
# read device list if we are on the master
if not hasattr(config, "workerinput"):
config.iplist = read_device_list()
def pytest_configure_node(node):
# the master for each node fills node input dictionary
# which pytest-xdist will transfer to the subprocess
if node.config.iplist is not None:
node.workerinput["device_adr"] = node.config.iplist.pop()
@pytest.fixture
def hexagon_launcher(
hexagon_server_process,
rpc_server_port,
tvm_tracker_host,
tvm_tracker_port,
adb_server_socket,
hexagon_debug,
sysmon_profile,
clear_logcat,
) -> HexagonLauncherRPC:
"""Initials and returns hexagon launcher which reuses RPC info and Android serial number."""
android_serial_num = android_serial_number()
if android_serial_num != [HEXAGON_SIMULATOR_NAME]:
rpc_info = hexagon_server_process["launcher"]._rpc_info
else:
rpc_info = {
"rpc_tracker_host": tvm_tracker_host,
"rpc_tracker_port": tvm_tracker_port,
"rpc_server_port": rpc_server_port,
"adb_server_socket": adb_server_socket,
}
try:
if android_serial_num == [HEXAGON_SIMULATOR_NAME]:
launcher = HexagonLauncher(serial_number=android_serial_num[0], rpc_info=rpc_info)
launcher.start_server()
else:
launcher = HexagonLauncher(
serial_number=hexagon_server_process["device_adr"],
rpc_info=rpc_info,
hexagon_debug=hexagon_debug,
sysmon_profile=sysmon_profile,
clear_logcat=clear_logcat,
)
yield launcher
finally:
if android_serial_num == [HEXAGON_SIMULATOR_NAME]:
launcher.stop_server()
elif not hexagon_debug:
launcher.cleanup_directory()
@pytest.fixture
def hexagon_session(hexagon_launcher: HexagonLauncherRPC) -> Session:
if hexagon_launcher is None:
yield None
else:
with hexagon_launcher.create_session() as session:
yield session
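# Usage sketch (hypothetical test, not part of the original plugin): tests can
# request the fixtures above directly and skip gracefully when no device or
# simulator is configured.
#
#   def test_add(hexagon_session: Session):
#       if hexagon_session is None:
#           pytest.skip("No Hexagon session available")
#       ...  # build a module, then hexagon_session.load_module(...)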
# If the execution aborts while an RPC server is running, the python
# code that is supposed to shut it down will never execute. This will
# keep pytest from terminating (indefinitely), so add a cleanup
# fixture to terminate any still-running servers.
@pytest.fixture(scope="session", autouse=True)
def terminate_rpc_servers():
# Since this is a fixture that runs regardless of whether the
# execution happens on simulator or on target, make sure the
# yield happens every time.
serial = android_serial_number()
yield []
if serial == [HEXAGON_SIMULATOR_NAME]:
os.system("ps ax | grep tvm_rpc_x86 | awk '{print $1}' | xargs kill")
aot_host_target = tvm.testing.parameter("c", HEXAGON_AOT_LLVM_TARGET)
@tvm.testing.fixture
def aot_target(aot_host_target):
if aot_host_target == "c":
yield tvm.target.hexagon("v68")
elif aot_host_target.startswith("llvm"):
yield aot_host_target
else:
assert False, "Incorrect AoT host target: {aot_host_target}. Options are [c, llvm]."
@pytest.fixture(scope="session")
def skip_rpc(request) -> bool:
return request.config.getoption("--skip-rpc")
@pytest.fixture(scope="session")
def hexagon_debug(request) -> bool:
return request.config.getoption("--hexagon-debug")
@pytest.fixture(scope="session")
def sysmon_profile(request) -> bool:
return request.config.getoption("--sysmon-profile")
@pytest.fixture(scope="session")
def clear_logcat(request) -> bool:
return request.config.getoption("--clear-logcat")
def pytest_addoption(parser):
"""Add pytest options."""
parser.addoption(
"--skip-rpc",
action="store_true",
default=False,
help="If set true, the RPC server initialization on Android would be skipped",
)
parser.addoption(
"--hexagon-debug",
action="store_true",
default=False,
help="If set true, it will keep the hexagon test directories on the target. "
+ "Additionally logcat logs will be copied from device and cdsp errors printed out.",
)
parser.addoption(
"--sysmon-profile",
action="store_true",
default=False,
help="If set true, it will run sysmon profiler during the tests.",
)
parser.addoption(
"--clear-logcat",
action="store_true",
default=False,
help="If set true, it will clear logcat before execution.",
)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/hexagon/session.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines a Session class for Hexagon devices."""
import os
import pathlib
import tempfile
from typing import Union
import tvm
from tvm import rpc as _rpc
import tvm.contrib.hexagon as hexagon
from tvm.relay.backend.executor_factory import (
ExecutorFactoryModule,
AOTExecutorFactoryModule,
GraphExecutorFactoryModule,
)
from .tools import export_module, HEXAGON_SIMULATOR_NAME
class Session:
"""Hexagon Device Session
Parameters
----------
remote_workspace : Union[str, pathlib.Path]
Remote workspace path
rpc_tracker : tuple(str, int)
RPC tracker host and port number.
rpc_server_key : str
RPC server key on remote device.
serial_number : str
Device serial number. `simulator` used for hexagon simulator.
session_name : str
Hexagon RPC session name.
remote_stack_size_bytes : int
The stack size of the remote device, to be passed to
tvm.contrib.hexagon.create_hexagon_session.
rpc_receive_buffer_size_bytes : int
RPC receive buffer size in bytes.
"""
def __init__(
self,
remote_workspace: Union[str, pathlib.Path],
rpc_tracker: tuple,
rpc_server_key: str,
serial_number: str,
session_name: str = "hexagon-rpc",
remote_stack_size_bytes: int = 256 * 1024, # Min size for main thread in QuRT/sim
rpc_receive_buffer_size_bytes: int = 256 * 1024 * 1024, # Size for passing hexagon tests
):
self._workspace = str(remote_workspace)
self._rpc_tracker = rpc_tracker
self._rpc_server_key = rpc_server_key
self._serial_number = serial_number
self._session_name: str = session_name
self._remote_stack_size_bytes: int = remote_stack_size_bytes
self._rpc_receive_buffer_size_bytes: int = rpc_receive_buffer_size_bytes
self._rpc = None
self._requires_cpu_device = False
self._device = None
def __enter__(self):
if self._rpc:
# Already initialized
return self
tracker = _rpc.connect_tracker(self._rpc_tracker[0], self._rpc_tracker[1])
try:
self._rpc = tracker.request(
self._rpc_server_key,
priority=0,
session_timeout=0,
session_constructor_args=[
"tvm.contrib.hexagon.create_hexagon_session",
self._session_name,
self._remote_stack_size_bytes,
os.environ.get("HEXAGON_SIM_ARGS", ""),
self._rpc_receive_buffer_size_bytes,
],
)
func = self._rpc.get_function("device_api.hexagon.acquire_resources")
func()
return self
except RuntimeError as exception:
raise exception
def __exit__(self, exc_type, exc_value, exc_traceback):
try:
func = self._rpc.get_function("device_api.hexagon.release_resources")
func()
except RuntimeError as exception:
print(
"Exception occurred while calling release_resources() during Session __exit__: ",
exception,
)
finally:
# close session to the tracker
shutdown_func = self._rpc._sess.get_function("CloseRPCConnection")
shutdown_func()
del self._rpc
@property
def device(self):
"""Session device."""
if self._device is not None:
return self._device
if self._requires_cpu_device:
self._device = self._rpc.cpu(0)
else:
self._device = self._rpc.hexagon(0)
return self._device
def is_simulator(self):
return self._serial_number == HEXAGON_SIMULATOR_NAME
def get_function(self, name):
return self._rpc.get_function(name)
def upload(self, local_path: Union[str, pathlib.Path], remote_filename: str) -> pathlib.Path:
"""Upload a local file to the remote workspace.
Parameters
----------
local_path : str or pathlib.Path
Path to the local file to be copied.
remote_filename : str
Name of the file in the remote workspace.
Returns
-------
str :
Uploaded file remote path.
"""
upload_func = self._rpc.get_function("tvm.rpc.server.upload")
remote_path = f"{self._workspace}/{remote_filename}"
with open(local_path, mode="rb") as src_f:
data = bytearray(src_f.read())
upload_func(remote_path, data)
return remote_path
def load_module(self, module: Union[str, pathlib.Path, tvm.runtime.Module]):
"""Load TVM module.
The session must be established (via __enter__) prior to
calling this function.
Parameters
----------
module : Union[str, pathlib.Path, tvm.runtime.Module]
The module to load. If `module` is a
`tvm.runtime.Module`, it will be uploaded to the remote
session and loaded.
If the object passed is a string or pathlib.Path, it must
be a full path in the remote system.
Returns
-------
TVMModule :
TVM module object.
"""
assert self._rpc is not None, "Hexagon session must be started using __enter__ prior to use"
if isinstance(module, tvm.runtime.Module):
with tempfile.TemporaryDirectory() as temp_dir:
binary_name = "test_binary.so"
binary_path = export_module(module, temp_dir, binary_name)
remote_file_path = self.upload(binary_path, binary_name)
else:
remote_file_path = module
assert isinstance(remote_file_path, (str, pathlib.Path)), "Invalid path type:" + str(
type(remote_file_path)
)
return self._rpc.get_function("tvm.hexagon.load_module")(str(remote_file_path))
def get_graph_executor(
self,
graph_json: str,
module_name: Union[str, pathlib.Path, tvm.runtime.Module],
):
"""Create a local GraphModule which consumes a remote libmod.
The session must be established (via __enter__) prior to
calling this function.
Parameters
----------
module_name : Union[str, pathlib.Path, tvm.runtime.Module]
The remote module filename, following the same restrictions
as `load_module`.
graph_json : str
The string with the graph JSON.
Returns
-------
GraphModule :
Runtime graph module that can be used to execute the graph.
"""
graph_mod = self.load_module(module_name)
self._set_device_type(graph_mod)
return tvm.contrib.graph_executor.create(graph_json, graph_mod, self.device)
def get_aot_executor(
self,
module_file: Union[str, pathlib.Path],
):
"""Create a local GraphModule which consumes a remote libmod.
The session must be established (via __enter__) prior to
calling this function.
Parameters
----------
module_file : Union[str, pathlib.Path]
The remote module filename, following the same restrictions
as `load_module`. The filename should be an absolute path.
Returns
-------
GraphModule :
Runtime graph module that can be used to execute the graph.
"""
aot_mod = self.load_module(module_file)
return tvm.runtime.executor.AotModule(aot_mod["default"](self.device))
def get_graph_debug_executor(
self,
graph_json: str,
module_name: Union[str, pathlib.Path, tvm.runtime.Module],
dump_root: Union[str, pathlib.Path] = None,
):
"""Create a local GraphModuleDebug which consumes a remote libmod.
Parameters
----------
graph_json : str
The string with the graph JSON.
module_name : Union[str, pathlib.Path, tvm.runtime.Module]
The remote module filename, following the same restrictions
as `load_module`.
session : Session
Remote session. The session must be established (via __enter__)
prior to calling this function.
Returns
-------
GraphModuleDebug :
Runtime debug graph module that can be used to debug the graph.
"""
graph_debug_mod = self.load_module(module_name)
self._set_device_type(graph_debug_mod)
return tvm.contrib.debugger.debug_executor.create(
graph_json, graph_debug_mod, self.device, dump_root=str(dump_root)
)
def get_executor_from_factory(self, module: ExecutorFactoryModule):
"""Create a local GraphModule which consumes a remote libmod.
Parameters
----------
module : ExecutorFactoryModule
The module to upload to the remote
session and load.
"""
if isinstance(module, AOTExecutorFactoryModule):
return self._aot_executor_from_factory(module)
if isinstance(module, GraphExecutorFactoryModule):
return self._graph_executor_from_factory(module)
raise TypeError(f"Unsupported executor type: {type(module)}")
def _set_device_type(self, module: Union[str, pathlib.Path, GraphExecutorFactoryModule]):
"""Set session device type(hexagon, cpu) based on target in module.
Parameters
----------
module: TVMModule
TVM module object.
"""
# for cases when module is a single schedule without target attribute.
if not hasattr(module, "target"):
self._requires_cpu_device = False
else:
assert len(module.target) == 1
for target in module.target:
target_type = str(target).split()[0]
if target_type == "llvm":
self._requires_cpu_device = True
else:
self._requires_cpu_device = False
def _graph_executor_from_factory(
self,
module: Union[str, pathlib.Path, GraphExecutorFactoryModule],
):
"""Create a local GraphModule which consumes a remote libmod.
The session must be established (via __enter__) prior to
calling this function.
Parameters
----------
module : GraphExecutorFactoryModule
The graph executor module to upload to the remote and load.
This will typically be the output of `tvm.relay.build`,
when passing `executor=Executor("graph")`.
Returns
-------
GraphModule :
Runtime graph module that can be used to execute the graph.
"""
return self.get_graph_executor(module.get_graph_json(), module.get_lib())
def _aot_executor_from_factory(
self,
module: Union[str, pathlib.Path, AOTExecutorFactoryModule],
):
"""Create a local GraphModule which consumes a remote libmod.
The session must be established (via __enter__) prior to
calling this function.
Parameters
----------
module : AOTExecutorFactoryModule
The graph executor module to upload to the remote and load.
This will typically be the output of `tvm.relay.build`,
when passing `executor=Executor("aot")`.
Returns
-------
GraphModule :
Runtime graph module that can be used to execute the graph.
"""
hexagon_arch = set(
target.mcpu.replace("hexagon", "")
for target in module.target
if "hexagon" in target.keys
)
self._set_device_type(module)
for target in module.target:
target_type = str(target).split()[0]
assert hexagon_arch, "No hexagon target architecture found"
assert len(hexagon_arch) == 1, f"Inconsistent hexagon architecture found, {hexagon_arch}"
hexagon_arch = hexagon_arch.pop()
with tempfile.TemporaryDirectory() as temp_dir:
temp_dir = pathlib.Path(temp_dir)
binary_name = "test_binary.so"
binary_path = temp_dir / binary_name
if target_type == "hexagon":
module.export_library(
str(binary_path),
fcompile=hexagon.create_aot_shared,
hexagon_arch=hexagon_arch,
)
elif target_type == "llvm":
module.export_library(
str(binary_path),
cc=hexagon.hexagon_clang_plus(),
)
else:
raise ValueError(
f"Incorrect Target kind.\n"
f"Target kind should be from these options: [hexagon, llvm]."
)
remote_file_path = self.upload(binary_path, binary_name)
return self.get_aot_executor(remote_file_path)
def get_profile_output(self, mode: str, path: str):
assert isinstance(mode, str), f"Invalid mode type, {type(mode)} != str"
assert isinstance(path, str), f"Invalid path type, {type(path)} != str"
return self._rpc.get_function("tvm.hexagon.get_profile_output")(mode, path)
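# Minimal usage sketch (assumed workflow, not part of the original module):
# sessions are normally created through a launcher's create_session() and used
# as context managers so that device resources are acquired and released.
#
#   with hexagon_launcher.create_session() as session:
#       dev = session.device
#       lib = session.load_module(runtime_module)  # a tvm.runtime.Module
#       graph_mod = session.get_executor_from_factory(relay_factory_module)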
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/hexagon/tools.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Tools/compilers/linkers for Hexagon"""
import os
import pathlib
from typing import Union
import numpy
import tvm
import tvm.contrib.cc as cc
from ..._ffi.registry import register_func
# Linking Hexagon shared libraries.
#
# link_shared(name-of-shared-library, list-of-objects, kw-args)
#
# To use a custom linker, define a function that returns the path to the
# linker, and pass it to 'register_linker':
#
# def custom_linker_path():
# return '/path/to/hexagon/linker'
#
# register_linker(custom_linker_path)
#
# Subsequent calls to 'link_shared' will use the newly registered linker.
HEXAGON_TOOLCHAIN = os.environ.get("HEXAGON_TOOLCHAIN", default="") # pylint: disable=invalid-name
HEXAGON_SDK_ROOT = os.environ.get("HEXAGON_SDK_ROOT", default="") # pylint: disable=invalid-name
HEXAGON_LINK_MAIN = (
pathlib.Path(HEXAGON_TOOLCHAIN) / "bin" / "hexagon-link"
) # pylint: disable=invalid-name
HEXAGON_CLANG_PLUS = (
pathlib.Path(HEXAGON_TOOLCHAIN) / "bin" / "hexagon-clang++"
) # pylint: disable=invalid-name
HEXAGON_SDK_INCLUDE_DIRS = [ # pylint: disable=invalid-name
pathlib.Path(HEXAGON_SDK_ROOT) / "incs",
pathlib.Path(HEXAGON_SDK_ROOT) / "incs" / "stddef",
]
HEXAGON_SIMULATOR_NAME = "simulator"
def register_linker(f):
"""Register a function that will return the path to the Hexagon linker."""
return register_func("tvm.contrib.hexagon.hexagon_link", f, True)
@register_func("tvm.contrib.hexagon.hexagon_link")
def hexagon_link() -> str:
"""Return path to the Hexagon linker."""
return str(HEXAGON_LINK_MAIN)
def hexagon_clang_plus() -> str:
"""Return path to the Hexagon clang++."""
return str(HEXAGON_CLANG_PLUS)
@register_func("tvm.contrib.hexagon.link_shared")
def link_shared(so_name, objs, extra_args=None):
"""Link shared library on Hexagon using the registered Hexagon linker.
Parameters
----------
so_name : str
Name of the shared library file.
objs : list[str,StringImm]
List of object files to link, given as paths or StringImm objects.
extra_args : dict (str->str) or Map<String,String>
Additional arguments:
'hex_arch' - Hexagon architecture, e.g. v66
'verbose' - Print additional information if the key is present
Returns
-------
ret_val : int
This function returns 0 at the moment.
"""
# The list of object files can be passed as built-in Python strings,
# or as tvm.tir.StringImm's.
def to_str(s):
if isinstance(s, tvm.tir.StringImm):
return s.value
assert isinstance(s, str), 'argument "' + str(s) + '" should be a string or StrImm'
return s
objs = [to_str(s) for s in objs]
if not extra_args:
extra_args = {}
hex_arch = extra_args.get("hex_arch") or "v66"
linker = tvm.get_global_func("tvm.contrib.hexagon.hexagon_link")()
if extra_args.get("verbose"):
print("tvm.contrib.hexagon.link_shared:")
print(" Using linker:", linker)
print(" Library name:", so_name)
print(" Object files:", objs)
print(" Architecture:", hex_arch)
if not os.access(linker, os.X_OK):
message = 'The linker "' + linker + '" does not exist or is not executable.'
if not os.environ.get("HEXAGON_TOOLCHAIN"):
message += (
" The environment variable HEXAGON_TOOLCHAIN is unset. Please export "
+ "HEXAGON_TOOLCHAIN in your environment, so that ${HEXAGON_TOOLCHAIN}/bin/"
+ "hexagon-link exists."
)
else:
message += (
" Please verify the value of the HEXAGON_LINKER environment variable "
+ '(currently set to "'
+ HEXAGON_TOOLCHAIN
+ '").'
)
raise Exception(message)
libpath = os.path.join(HEXAGON_TOOLCHAIN, "target", "hexagon", "lib", hex_arch, "G0")
cc.create_shared(
so_name,
objs,
# pylint: disable=bad-whitespace
options=[
"-Bdynamic",
"-shared",
"-export-dynamic",
os.path.join(libpath, "pic", "libgcc.so"),
],
cc=linker,
)
return 0
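# Example: a minimal sketch of calling link_shared directly. The object file names
# and the "v68" architecture are hypothetical; normally this function is invoked
# through the registered packed function "tvm.contrib.hexagon.link_shared".
#
# link_shared("libtest.so", ["dev_lib0.o", "dev_lib1.o"],
#             {"hex_arch": "v68", "verbose": "1"})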
def create_aot_shared(so_name: Union[str, pathlib.Path], files, hexagon_arch: str, options=None):
"""Export Hexagon AOT module."""
options = options or []
if not os.access(str(HEXAGON_CLANG_PLUS), os.X_OK):
raise Exception(
'The Clang++ "' + str(HEXAGON_CLANG_PLUS) + '" does not exist or is not executable.'
)
if not HEXAGON_TOOLCHAIN:
raise Exception(
" The environment variable HEXAGON_TOOLCHAIN is unset. Please export "
+ "HEXAGON_TOOLCHAIN in your environment."
)
if not HEXAGON_SDK_ROOT:
raise Exception(
" The environment variable HEXAGON_SDK_ROOT is unset. Please export "
+ "HEXAGON_SDK_ROOT in your environment."
)
# The AOT C codegen uses TVM runtime functions
# (e.g. TVMBackendAllocWorkspace) directly. On Hexagon these calls
# should be made using function pointers provided as __TVM*
# variables in the provided context. This workaround allows the
# TVM runtime symbols to be visible to the compiled shared
# library.
#
# This workaround can be removed when AOT codegen can be done with
# LLVM codegen.
workaround_link_flags = os.environ.get("HEXAGON_SHARED_LINK_FLAGS")
if workaround_link_flags:
options.extend(workaround_link_flags.split())
tvm_dir = pathlib.Path(os.path.dirname(os.path.realpath(__file__))) / ".." / ".." / ".." / ".."
compute_arch = f"compute{hexagon_arch}"
compile_options = [
f"-O3",
f"-I{tvm_dir / 'include'}",
f"-I{tvm_dir / '3rdparty' / 'dlpack' / 'include'}",
f"-I{tvm_dir / '3rdparty' / 'dmlc-core' / 'include'}",
f"-I{pathlib.Path(HEXAGON_SDK_ROOT) / 'rtos' / 'qurt' / compute_arch / 'include'/ 'posix'}",
f"-I{pathlib.Path(HEXAGON_SDK_ROOT) / 'rtos' / 'qurt' / compute_arch / 'include' / 'qurt'}",
f"-DDMLC_USE_LOGGING_LIBRARY=<tvm/runtime/logging.h>",
f"-D_MACH_I32=int",
]
# Add the Hexagon SDK include directories.
for path in HEXAGON_SDK_INCLUDE_DIRS:
compile_options.append(f"-I{str(path)}")
cross_compile = cc.cross_compiler(compile_func=hexagon_clang_plus())
cross_compile.output_format = "o"
c_files = [str(file) for file in files]
cross_compile(str(so_name), c_files, options=compile_options + options)
def export_module(module, out_dir, binary_name="test_binary.so"):
"""Export Hexagon shared object to a file."""
binary_path = pathlib.Path(out_dir) / binary_name
module.save(str(binary_path))
return binary_path
def allocate_hexagon_array(
dev, tensor_shape=None, dtype=None, data=None, axis_separators=None, mem_scope=None
):
"""
Allocate an array on a Hexagon device. The logical tensor_shape is flattened
into a physical shape (possibly 2D) according to axis_separators.
"""
if tensor_shape is None:
assert data is not None, "Must provide either tensor shape or numpy data array"
tensor_shape = data.shape
elif data is not None:
assert (
tensor_shape == data.shape
), "Mismatch between provided tensor shape and numpy data array shape"
if dtype is None:
assert data is not None, "Must provide either dtype or numpy data array"
dtype = data.dtype.name
elif data is not None:
assert dtype == data.dtype, "Mismatch between provided dtype and numpy data array dtype"
if axis_separators is None:
axis_separators = []
boundaries = [0, *axis_separators, len(tensor_shape)]
physical_shape = [
numpy.prod(tensor_shape[dim_i:dim_f])
for dim_i, dim_f in zip(boundaries[:-1], boundaries[1:])
]
arr = tvm.nd.empty(physical_shape, dtype=dtype, device=dev, mem_scope=mem_scope)
if data is not None:
arr.copyfrom(data.reshape(physical_shape))
return arr._create_view(tensor_shape)
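# Example: a sketch of how axis_separators map a logical shape to the physical
# allocation. With tensor_shape=(2, 3, 4, 5) and axis_separators=[2], the
# boundaries become [0, 2, 4], so the physical shape is [2*3, 4*5] = [6, 20].
# The device handle and the "global.vtcm" scope below are assumptions:
#
# arr = allocate_hexagon_array(
#     dev, tensor_shape=(2, 3, 4, 5), dtype="float16",
#     axis_separators=[2], mem_scope="global.vtcm",
# )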
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/hexagon/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Hexagon-specific IR transformations"""
import functools as ft
import tvm
from ..._ffi.registry import register_func
### VTCM
vtcm_size = 4 * 1024 * 1024 # pylint: disable=invalid-name
@register_func("tvm.info.mem.local.vtcm")
def mem_info_vtcm():
# pylint: disable=bad-whitespace
return tvm.ir.make_node(
"MemoryInfo",
unit_bits=8,
max_num_bits=vtcm_size * 8,
max_simd_bits=128 * 8,
head_address=tvm.runtime.const(100, "uint32"),
)
def lower_vtcm_(get_alloc, get_free, def_align, func, mod, ctx): # pylint: disable=unused-argument
"""Generic VTCM allocation
Parameters
----------
get_alloc : function: tir.Allocate, int -> tir.expr (dtype='handle')
The VTCM allocation function. It takes an Allocate statement, and the required
alignment, and returns a pointer to the allocated VTCM buffer.
get_free : function: tir.expr (dtype='handle') -> None
The VTCM deallocation function. It takes the address of the allocated buffer
and frees it. It returns no value.
def_align : int
The default alignment that will be passed to the allocation function, if the
program does not specify the alignment via a 'storage_alignment' attribute.
func : tir.PrimFunc
mod : tvm.IRModule
ctx : transform.PassContext
Returns
-------
stmt : tvm.stmt
Transformed function body.
"""
vtcm_buffers = []
alignments = {}
def buf_align(var):
"""Determine the alignment of the buffer with variable 'var'."""
if var in alignments and alignments[var]:
return alignments[var][-1]
return def_align
def visit(stmt):
"""Collect information about VTCM buffers and their alignments."""
if isinstance(stmt, tvm.tir.AttrStmt):
if stmt.attr_key == "storage_alignment":
if not stmt.node in alignments:
alignments[stmt.node] = []
alignments[stmt.node].append(stmt.value)
elif isinstance(stmt, tvm.tir.Allocate):
scope = stmt.buffer_var.type_annotation.storage_scope
if scope == "local.vtcm":
vtcm_buffers.append(stmt.buffer_var)
def mutate(stmt):
"""Insert calls to VTCM allocation and deallocation routines."""
if isinstance(stmt, tvm.tir.AttrStmt):
if stmt.attr_key == "storage_alignment":
alignments[stmt.node].pop()
return stmt
if isinstance(stmt, tvm.tir.Allocate):
var = stmt.buffer_var
scope = var.type_annotation.storage_scope
is_vtcm = var in vtcm_buffers
if scope == "local.vtcm":
vtcm_buffers.pop()
if is_vtcm:
is_null = tvm.tir.call_intrin("bool", tvm.ir.Op.get("tir.isnullptr"), var)
throw_error = tvm.tir.call_intrin(
"int32", tvm.ir.Op.get("tir.tvm_throw_last_error")
)
body_w_free = tvm.tir.SeqStmt([stmt.body, tvm.tir.Evaluate(get_free(var))])
body_w_check = tvm.tir.IfThenElse(
is_null, tvm.tir.Evaluate(throw_error), body_w_free
)
return tvm.tir.LetStmt(
stmt.buffer_var, get_alloc(stmt, buf_align(var)), body_w_check
)
return stmt
raise ValueError("Wrong argument type (" + type(stmt) + ") to 'mutate'")
f = func.with_body(
tvm.tir.stmt_functor.ir_transform(
func.body, visit, mutate, ["tir.Allocate", "tir.AttrStmt"]
)
)
return f
def ir_lower_vtcm():
"""Create a VTCM lowering pass.
VTCM memory has to be allocated using special functions.
"""
def get_alloc(stmt, align):
assert isinstance(stmt, tvm.tir.Allocate)
return tvm.tir.call_extern(
"handle",
"HexagonBackendAllocateVTCM",
ft.reduce(lambda x, y: x * y, stmt.extents, 1),
align,
)
def get_free(var):
return tvm.tir.call_extern("handle", "HexagonBackendFreeVTCM", var)
# pylint: disable=bad-whitespace
@tvm.tir.transform.prim_func_pass(opt_level=0, name="Lower VTCM pass")
def transform(func, mod, ctx):
return lower_vtcm_(get_alloc, get_free, 2048, func, mod, ctx)
return transform
def ir_lower_vtcm_pass():
return [(3, ir_lower_vtcm())]
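# Example: a sketch of enabling the VTCM lowering as an extra lowering phase. The
# schedule, argument list and target below are placeholders; this assumes the
# standard "tir.add_lower_pass" PassContext option, which accepts the
# (phase, pass) pairs returned by ir_lower_vtcm_pass().
#
# with tvm.transform.PassContext(config={"tir.add_lower_pass": ir_lower_vtcm_pass()}):
#     lib = tvm.build(sched, args, target="hexagon")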
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/miopen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External function interface to MIOpen library."""
# pylint: disable-msg=C0103
import ctypes
import numpy as np
import tvm
import tvm._ffi
from tvm import te
def _get_np_int32_array_handle(arr):
"""Return a void_p handle for a numpy array
Parameters
----------
arr: numpy.NDArray
source numpy array
Returns
-------
ptr: ctypes.c_void_p
pointer to the data
"""
assert arr.dtype == np.int32
ptr = arr.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
return ctypes.cast(ptr, ctypes.c_void_p)
def conv2d_forward(
x,
w,
stride_h=1,
stride_w=1,
pad_h=0,
pad_w=0,
dilation_h=1,
dilation_w=1,
conv_mode=0,
data_type=1,
group_count=1,
):
"""Create an extern op that compute 2D convolution with MIOpen
Parameters
----------
x: Tensor
input feature map
w: Tensor
convolution weight
stride_h: int
height stride
stride_w: int
width stride
pad_h: int
height pad
pad_w: int
width pad
dilation_h: int
height dilation
dilation_w: int
width dilation
conv_mode: int
0: miopenConvolution
1: miopenTranspose
2: miopenGroupConv (selected automatically when group_count > 1)
data_type: int
0: miopenHalf (fp16)
1: miopenFloat (fp32)
group_count: int
number of groups
Returns
-------
y: Tensor
The result tensor
"""
assert 0 <= conv_mode <= 2, "0: miopenConvolution / 1: miopenTranspose / 2: miopenGroupConv"
if group_count > 1:
conv_mode = 2
oshape = np.zeros((len(x.shape)), dtype=np.int32)
xshape = x.shape
wshape = w.shape
setup_func = tvm._ffi.get_global_func("tvm.contrib.miopen.conv2d.setup")
algo = setup_func(
conv_mode,
data_type,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
xshape[0].value,
xshape[1].value,
xshape[2].value,
xshape[3].value,
wshape[0].value,
wshape[1].value,
wshape[2].value,
wshape[3].value,
group_count,
_get_np_int32_array_handle(oshape),
)
return te.extern(
list(oshape),
[x, w],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.miopen.conv2d.forward",
conv_mode,
data_type,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
algo,
ins[0],
ins[1],
outs[0],
),
name="y",
)
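# Example: a sketch of building a MIOpen-backed convolution. This requires TVM built
# with MIOpen and a ROCm device, because the setup packed function runs while the
# extern op is being constructed; the shapes and target string are illustrative only.
#
# x = te.placeholder((1, 64, 56, 56), name="x")
# w = te.placeholder((64, 64, 3, 3), name="w")
# y = conv2d_forward(x, w, stride_h=1, stride_w=1, pad_h=1, pad_w=1)
# s = te.create_schedule(y.op)
# f = tvm.build(s, [x, w, y], target="rocm -libs=miopen")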
def softmax(x, axis=-1):
"""Compute softmax with MIOpen
Parameters
----------
x : tvm.te.Tensor
The input tensor
axis : int
The axis to compute softmax over
Returns
-------
ret : tvm.te.Tensor
The result tensor
"""
return te.extern(
x.shape,
[x],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.miopen.softmax.forward", ins[0], outs[0], axis
),
name="y",
)
def log_softmax(x, axis=-1):
"""Compute log softmax with MIOpen
Parameters
----------
x : tvm.te.Tensor
The input tensor
axis : int
The axis to compute log softmax over
Returns
-------
ret : tvm.te.Tensor
The result tensor
"""
return te.extern(
x.shape,
[x],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.miopen.log_softmax.forward", ins[0], outs[0], axis
),
name="y",
)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/mkl.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External function interface to BLAS libraries."""
import tvm
from tvm import te
def matmul(lhs, rhs, transa=False, transb=False, **kwargs):
"""Create an extern op that compute matrix mult of A and rhs with CrhsLAS
This function serves as an example on how to call external libraries.
Parameters
----------
lhs: Tensor
The left matrix operand
rhs: Tensor
The right matrix operand
transa: bool
Whether transpose lhs
transb: bool
Whether transpose rhs
Returns
-------
C: Tensor
The result tensor.
"""
n = lhs.shape[1] if transa else lhs.shape[0]
m = rhs.shape[0] if transb else rhs.shape[1]
return te.extern(
(n, m),
[lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.mkl.matmul", ins[0], ins[1], outs[0], transa, transb
),
name="C",
**kwargs,
)
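# Example: a sketch of using the MKL matmul extern op. TVM must be built with MKL
# support so that the packed function "tvm.contrib.mkl.matmul" is registered; the
# shapes are illustrative only.
#
# A = te.placeholder((1024, 512), name="A")
# B = te.placeholder((512, 256), name="B")
# C = matmul(A, B)
# s = te.create_schedule(C.op)
# f = tvm.build(s, [A, B, C], target="llvm")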
def matmul_u8s8s32(lhs, rhs, transa=False, transb=False, **kwargs):
"""Create an extern op that compute matrix mult of A and rhs with CrhsLAS
This function serves as an example on how to call external libraries.
Parameters
----------
lhs: Tensor
The left matrix operand
rhs: Tensor
The right matrix operand
transa: bool
Whether transpose lhs
transb: bool
Whether transpose rhs
Returns
-------
C: Tensor
The result tensor.
"""
n = lhs.shape[1] if transa else lhs.shape[0]
m = rhs.shape[0] if transb else rhs.shape[1]
return te.extern(
(n, m),
[lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.mkl.matmul_u8s8s32", ins[0], ins[1], outs[0], transa, transb
),
name="C",
**kwargs,
)
def batch_matmul(lhs, rhs, transa=False, transb=False, iterative=False, **kwargs):
"""Create an extern op that compute batched matrix mult of A and rhs with mkl
This function serves as an example on how to call external libraries.
Parameters
----------
lhs: Tensor
The left matrix operand
rhs: Tensor
The right matrix operand
transa: bool
Whether transpose lhs
transb: bool
Whether transpose rhs
Returns
-------
C: Tensor
The result tensor.
"""
b = te.max(lhs.shape[0], rhs.shape[0])
n = lhs.shape[2] if transa else lhs.shape[1]
m = rhs.shape[1] if transb else rhs.shape[2]
return te.extern(
(b, n, m),
[lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.mkl.batch_matmul"
if not iterative
else "tvm.contrib.mkl.batch_matmul_iterative",
ins[0],
ins[1],
outs[0],
transa,
transb,
),
name="C",
**kwargs,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/mps.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External function interface to MPS libraries."""
import tvm
from tvm import te
# pylint: disable=C0103,W0612
def matmul(lhs, rhs, transa=False, transb=False):
"""Create an extern op that compute matrix mult of A and rhs with CrhsLAS
This function serves as an example on how to calle external libraries.
Parameters
----------
lhs : Tensor
The left matrix operand
rhs : Tensor
The right matrix operand
transa : bool
Whether transpose lhs
transb : bool
Whether transpose rhs
Returns
-------
C : Tensor
The result tensor.
"""
m = lhs.shape[1] if transa else lhs.shape[0]
n = rhs.shape[0] if transb else rhs.shape[1]
return te.extern(
(m, n),
[lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.mps.matmul", ins[0], ins[1], outs[0], transa, transb
),
name="C",
)
def conv2d(data, weight, pad="SAME", stride=1):
"""
Create an extern op that compute data * weight and return result in output
Parameters:
----------
data: Tensor
The input data, format NHWC
weight: Tensor
The conv weight, format output_feature * kH * kW * input_feature
pad: str
Padding method, 'SAME' or 'VALID'
stride: int
convolution stride
Returns
-------
output: Tensor
The result tensor
"""
n, hi, wi, ci = data.shape
co, kh, kw, ciw = weight.shape
padding = 0 if pad == "SAME" else 1
ho = hi // stride
wo = wi // stride
return te.extern(
(n, ho, wo, co),
[data, weight],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.mps.conv2d", ins[0], ins[1], outs[0], padding, stride
),
name="C",
)
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/mxnet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""MXNet bridge wrap Function MXNet's async function."""
from __future__ import absolute_import as _abs
import tvm._ffi.registry
import tvm.runtime._ffi_api
from tvm.runtime import Module
# pylint: disable=invalid-name
_wrap_async = None
def to_mxnet_func(func, const_loc=None):
"""Wrap a TVM function as MXNet function
MXNet function runs asynchronously via its engine.
Parameters
----------
func : Function
A TVM function that can take positional arguments
const_loc : list of int
List of integers indicating the positions of read-only NDArray arguments.
NDArray arguments at positions that are not annotated will be treated
as mutable arrays by MXNet's engine.
Returns
-------
async_func : Function
A function that can take MXNet NDArray as argument
in places that used to expect TVM NDArray.
Runs asynchronously in MXNet's async engine.
"""
# only import mxnet when wrap get called.
# pylint: disable=import-self, import-outside-toplevel
import mxnet
if isinstance(func, Module):
func = func.entry_func
def _get_bridge_func():
"""Get MXNet bridge function"""
if not mxnet.base._LIB.MXTVMBridge:
raise RuntimeError(
"MXTVMBridge not exist in mxnet package," " please update to latest version"
)
fdict = tvm._ffi.registry.extract_ext_funcs(mxnet.base._LIB.MXTVMBridge)
ret = fdict["WrapAsyncCall"]
ret.is_global = True
return ret
global _wrap_async
if _wrap_async is None:
# Register extension type in first time
_wrap_async = _get_bridge_func()
tvm._ffi.registry.register_extension(mxnet.nd.NDArray)
const_loc = const_loc if const_loc else []
return _wrap_async(func, tvm.runtime._ffi_api.TVMSetStream, len(const_loc), *const_loc)
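# Example: a sketch of wrapping a compiled TVM function for MXNet. The schedule,
# tensors and MXNet arrays below are placeholders; this requires an MXNet build
# that exposes the MXTVMBridge symbol.
#
# f = tvm.build(s, [A, B], target="cuda")
# mx_f = to_mxnet_func(f, const_loc=[0])   # argument 0 is read-only
# mx_f(mx_a, mx_b)                         # mx_a, mx_b are mxnet.nd.NDArray on GPU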
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/ndk.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Util to invoke NDK compiler toolchain."""
# pylint: disable=invalid-name
from __future__ import absolute_import as _abs
import subprocess
import os
from .._ffi.base import py_str
from .cc import get_target_by_dump_machine
def create_shared(output, objects, options=None):
"""Create shared library.
Parameters
----------
output : str
The target shared library.
objects : list
List of object files.
options : list of str, optional
The additional options.
"""
if "TVM_NDK_CC" not in os.environ:
raise RuntimeError(
"Require environment variable TVM_NDK_CC" " to be the NDK standalone compiler"
)
compiler = os.environ["TVM_NDK_CC"]
cmd = [compiler]
cmd += ["-o", output]
if isinstance(objects, str):
cmd += [objects]
else:
cmd += objects
options = options if options else ["-shared", "-fPIC", "-lm"]
cmd += options
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Compilation error:\n"
msg += py_str(out)
raise RuntimeError(msg)
# assign output format
create_shared.output_format = "so"
create_shared.get_target_triple = (
get_target_by_dump_machine(os.environ["TVM_NDK_CC"]) if "TVM_NDK_CC" in os.environ else None
)
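# Example: a sketch of cross-compiling a TVM module for Android with the NDK.
# The compiler path and the "lib" module below are placeholders:
#
# os.environ["TVM_NDK_CC"] = (
#     "/opt/android-ndk/toolchains/llvm/prebuilt/linux-x86_64/bin/"
#     "aarch64-linux-android28-clang++"
# )
# lib.export_library("deploy.so", fcompile=create_shared)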
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/nnpack.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External function interface to NNPACK libraries."""
import tvm
from tvm import te
import tvm._ffi
def is_available():
"""Check whether NNPACK is available, that is, `nnp_initialize()`
returns `nnp_status_success`.
"""
return _initialize() == 0
def fully_connected_inference(lhs, rhs, nthreads=1):
"""Create an extern op that compute fully connected of 1D tensor lhs and
2D tensor rhs with nnpack.
Parameters
----------
lhs : Tensor
lhs 1D array input[input_channels] of FP32 elements
rhs : Tensor
rhs 2D matrix kernel[output_channels][input_channels] of FP32 elements
Returns
-------
C : Tensor
1D array out[output_channels] of FP32 elements.
"""
m = rhs.shape[0]
return te.extern(
(m,),
[lhs, rhs],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.nnpack.fully_connected_inference", ins[0], ins[1], outs[0], nthreads
),
name="C",
)
class ConvolutionAlgorithm:
AUTO = 0
FFT_8x8 = 1
FFT_16x16 = 2
WT_8x8 = 3
IMPLICIT_GEMM = 4
DIRECT = 5
WT_8x8_FP16 = 6
class ConvolutionTransformStrategy:
COMPUTE = 1
PRECOMPUTE = 2
def convolution_inference(
data, kernel, bias, padding, stride, nthreads=1, algorithm=ConvolutionAlgorithm.AUTO
):
"""Create an extern op to do inference convolution of 4D tensor data and
4D tensor kernel and 1D tensor bias with nnpack.
Parameters
----------
data : Tensor
data 4D tensor input[batch][input_channels][input_height][input_width] of
FP32 elements.
kernel : Tensor
kernel 4D tensor kernel[output_channels][input_channels][kernel_height]
[kernel_width] of FP32 elements.
bias : Tensor
bias 1D array bias[output_channels] of FP32 elements.
padding : list
padding A 4-dim list of [pad_top, pad_bottom, pad_left, pad_right],
which indicates the padding around the feature map.
stride : list
stride A 2-dim list of [stride_height, stride_width], which indicates
the stride.
Returns
-------
output : Tensor
output 4D tensor output[batch][output_channels][output_height][output_width]
of FP32 elements.
"""
assert isinstance(padding, list) and len(padding) == 4
assert isinstance(stride, list) and len(stride) == 2
batch, _, input_height, input_width = data.shape
output_channels, _, kernel_height, kernel_width = kernel.shape
idxdiv = te.indexdiv
output_height = idxdiv(input_height + padding[0] + padding[1] - kernel_height, stride[0]) + 1
output_width = idxdiv(input_width + padding[2] + padding[3] - kernel_width, stride[1]) + 1
return te.extern(
(batch, output_channels, output_height, output_width),
[data, kernel, bias] if bias is not None else [data, kernel],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.nnpack.convolution_inference",
ins[0],
ins[1],
ins[2] if bias is not None else 0,
outs[0],
padding[0],
padding[1],
padding[2],
padding[3],
stride[0],
stride[1],
nthreads,
algorithm,
),
name="C",
)
def convolution_inference_without_weight_transform(
data, transformed_kernel, bias, padding, stride, nthreads=1, algorithm=ConvolutionAlgorithm.AUTO
):
"""Create an extern op to do inference convolution of 4D tensor data and
4D pre-transformed tensor kernel and 1D tensor bias with nnpack.
Parameters
----------
data : Tensor
data 4D tensor input[batch][input_channels][input_height][input_width] of
FP32 elements.
transformed_kernel : Tensor
transformed_kernel 4D tensor kernel[output_channels][input_channels][tile]
[tile] of FP32 elements.
bias : Tensor
bias 1D array bias[output_channels] of FP32 elements.
padding : list
padding A 4-dim list of [pad_top, pad_bottom, pad_left, pad_right],
which indicates the padding around the feature map.
stride : list
stride A 2-dim list of [stride_height, stride_width], which indicates
the stride.
Returns
-------
output : Tensor
output 4D tensor output[batch][output_channels][output_height][output_width]
of FP32 elements.
"""
assert algorithm in (ConvolutionAlgorithm.WT_8x8, ConvolutionAlgorithm.WT_8x8_FP16)
assert isinstance(padding, list) and len(padding) == 4
assert isinstance(stride, list) and len(stride) == 2
batch, _, input_height, input_width = data.shape
output_channels, _, _, _ = transformed_kernel.shape
kernel_height, kernel_width = (3, 3)
idxdiv = te.indexdiv
output_height = idxdiv(input_height + padding[0] + padding[1] - kernel_height, stride[0]) + 1
output_width = idxdiv(input_width + padding[2] + padding[3] - kernel_width, stride[1]) + 1
return te.extern(
(batch, output_channels, output_height, output_width),
[data, transformed_kernel, bias] if bias is not None else [data, transformed_kernel],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.nnpack.convolution_inference_without_weight_transform",
ins[0],
ins[1],
ins[2] if bias is not None else 0,
outs[0],
padding[0],
padding[1],
padding[2],
padding[3],
stride[0],
stride[1],
nthreads,
algorithm,
),
name="C",
dtype="float32",
)
def convolution_inference_weight_transform(
kernel, nthreads=1, algorithm=ConvolutionAlgorithm.AUTO, dtype="float32"
):
"""Create an extern op to do inference convolution of 3D tensor data and
4D tensor kernel and 1D tensor bias with nnpack.
Parameters
----------
kernel : Tensor
kernel 4D tensor kernel[output_channels][input_channels][kernel_height]
[kernel_width] of FP32 elements.
Returns
-------
output : Tensor
output 4D tensor output[output_channels][input_channels][tile][tile]
of FP32 elements.
"""
assert algorithm in (ConvolutionAlgorithm.WT_8x8, ConvolutionAlgorithm.WT_8x8_FP16)
output_channels, input_channels, _, _ = kernel.shape
transform_tile_size = 8
if not isinstance(dtype, str):
dtype = dtype.dtype
return te.extern(
(output_channels, input_channels, transform_tile_size, transform_tile_size),
[kernel],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.nnpack.convolution_inference_weight_transform",
ins[0],
outs[0],
nthreads,
algorithm,
),
name="transform_kernel",
dtype=dtype,
)
tvm._ffi._init_api("tvm.contrib.nnpack")
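# Example: a sketch of the Winograd pre-transform flow, pairing
# convolution_inference_weight_transform with
# convolution_inference_without_weight_transform. The data, kernel and bias tensors
# are placeholders, and TVM must be built with NNPACK:
#
# transformed_kernel = convolution_inference_weight_transform(
#     kernel, algorithm=ConvolutionAlgorithm.WT_8x8)
# output = convolution_inference_without_weight_transform(
#     data, transformed_kernel, bias, padding=[1, 1, 1, 1], stride=[1, 1],
#     algorithm=ConvolutionAlgorithm.WT_8x8)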
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/nvcc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Utility to invoke nvcc compiler in the system"""
from __future__ import absolute_import as _abs
import subprocess
import os
import warnings
import tvm._ffi
from tvm.target import Target
from . import utils
from .._ffi.base import py_str
def compile_cuda(code, target_format="ptx", arch=None, options=None, path_target=None):
"""Compile cuda code with NVCC from env.
Parameters
----------
code : str
The cuda code.
target_format : str
The target format of nvcc compiler.
arch : str
The cuda architecture.
options : str or list of str
The additional options.
path_target : str, optional
Output file.
Return
------
cubin : bytearray
The bytearray of the cubin
"""
if arch is None:
# If None, then it will use `tvm.target.Target.current().arch`.
# Target arch could be a str like "sm_xx", or a list, such as
# [
# "-gencode", "arch=compute_52,code=sm_52",
# "-gencode", "arch=compute_70,code=sm_70"
# ]
compute_version = "".join(
get_target_compute_version(Target.current(allow_none=True)).split(".")
)
arch = ["-gencode", f"arch=compute_{compute_version},code=sm_{compute_version}"]
temp = utils.tempdir()
if target_format not in ["cubin", "ptx", "fatbin"]:
raise ValueError("target_format must be in cubin, ptx, fatbin")
temp_code = temp.relpath("my_kernel.cu")
temp_target = temp.relpath("my_kernel.%s" % target_format)
with open(temp_code, "w") as out_file:
out_file.write(code)
file_target = path_target if path_target else temp_target
cmd = ["nvcc"]
cmd += ["--%s" % target_format, "-O3"]
if isinstance(arch, list):
cmd += arch
elif isinstance(arch, str):
cmd += ["-arch", arch]
if options:
if isinstance(options, str):
cmd += [options]
elif isinstance(options, list):
cmd += options
else:
raise ValueError("options must be str or list of str")
cmd += ["-o", file_target]
cmd += [temp_code]
# NOTE: ccbin option can be used to tell nvcc where to find the c++ compiler
# just in case it is not in the path. On Windows it is not in the path by default.
# However, we cannot rely on TVM_CXX_COMPILER_PATH because it may not be set in
# the runtime environment. Because it is hard to do runtime compiler detection,
# we require nvcc to be configured correctly by default.
# if cxx_compiler_path != "":
# cmd += ["-ccbin", cxx_compiler_path]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = code
msg += "\nCompilation error:\n"
msg += py_str(out)
raise RuntimeError(msg)
with open(file_target, "rb") as f:
data = bytearray(f.read())
if not data:
raise RuntimeError("Compilation error: empty result is generated")
return data
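# Example: a sketch of compiling a CUDA kernel string to PTX for a specific
# architecture (the source string and "sm_80" are placeholders; nvcc must be
# available on the host):
#
# ptx = compile_cuda(kernel_source, target_format="ptx", arch="sm_80")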
def find_cuda_path():
"""Utility function to find cuda path
Returns
-------
path : str
Path to cuda root.
"""
if "CUDA_PATH" in os.environ:
return os.environ["CUDA_PATH"]
cmd = ["which", "nvcc"]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
out = py_str(out)
if proc.returncode == 0:
return os.path.realpath(os.path.join(str(out).strip(), "../.."))
cuda_path = "/usr/local/cuda"
if os.path.exists(os.path.join(cuda_path, "bin/nvcc")):
return cuda_path
raise RuntimeError("Cannot find cuda path")
def get_cuda_version(cuda_path=None):
"""Utility function to get cuda version
Parameters
----------
cuda_path : Optional[str]
Path to cuda root. If None is passed, will use
`find_cuda_path()` as default.
Returns
-------
version : float
The cuda version
"""
if cuda_path is None:
cuda_path = find_cuda_path()
version_file_path = os.path.join(cuda_path, "version.txt")
if not os.path.exists(version_file_path):
# Debian/Ubuntu repackaged CUDA path
version_file_path = os.path.join(cuda_path, "lib", "cuda", "version.txt")
try:
with open(version_file_path) as f:
version_str = f.read().strip().split()[-1]
return tuple(int(field) for field in version_str.split("."))
except FileNotFoundError:
pass
cmd = [os.path.join(cuda_path, "bin", "nvcc"), "--version"]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
out = py_str(out)
if proc.returncode == 0:
release_line = [l for l in out.split("\n") if "release" in l][0]
release_fields = [s.strip() for s in release_line.split(",")]
version_str = [f[1:] for f in release_fields if f.startswith("V")][0]
return tuple(int(field) for field in version_str.split("."))
raise RuntimeError("Cannot read cuda version file")
@tvm._ffi.register_func
def tvm_callback_cuda_compile(code):
"""use nvcc to generate fatbin code for better optimization"""
fatbin = compile_cuda(code, target_format="fatbin")
return fatbin
@tvm._ffi.register_func("tvm_callback_libdevice_path")
def find_libdevice_path(arch):
"""Utility function to find libdevice
Parameters
----------
arch : int
The compute architecture in int
Returns
-------
path : str
Path to libdevice.
"""
cuda_path = find_cuda_path()
lib_path = os.path.join(cuda_path, "nvvm/libdevice")
if not os.path.exists(lib_path):
# Debian/Ubuntu repackaged CUDA path
lib_path = os.path.join(cuda_path, "lib/nvidia-cuda-toolkit/libdevice")
selected_ver = 0
selected_path = None
cuda_ver = get_cuda_version(cuda_path)
major_minor = (cuda_ver[0], cuda_ver[1])
if major_minor in (
(9, 0),
(9, 1),
(10, 0),
(10, 1),
(10, 2),
(11, 0),
(11, 1),
(11, 2),
(11, 3),
):
path = os.path.join(lib_path, "libdevice.10.bc")
else:
for fn in os.listdir(lib_path):
if not fn.startswith("libdevice"):
continue
try:
# expected pattern: libdevice.${ARCH}.10.bc
# e.g., libdevice.compute_20.10.bc
ver = int(fn.split(".")[-3].split("_")[-1])
if selected_ver < ver <= arch:
selected_ver = ver
selected_path = fn
except ValueError:
# it can just be `libdevice.10.bc` in CUDA 10
selected_path = fn
if selected_path is None:
raise RuntimeError("Cannot find libdevice for arch {}".format(arch))
path = os.path.join(lib_path, selected_path)
return path
def callback_libdevice_path(arch):
try:
return find_libdevice_path(arch)
except RuntimeError:
warnings.warn("Cannot find libdevice path")
return ""
def get_target_compute_version(target=None):
"""Utility function to get compute capability of compilation target.
Looks for the target arch in three different places, first in the target input, then the
Target.current() scope, and finally the GPU device (if it exists).
Parameters
----------
target : tvm.target.Target, optional
The compilation target
Returns
-------
compute_version : str
compute capability of a GPU (e.g. "8.6")
"""
# 1. input target object
# 2. Target.current()
target = target or Target.current()
if target and target.arch:
major, minor = target.arch.split("_")[1]
return major + "." + minor
# 3. GPU compute version
if tvm.cuda(0).exist:
return tvm.cuda(0).compute_version
raise ValueError(
"No CUDA architecture was specified or GPU detected."
"Try specifying it by adding '-arch=sm_xx' to your target."
)
def parse_compute_version(compute_version):
"""Parse compute capability string to divide major and minor version
Parameters
----------
compute_version : str
compute capability of a GPU (e.g. "6.0")
Returns
-------
major : int
major version number
minor : int
minor version number
"""
split_ver = compute_version.split(".")
try:
major = int(split_ver[0])
minor = int(split_ver[1])
return major, minor
except (IndexError, ValueError) as err:
# pylint: disable=raise-missing-from
raise RuntimeError("Compute version parsing error: " + str(err))
def have_fp16(compute_version):
"""Either fp16 support is provided in the compute capability or not
Parameters
----------
compute_version: str
compute capability of a GPU (e.g. "6.0")
"""
major, minor = parse_compute_version(compute_version)
# fp 16 support in reference to:
# https://docs.nvidia.com/cuda/cuda-c-programming-guide/#arithmetic-instructions
if major == 5 and minor == 3:
return True
if major >= 6:
return True
return False
def have_int8(compute_version):
"""Either int8 support is provided in the compute capability or not
Parameters
----------
compute_version : str
compute capability of a GPU (e.g. "6.1")
"""
major, _ = parse_compute_version(compute_version)
if major >= 6:
return True
return False
def have_tensorcore(compute_version=None, target=None):
"""Either TensorCore support is provided in the compute capability or not
Parameters
----------
compute_version : str, optional
compute capability of a GPU (e.g. "7.0").
target : tvm.target.Target, optional
The compilation target, will be used to determine arch if compute_version
isn't specified.
"""
if compute_version is None:
if tvm.cuda(0).exist:
compute_version = tvm.cuda(0).compute_version
else:
if target is None or "arch" not in target.attrs:
warnings.warn(
"Tensorcore will be disabled due to no CUDA architecture specified."
"Try specifying it by adding '-arch=sm_xx' to your target."
)
return False
compute_version = target.attrs["arch"]
# Compute version will be in the form "sm_{major}{minor}"
major, minor = compute_version.split("_")[1]
compute_version = major + "." + minor
major, _ = parse_compute_version(compute_version)
if major >= 7:
return True
return False
def have_cudagraph():
"""Either CUDA Graph support is provided"""
try:
cuda_ver = get_cuda_version()
if cuda_ver < (10, 0):
return False
return True
except RuntimeError:
return False
def have_bf16(compute_version):
"""Either bf16 support is provided in the compute capability or not
Parameters
----------
compute_version : str
compute capability of a GPU (e.g. "8.0")
"""
major, _ = parse_compute_version(compute_version)
if major >= 8:
return True
return False
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/peak.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""measure bandwidth and compute peak"""
import logging
import tvm
from tvm import te
from tvm.target import Target
from . import utils
from .. import rpc
def _convert_to_remote(func, remote):
"""convert module function to remote rpc function"""
temp = utils.tempdir()
path_dso = temp.relpath("tmp_func.tar")
func.export_library(path_dso)
remote.upload(path_dso)
func = remote.load_module("tmp_func.tar")
return func
def measure_bandwidth_sum(
total_item,
item_per_thread,
stride,
base_type,
bits,
lanes,
target,
target_host,
remote,
dev,
n_times,
):
"""measure memory bandwidth of gpu by product reduction for a given type
The IR for measurement is
for each thread
for i in 1..num_per_thread:
y[global_id] = y[global_id] * x[base + i * stride]
Parameters
----------
total_item: int
number of elements in input array
item_per_thread: int
number of elements each thread accumulates
stride: int
stride in memory access
base_type: str
can be "int", "float"
bits: int
can be 16, 32
lanes: int
lane of the vector type, can be 1, 2, 4, 8, 16
target: :any:`tvm.target.Target`
the target and option of the compilation.
target_host : str or :any:`tvm.target.Target`
host compilation target
dev: Device
the device of array
remote: tvm.rpc.RPCSession
remote rpc session
n_times: int
number of runs for taking mean
Returns
-------
GBPS: float
gigabyte per second
"""
target, target_host = Target.canon_target_and_host(target, target_host)
n, m = total_item, item_per_thread
n //= lanes
base_type = str(base_type) + str(bits)
dtype = base_type if lanes == 1 else base_type + "x" + str(lanes)
k = te.reduce_axis((0, m), name="k")
x = te.placeholder((n,), dtype=dtype, name="x")
op = te.comm_reducer(lambda x, y: x * y, lambda t: tvm.tir.const(1, dtype=t), name="sum")
y = te.compute(
(n // m,), lambda i: op(x[i // stride * stride * m + i % stride + k * stride], axis=k)
)
s = te.create_schedule(y.op)
yo, yi = s[y].split(y.op.axis[0], target.max_num_threads)
s[y].bind(yo, te.thread_axis("blockIdx.x"))
s[y].bind(yi, te.thread_axis("threadIdx.x"))
s[y].unroll(k)
try:
func = tvm.build(s, [x, y], target)
x = tvm.nd.empty((n,), dtype=dtype, device=dev)
y = tvm.nd.empty((n // m,), dtype=dtype, device=dev)
func = _convert_to_remote(func, remote)
time_f = func.time_evaluator(func.entry_name, dev, number=n_times)
time = time_f(x, y).mean
except tvm._ffi.base.TVMError:
# build error (occur when device does not support half)
return -1
return 1.0 * (total_item * bits / 8) / 1e9 / time
def measure_bandwidth_all_types(
total_item, item_per_thread, n_times, target, target_host, remote, dev, verbose=True
):
"""measure memory bandwidth for all types
Parameters
----------
total_item: int
number of elements in input array
item_per_thread: int
number of elements each thread accumulates
n_times: int
number of runs for averaging
target: :any:`tvm.target.Target`
the target and option of the compilation.
target_host : str or :any:`tvm.target.Target`
host compilation target
remote: tvm.rpc.RPCSession
remote rpc session
dev: Device
the device of array
verbose: bool
whether to log each intermediate result as it is measured
Returns
-------
result: list
a list of (type_name, GBPS) pairs
"""
target, target_host = Target.canon_target_and_host(target, target_host)
max_threads = target.max_num_threads
result = []
for base_type in ["float"]:
for bits in [32]:
for lanes in [1, 2, 4, 8, 16]:
max_speed = -1e9
# try different strides
for stride in [max_threads, total_item // (lanes * item_per_thread)]:
speed = measure_bandwidth_sum(
total_item,
item_per_thread,
stride,
base_type,
bits,
lanes,
target,
target_host,
remote,
dev,
n_times,
)
max_speed = max(max_speed, speed)
type_name = base_type + str(bits)
result.append(["%sx%d" % (type_name, lanes), max_speed])
if verbose:
logging.info("\t%-10s %.2f GBPS", result[-1][0], result[-1][1])
return result
def measure_compute_mad(
total_item, item_per_thread, base_type, bits, lanes, target, target_host, remote, dev, n_times
):
"""measure peak compute speed by computing mad for a type
The IR for measurement is
for each thread
for i in 1..item_per_thread
x = mad(x, x, y)
y = mad(y, y, x)
Parameters
----------
total_item: int
number of elements in input array
item_per_thread: int
number of operations each thread does
base_type: str
can be "int", "float"
bits: int
can be 16, 32
lanes: int
lane of the vector type, can be 1, 2, 4, 8, 16
target: :any:`tvm.target.Target`
the target and option of the compilation.
target_host : str or :any:`tvm.target.Target`
host compilation target
remote: tvm.rpc.RPCSession
if it is not None, use remote rpc session
dev: Device
the device of array
n_times: int
number of runs for taking mean
Returns
-------
GOPS: float
giga operation per second
"""
target, target_host = Target.canon_target_and_host(target, target_host)
n = total_item
if bits >= 64 or lanes >= 16:
n //= 2
max_threads = target.max_num_threads
base_type = str(base_type) + str(bits)
dtype = base_type if lanes == 1 else base_type + "x" + str(lanes)
def extern(ins, outs):
# pylint: disable=unused-argument
"""construct measurement function by building IR directly"""
ib = tvm.tir.ir_builder.create()
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
ib.scope_attr(bx, "thread_extent", n // max_threads)
ib.scope_attr(tx, "thread_extent", max_threads)
idx = bx.var * max_threads + tx.var
a = ib.allocate(dtype, (1), name="a", scope="local")
b = ib.allocate(dtype, (1), name="b", scope="local")
a[0] = outs[0].vload(idx, dtype)
b[0] = outs[0].vload(idx, dtype)
if base_type.find("float") != -1:
def mad_func(x, y):
return x * x + y
else:
def mad_func(x, y):
return y * y + x
for _ in range(item_per_thread // 4 // lanes):
a[0] = mad_func(a[0], b[0])
b[0] = mad_func(b[0], a[0])
ib.emit(outs[0].vstore(idx, b[0]))
return ib.get()
y = te.extern((n,), [], extern, name="y", dtype=dtype)
s = te.create_schedule(y.op)
try:
func = tvm.build(s, [y], target)
func = _convert_to_remote(func, remote)
time_f = func.time_evaluator(func.entry_name, dev, number=n_times)
y = tvm.nd.empty((n,), dtype=dtype, device=dev)
time = time_f(y).mean
except tvm._ffi.base.TVMError:
# build error (occur when device does not support half)
return -1
return 1.0 * (n * item_per_thread) / 1e9 / time
def measure_compute_all_types(
total_item, item_per_thread, n_times, target, target_host, remote, dev, verbose=True
):
"""measure peak flops for all types
Parameters
----------
total_item: int
number of elements in input array
item_per_thread: int
number of elements each thread accumulates
n_times: int
number of runs for averaging
target: :any:`tvm.target.Target`
the target and option of the compilation.
target_host : str or :any:`tvm.target.Target`
host compilation target
remote: tvm.rpc.RPCSession
remote rpc session
dev: Device
the device of array
verbose: bool
whether to log each intermediate result as it is measured
Returns
-------
result: list
a list of (type_name, GFLOPS/GIOPS) pairs
"""
target, target_host = Target.canon_target_and_host(target, target_host)
result = []
for base_type in ["float", "int"]:
for bits in [16, 32, 64]:
for lanes in [1, 2, 4, 8, 16]:
if base_type == "int" and bits != 32: # only measure int32
continue
max_speed = -1e9
for per_thread in [item_per_thread // 2, item_per_thread, item_per_thread * 2]:
speed = measure_compute_mad(
total_item,
per_thread,
base_type,
bits,
lanes,
target,
target_host,
remote,
dev,
n_times,
)
max_speed = max(max_speed, speed)
type_name = base_type + str(bits)
result.append(["%sx%d" % (type_name, lanes), max_speed])
unit = "GFLOPS" if base_type == "float" else "GIOPS"
if verbose:
logging.info("\t%-10s %.2f %s", result[-1][0], result[-1][1], unit)
return result
def measure_peak_all(target, target_host, host, port):
"""measure memory bandwidth and peak compute for gpu devices
Parameters
----------
target: str or :any:`tvm.target.Target`
target_host: str
host: str
port: int
"""
target, target_host = Target.canon_target_and_host(target, target_host)
remote = rpc.connect(host, port)
n_times = 20
bandwidth_total_item = 1 << 25
bandwidth_item_per_thread = 32
compute_total_item = 1 << 21
compute_item_per_thread = 4096
if str(target).startswith("opencl"):
dev = remote.cl()
elif str(target).startswith("cuda"):
dev = remote.cuda()
elif str(target).startswith("metal"):
dev = remote.metal()
else:
raise RuntimeError("Unsupported target")
logging.info("========== measure memory bandwidth ==========")
measure_bandwidth_all_types(
bandwidth_total_item, bandwidth_item_per_thread, n_times, target, target_host, remote, dev
)
logging.info("========== measure peak compute ==========")
measure_compute_all_types(
compute_total_item, compute_item_per_thread, n_times, target, target_host, remote, dev
)
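# Example: a sketch of measuring a CUDA device through an RPC server. The host and
# port assume an RPC server for the device is already listening there:
#
# measure_peak_all("cuda", "llvm", "127.0.0.1", 9090)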
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/pickle_memoize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Memoize result of function via pickle, used for cache testcases."""
# pylint: disable=broad-except,superfluous-parens
import os
import sys
import atexit
from decorator import decorate
from .._ffi.base import string_types
try:
import cPickle as pickle
except ImportError:
import pickle
class Cache(object):
"""A cache object for result cache.
Parameters
----------
key: str
The file key to the function
save_at_exit: bool
Whether save the cache to file when the program exits
"""
cache_by_key = {}
def __init__(self, key, save_at_exit):
cache_dir = ".pkl_memoize_py{0}".format(sys.version_info[0])
try:
os.mkdir(cache_dir)
except FileExistsError:
pass
else:
self.cache = {}
self.path = os.path.join(cache_dir, key)
if os.path.exists(self.path):
try:
with open(self.path, "rb") as cache_file:
    self.cache = pickle.load(cache_file)
except Exception:
self.cache = {}
else:
self.cache = {}
self.dirty = False
self.save_at_exit = save_at_exit
def save(self):
if self.dirty:
print("Save memoize result to %s" % self.path)
with open(self.path, "wb") as out_file:
pickle.dump(self.cache, out_file, pickle.HIGHEST_PROTOCOL)
@atexit.register
def _atexit():
"""Save handler."""
for value in Cache.cache_by_key.values():
if value.save_at_exit:
value.save()
def memoize(key, save_at_exit=False):
"""Memoize the result of function and reuse multiple times.
Parameters
----------
key: str
The unique key to the file
save_at_exit: bool
Whether save the cache to file when the program exits
Returns
-------
fmemoize : function
The decorator function to perform memoization.
"""
def _register(f):
"""Registration function"""
allow_types = (string_types, int, float, tuple)
fkey = key + "." + f.__name__ + ".pkl"
if fkey not in Cache.cache_by_key:
Cache.cache_by_key[fkey] = Cache(fkey, save_at_exit)
cache = Cache.cache_by_key[fkey]
cargs = tuple(x.cell_contents for x in f.__closure__) if f.__closure__ else ()
cargs = (len(cargs),) + cargs
def _memoized_f(func, *args, **kwargs):
assert not kwargs, "Only allow positional call"
key = cargs + args
for arg in key:
if isinstance(arg, tuple):
for x in arg:
assert isinstance(x, allow_types)
else:
assert isinstance(arg, allow_types)
if key in cache.cache:
return cache.cache[key]
res = func(*args)
cache.cache[key] = res
cache.dirty = True
return res
return decorate(f, _memoized_f)
return _register
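# Example: caching the result of a test-data generator. The decorated function may
# only be called with positional arguments of the allowed types (str, int, float,
# tuple) and its result must be picklable; the cache key below is a made-up example.
#
# @memoize("example.cached_range", save_at_exit=True)
# def cached_range(n):
#     return list(range(n))
#
# cached_range(16)  # computed once, served from the pickle cache afterwards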
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/pipeline_executor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Pipeline executor that executes a series of modules in a pipeline fashion."""
import json
import os
import time
from tvm import runtime
from tvm._ffi import get_global_func
from tvm.contrib import graph_executor
def pipeline_executor_enabled():
"""Check if the pipeline executor is enabled.
Return
-------
enable: bool
Return whether the pipeline executor is enabled.
"""
return get_global_func("tvm.pipeline_executor.create", allow_missing=True) is not None
class PipelineModule(object):
"""Wrapper of runtime module, caller can use this module to set parameters and get outputs.
Parameters
----------
module : Union[PipelineExecutorFactoryModule, Module]
Common interface for pipeline executor factory modules or Module.
"""
def __init__(self, module):
if isinstance(module, PipelineExecutorFactoryModule):
self.module = module.get_pipeline_executor_module()
else:
self.module = module
# Get the packed functions from the pipeline executor.
self._get_params_group_pipeline_map = self.module["get_params_group_pipeline_map"]
self._run = self.module["run"]
self._set_param = self.module["set_param"]
self._set_input = self.module["set_input"]
self._get_input = self.module["get_input"]
self._get_output = self.module["get_output"]
self._get_num_outputs = self.module["get_num_outputs"]
self._get_num_inputs = self.module["get_num_inputs"]
self._get_input_pipeline_map = self.module["get_input_pipeline_map"]
self._get_pipe_execute_count = self.module["get_execute_count"]
def run(self):
"""Run the pipeline executor."""
self._run()
def get_input_pipeline_map(self, name):
"""Using the "name" to get the corresponding subgraph index and also get the "input name"
of the corresponding subgraph interface.
Returns
-------
input map: Array[str]
Returning the index and "input name" of the subgraph.
"""
return self._get_input_pipeline_map(name)
def get_params_group_pipeline_map(self, name):
"""Use the name of the parameters group to get the corresponding runtime module index.
Parameters
----------
name: str
The parameter group name.
Returns
-------
module_index: int
The index of the runtime module.
"""
return self._get_params_group_pipeline_map(name)
def set_input(self, key, value):
"""Set the input via input name.
Parameters
----------
key : str
The input name
value : array_like.
The input value
"""
self._set_input(key, value)
def set_params(self, params_group_name, params_data):
"""Set the parameter group value given the parameter group name. Note that the parameter
group name is declared in the pipeline executor config.
Parameters
----------
params_group_name : str
The parameters group name.
params_data : Dict[str, NDArray]
A map from parameter name to data.
"""
if not params_data:
raise RuntimeError('"params_data is empty!"')
for key, val in params_data.items():
self._set_param(params_group_name, key, val)
def get_input(self, key):
"""Get the input via an input name.
Parameters
----------
key : str
The input key
Returns
-------
data : NDArray
The input data.
"""
return self._get_input(key)
def get_output(self, synchronize=True, sleep_interval=0.001):
"""Get the output.
Parameters
----------
synchronize : bool
Whether to do a synchronous poll until an output is available.
sleep_interval : float
How many seconds the polling loop sleeps between polls when synchronizing.
Returns
-------
data : Array[NDArray]
A list of output data.
"""
outputs = []
if not synchronize:
outputs = self._get_output()
else:
while not outputs:
outputs = self._get_output()
time.sleep(sleep_interval)
return outputs
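    # A minimal, hypothetical usage sketch of the run/get_output cycle; "factory", the input
    # name "data", and "input_ndarray" are assumptions and are not defined in this file.
    #
    #   executor = PipelineModule(factory)
    #   executor.set_input("data", input_ndarray)
    #   executor.run()                                        # launches asynchronously
    #   outputs = executor.get_output()                       # polls until outputs are ready
    #   maybe_empty = executor.get_output(synchronize=False)  # non-blocking, may return []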
@property
def num_executing_pipeline(self):
"""Getting the count of running pipeline.
Returns
-------
count : int
The count of running pipeline.
"""
return self._get_pipe_execute_count()
@property
def num_outputs(self):
"""Get the number of outputs.
Returns
-------
count : int
The number of outputs.
"""
return self._get_num_outputs()
@property
def num_inputs(self):
"""Get the number of inputs
Returns
-------
count : int
The number of inputs
"""
return self._get_num_inputs()
@staticmethod
def load_library(config_file_name):
"""Import files to create a pipeline executor.
Parameters
----------
config_file_name : str
Path and name of the configuration file, the configuration file contains the
disk path of the parameter file, library file, and JSON file.
"""
with open(config_file_name, "r") as file_handle:
config = file_handle.read()
config = json.loads(config)
if "load_config" not in config or "pipeline_config" not in config:
raise RuntimeError(
'"load_config" or "pipeline_config" is missing in %s' % config_file_name
)
        # The config file used to load the library, parameters, and JSON files.
with open(config["load_config"], "r") as file_handle:
load_config = file_handle.read()
# The config file used to load pipeline compute config.
with open(config["pipeline_config"], "r") as file_handle:
pipeline_config = file_handle.read()
# Load a PipelineExecutor from the disk files.
load_library = get_global_func("tvm.pipeline_executor.load", allow_missing=False)
module = load_library(load_config, pipeline_config)
return PipelineModule(module)
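    # A minimal, hypothetical sketch of restoring an executor from exported files; the
    # directory below is an assumption and must contain a previously exported "config" file.
    #
    #   executor = PipelineModule.load_library("/path/to/exported/config")
    #   executor.run()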
class PipelineExecutorFactoryModule(object):
"""Common interface for pipeline executor factory modules.
Parameters
----------
pipeline_mods : List[GraphExecutorFactoryModule]
List of GraphExecutorFactoryModule.
    mods_config : Dict[int, Dict[str, Any]]
        Modules dependency configuration information.
"""
def __init__(self, pipeline_mods, mods_config):
self.pipeline_mods = pipeline_mods
self.mods_config = mods_config
self.module = None
def get_pipeline_executor_module(self):
"""Get the pipeline executor module.
Returns
-------
module : Module
Common interface for pipeline executor factory Module.
"""
if not self.module:
graph_executors, config = self.graph_executor_create(
self.pipeline_mods, self.mods_config
)
self.pipeline_create = get_global_func(
"tvm.pipeline_executor.create", allow_missing=False
)
self.module = self.pipeline_create(graph_executors, config)
return self.module
def graph_executor_create(self, pipeline_mods, mod_config):
"""Create graph_executor list and return configuration as a json string.
Parameters
----------
pipeline_mods : List[GraphExecutorFactoryModule]
List of GraphExecutorFactoryModule
mod_config : Dict[str, Any]
Modules dependency configuration information.
Returns
-------
mods : List[Module]
The Module list.
        mod_config : str
            The module configuration as a JSON string.
"""
# Should store modules in the list named 'mods' in index order.
mods = [None for _ in range(len(pipeline_mods))]
for lib_index in pipeline_mods:
pipeline_lib = pipeline_mods[lib_index]["lib"]
dev = pipeline_mods[lib_index]["dev"]
lib = graph_executor.GraphModule(pipeline_lib["default"](dev))
# Return a module list sorted by lib_index.
mods[lib_index] = lib.module
return mods, json.dumps(mod_config)
def export_library(self, directory_path):
"""Export the pipeline executor into disk files.
Parameters
----------
directory_path : str
Export the files to this directory.
"""
if not self.pipeline_mods:
raise RuntimeError("The pipeline executor has not been initialized.")
# Check if the directory_path exists.
if not os.path.exists(directory_path):
raise RuntimeError("The directory {directory_path} does not exist.")
# Create an load configuration.
load_config_file_name = "{}/load_config".format(directory_path)
pipeline_config_file_name = "{}/pipeline_config".format(directory_path)
config = {}
config["load_config"] = load_config_file_name
config["pipeline_config"] = pipeline_config_file_name
load_config = []
        # Export the library, JSON, and parameters into files, then record these file
        # paths in a configuration file.
for lib_index in self.pipeline_mods:
mconfig = {}
mconfig["mod_idx"] = lib_index
mconfig["lib_name"] = "{}/lib{}.so".format(directory_path, lib_index)
mconfig["json_name"] = "{}/json{}".format(directory_path, lib_index)
mconfig["params_name"] = "{}/params{}".format(directory_path, lib_index)
mconfig["dev"] = "{},{}".format(
self.pipeline_mods[lib_index]["dev"].device_type,
self.pipeline_mods[lib_index]["dev"].device_id,
)
# Get the graph, lib, and parameters from GraphExecutorFactoryModule.
lib = self.pipeline_mods[lib_index]["lib"]
# Export the lib, graph, and parameters to disk.
if self.pipeline_mods[lib_index]["export_cc"]:
lib.export_library(
mconfig["lib_name"], cc=self.pipeline_mods[lib_index]["export_cc"]
)
else:
lib.export_library(mconfig["lib_name"])
with open(mconfig["json_name"], "w") as file_handle:
file_handle.write(lib.graph_json)
with open(mconfig["params_name"], "wb") as file_handle:
file_handle.write(runtime.save_param_dict(lib.params))
load_config.append(mconfig)
with open(load_config_file_name, "w") as file_handle:
json.dump(load_config, file_handle)
with open(pipeline_config_file_name, "w") as file_handle:
json.dump(self.mods_config, file_handle)
config_file_name = "{}/config".format(directory_path)
with open(config_file_name, "w") as file_handle:
json.dump(config, file_handle)
return config_file_name
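    # A minimal, hypothetical round-trip sketch ("factory" and the export directory are
    # assumptions; the directory must already exist): export the factory to disk, then
    # reload it as a PipelineModule.
    #
    #   config_file = factory.export_library("/tmp/pipeline_export")
    #   executor = PipelineModule.load_library(config_file)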
| https://github.com/zk-ml/tachikoma |
python/tvm/contrib/pipeline_executor_build.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Pipeline executor that executes a series of modules in a pipeline fashion."""
import json
import os
import tvm._ffi
from tvm import relay
from tvm.relay.transform import InferType
from tvm.contrib.pipeline_executor import PipelineExecutorFactoryModule
def pipeline_executor_build_enabled():
"""Check if the pipeline executor build is enabled.
    Returns
    -------
    enable: bool
        Return whether the pipeline executor build is enabled.
"""
return tvm.contrib.pipeline_executor.pipeline_executor_enabled()
def build(pipe_configs):
"""Build modules used in the pipeline executor, then use these modules and configuration
to create a pipeline executor.
Parameters
----------
pipe_configs: PipelineConfig
Build Configuration information.
Returns
-------
ret: PipelineExecutorFactoryModule
Common interface for pipeline executor factory modules.
"""
libs = {}
config = pipe_configs.get_config()
if "module_connection" not in config:
raise RuntimeError('"module_connection" is missing')
if "input_connection" not in config:
raise RuntimeError('"input_connection" is missing')
if "param_connection" not in config:
raise RuntimeError('"param_connection" is missing')
mod_n_configs = config["module_connection"]
config_len = len(mod_n_configs)
module_string_config = [{} for _ in range(config_len)]
# Use hardware configurations to build backend modules for each subgraph.
for ir_mod, mod_config in mod_n_configs.items():
pipe_config = mod_config["pipeline"].copy()
mod_idx = pipe_config["mod_idx"]
dev = mod_config["dev"]
target = mod_config["target"]
build_func = relay.build
# Callers may need to use a customized building function to wrap the pre-building logic
        # and the backend building logic. For example, in order to support a backend which can
        # only do "int8" computation, the caller may need to merge the "quantization" logic
        # into the building logic to create a customized building function.
if "build" in mod_config and mod_config["build"]:
build_func = mod_config["build"]
lib = build_func(
ir_mod,
target,
params=mod_config["params"],
target_host=mod_config["target_host"],
mod_name=mod_config["mod_name"],
)
pipe_config["dev"] = "{},{}".format(dev.device_type, dev.device_id)
# Use "mod_idx" as the key to create a "module_connection" map which is not only
# for the module index but also for the module connection used to build the pipeline.
module_string_config[mod_idx] = pipe_config
libs[mod_idx] = {
"lib": lib,
"dev": dev,
"fcompile": mod_config["fcompile"],
"export_cc": mod_config["export_cc"],
}
    # Create a text-form configuration to record the "input_connection" and the
# "module_connection" information. The "input_connection" is used to record the
# map of global input and subgraph input, and the "module_connection" is used to
# record module dependency.
string_config = {}
string_config["param_connection"] = config["param_connection"]
string_config["input_connection"] = config["input_connection"]
string_config["module_connection"] = module_string_config
return PipelineExecutorFactoryModule(libs, string_config)
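# A minimal, hypothetical sketch of driving build(): "pipe_config" is assumed to be a
# PipelineConfig whose modules already carry the "target", "dev", and other fields that
# get_config() serializes; none of these objects are created in this sketch.
#
#   from tvm.contrib import pipeline_executor, pipeline_executor_build
#
#   factory = pipeline_executor_build.build(pipe_config)
#   executor = pipeline_executor.PipelineModule(factory)
#   executor.run()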
def export_library(factory, directory_path):
"""Export the pipeline executor into disk files.
Parameters
----------
factory : PipelineExecutorFactoryModule
The pipeline executor factory
directory_path : str
Export the files to this directory.
"""
if not factory.pipeline_mods:
raise RuntimeError("The pipeline executor has not been initialized.")
# Check if the directory_path exists.
if not directory_path or not os.path.exists(directory_path):
raise RuntimeError("The directory {directory_path} does not exist.")
# Create an load configuration.
load_config_file_name = "{}/load_config".format(directory_path)
pipeline_config_file_name = "{}/pipeline_config".format(directory_path)
config = {}
config["load_config"] = load_config_file_name
config["pipeline_config"] = pipeline_config_file_name
load_config = []
    # Export the library, JSON, and parameters into files, then record these file
    # paths in a configuration file.
for lib_index in factory.pipeline_mods:
mconfig = {}
mconfig["mod_idx"] = lib_index
mconfig["lib_name"] = "{}/lib{}.so".format(directory_path, lib_index)
mconfig["json_name"] = "{}/json{}".format(directory_path, lib_index)
mconfig["params_name"] = "{}/params{}".format(directory_path, lib_index)
lib_config = factory.pipeline_mods[lib_index]
mconfig["dev"] = "{},{}".format(lib_config["dev"].device_type, lib_config["dev"].device_id)
fcompile = lib_config["fcompile"]
if not fcompile:
fcompile = False
# Get the graph, lib, and parameters from GraphExecutorFactoryModule.
lib = factory.pipeline_mods[lib_index]["lib"]
# Export the lib, graph, and parameters to disk.
lib.export_library(mconfig["lib_name"], fcompile)
with open(mconfig["json_name"], "w") as file_handle:
file_handle.write(lib.graph_json)
with open(mconfig["params_name"], "wb") as file_handle:
file_handle.write(relay.save_param_dict(lib.params))
load_config.append(mconfig)
with open(load_config_file_name, "w") as file_handle:
json.dump(load_config, file_handle)
with open(pipeline_config_file_name, "w") as file_handle:
json.dump(factory.mods_config, file_handle)
config_file_name = "{}/config".format(directory_path)
with open(config_file_name, "w") as file_handle:
json.dump(config, file_handle)
return config_file_name
class PipelineConfig(object):
"""Pipeline configuration information, this class contains the DAG that expresses
the dependency of each module involved in a pipeline and the parameters for building
each module.
"""
class Binding:
"""This class defines the module connections information.
The type can only be "input" or "output".
Parameters
----------
owner : ModuleWrapper
The class who owns this interface.
io_type : str
The I/O type of this interface. It can only be "input" or "output".
name : str/integer
Name, for input it is string such as "data0", for output it is an integer such as 0.
data_type: TensorType
The data type of this interface.
"""
def __init__(self, owner, io_type, name, data_type=None):
self.io_owner = owner
self.io_type = io_type
self.name = str(name)
# Child interfaces that depend on this interface.
self.bindings = []
            # Parent interfaces that this interface depends on.
self.parents = []
self.data_type = data_type
def get_name(self):
# Return name of this interface and the name of owner who owns this interface.
owner_name = ""
if isinstance(self.io_owner, PipelineConfig.ModuleWrapper):
owner_name = self.io_owner.name
return owner_name, self.name
def get_owner_idx(self):
            # If the owner is a ModuleWrapper, return the owner index; otherwise return -1.
if isinstance(self.io_owner, PipelineConfig.ModuleWrapper):
return self.io_owner.idx
return -1
def is_pipeline_executor_interface(self):
"""The pipeline interface is used to interact with the caller. There are two types
of interfaces, one is 'input' another is 'output'. The pipeline input interface
is responsible for passing parameters to the internal module interface, and the
pipeline output interface is responsible for outputting the results computed by
the pipeline executor to the caller.
"""
return not isinstance(self.io_owner, PipelineConfig.ModuleWrapper)
def __repr__(self):
            # Get the binding information in the form of text.
str_format = " |{}: ".format(self.name)
for binding in self.bindings:
mname, dname = binding.get_name()
str_format += "{0}:{1} ".format(mname, dname)
return str_format
def check_binding_dict(self, connection_dict):
"""Checking the binding dictionary.
Parameter
---------
connection_dict : Dict[str, Any]
It is a dictionary of module connections.
"""
if "interface_name" not in connection_dict:
raise RuntimeError('"inteface_name" is missing in global config!"')
if "connection" not in connection_dict:
raise RuntimeError(f'"connection" is missing!"')
# The global interface mapping should be one-to-one.
if not connection_dict["connection"]:
raise RuntimeError("The global interface map is empty!")
if len(connection_dict["connection"]) > 1:
raise RuntimeError("A global interface maps multiple module interfaces!")
if "mod_idx" not in connection_dict["connection"][0]:
raise RuntimeError('"mod_idx" is missing!')
def get_binding_dict(self):
"""Returning the binding information in the form of dictionary.
Returns
-------
data : Dict[str, Any]
The binding information is in the form of dictionary.
"""
dict_format = {"interface_name": self.name, "connection": []}
for binding in self.bindings:
_, dname = binding.get_name()
midx = binding.get_owner_idx()
dict_format["connection"].append({"mod_idx": midx, "interface_name": dname})
self.check_binding_dict(dict_format)
return dict_format
def check_dag_acyclic(self, start, inputs):
"""This is to check whether the DAG containing these input interfaces is acyclic.
Parameters
----------
start: ModuleWrapper
The starting node of the cycle check algorithm.
inputs: Binding
These interfaces are used to connect to each other to build DAG.
Return
------
Return true if there is no cycle in the DAG.
"""
for binding in inputs.values():
if start == binding.io_owner:
return False
for p in binding.parents:
if not self.check_dag_acyclic(start, p.io_owner.input_bindings.bindings):
return False
return True
def connect(self, binding):
"""Connect the current interface to the destination interface.
Correct connections are as follows: 1. the pipeline input connected to a module input,
2. the module output connected to a pipeline output, 3. the module output connected to
a module input.
Parameters
----------
binding: Binding
The destination of this connection.
"""
# Check whether the binding setting is correct or not.
if self.io_owner == binding.io_owner:
raise RuntimeError("Can not bind itself.")
if self.io_type == "param" and not self.is_pipeline_executor_interface():
raise RuntimeError(
'The "param" binding can only be used by a pipeline executor interface!'
)
if not self.is_pipeline_executor_interface() and self.io_type == "input":
raise RuntimeError("Module can only bind from output interface!")
if self.io_type == "param" and binding.io_type != "param":
raise RuntimeError(
'A global "param" interface can only be bind with a module "param" interface!'
)
if (
not self.is_pipeline_executor_interface()
and not binding.is_pipeline_executor_interface()
and binding.io_type == "output"
):
raise RuntimeError("Can not bind module output with another module output!")
if (
not self.is_pipeline_executor_interface()
and binding.is_pipeline_executor_interface()
and binding.io_type == "input"
):
raise RuntimeError("Can not bind module output with pipeline input!")
if self.is_pipeline_executor_interface() and self.io_type == "output":
raise RuntimeError("Global output can not be used as binding start point.")
if (
self.is_pipeline_executor_interface()
and self.io_type == "input"
and binding.io_type != "input"
):
raise RuntimeError("Global input can only bind with module input.")
self.bindings.append(binding)
if not self.is_pipeline_executor_interface():
# Check whether the data types of the source and destination are the same.
if (
isinstance(binding.io_owner, PipelineConfig.ModuleWrapper)
and self.data_type != binding.data_type
):
raise RuntimeError(
f"Illegal type (%s vs. %s): binding type is not same!"
% (self.data_type, binding.data_type)
)
binding.parents.append(self)
# Do acyclic check after increasing the in-degree of child node by setting
# current interface as a parent of the child node.
if not self.check_dag_acyclic(
binding.io_owner, self.io_owner.input_bindings.bindings
):
raise RuntimeError("Illegal connection: Cause a cycle!")
class BindingList:
"""Container for bindings(input or output interface).
Parameters
----------
owner : ModuleWrapper/PipelineConfig
The owner of this class can be ModuleWrapper or PipelineConfig.
io_type : str
The type of this class can be "input" or "output".
"""
def __init__(self, owner, io_type):
self.bindings = {}
self.io_owner = owner
self.binding_type = io_type
def get_binding_data_type(self, key):
if isinstance(self.io_owner, PipelineConfig.ModuleWrapper):
return self.io_owner.get_data_type(key, self.binding_type)
return None
def __getitem__(self, key):
if key not in self.bindings:
data_type = self.get_binding_data_type(key)
if not data_type and isinstance(self.io_owner, PipelineConfig.ModuleWrapper):
raise RuntimeError(f"Can not find {key} in binding list {self.binding_type}.")
self.bindings[key] = PipelineConfig.Binding(
self.io_owner, self.binding_type, key, data_type
)
return self.bindings[key]
class ModuleWrapper:
"""This class is a wrapper representing the module and contains information such as
module information, binding information and building information.
"""
def __init__(self, mod=None):
self.target_host = None
self.build_func = None
self.params = None
self.target = None
self.fcompile = None
self.name = None
self.dev = None
self.export_cc = None
self.cpu_affinity = ""
self.idx = None
self.mod = mod
self.input_params = InferType()(mod)["main"].params
self.output_type = InferType()(mod)["main"].checked_type.ret_type
self.input_bindings = PipelineConfig.BindingList(self, "input")
self.output_bindings = PipelineConfig.BindingList(self, "output")
self.param_binding = PipelineConfig.Binding(self, "param", "param")
def __eq__(self, other):
if isinstance(other, PipelineConfig.ModuleWrapper):
return self.mod == other.mod
return False
def __getitem__(self, key):
if isinstance(key, str):
if key == "input":
return self.input_bindings
if key == "output":
return self.output_bindings
if key == "param":
return self.param_binding
raise RuntimeError(f"{key} not found!")
raise RuntimeError('The data type of "key" is not supported!')
def get_data_type(self, key, interface_type):
"""Get the module interface data type according to the key value and interface type.
Parameters
----------
key: str
The interface name.
interface_type:
The interface type.
Return
-------
Return data type.
"""
if interface_type == "input":
for param in self.input_params:
if param.name_hint == key:
return param._checked_type_
if interface_type == "output":
if isinstance(self.output_type, tvm.ir.type.TupleType):
if int(key) < len(self.output_type.fields):
return self.output_type.fields[int(key)]
elif int(key) == 0:
return self.output_type
return None
def set_idx_name(self, idx):
# Set the index value and generate the module name.
self.idx = idx
self.name = "mod{}".format(str(idx))
def is_root_mod(self):
"""Check whether this node is the root node in DAG, this function is used
in topological sort.
"""
return all([not b.parents for b in self.input_bindings.bindings.values()])
def remove_self_from_bindings(self):
"""Remove the current node from child dependencies to reduce the in-degree
of child node, this function is used in topological sort.
"""
for binding in self.output_bindings.bindings.values():
for child in binding.bindings:
if binding in child.parents:
child.parents.remove(binding)
def __init__(self):
self.mod_wrapper = {}
self.input_bindings = self.BindingList(self, "input")
self.output_bindings = self.BindingList(self, "output")
# There is a map of global parameters group and module index.
self.param_group_bindings = self.BindingList(self, "param")
def __str__(self):
# Get configuration information as a string.
# Use topological sort to get correct module order.
self.dag_topology_sort()
        # Get the parameter dependencies.
param_dump = "Params\n"
for param_name in self.param_group_bindings.bindings:
inf = self.param_group_bindings.bindings[param_name]
param_dump += str(inf) + "\n"
# Get the input dependencies.
input_dump = "\nInputs\n"
for input_name in self.input_bindings.bindings:
inf = self.input_bindings.bindings[input_name]
input_dump += str(inf) + "\n"
# Get the connections information of each module.
output = {}
connections_dump = "\nconnections\n"
for mod in self.mod_wrapper:
for interface in self.mod_wrapper[mod].output_bindings.bindings.values():
if interface.bindings:
mname, dname = interface.get_name()
iname = mname + ".output(" + dname + ")->"
for dep in interface.bindings:
dep_mname, dep_dname = dep.get_name()
if isinstance(dep.io_owner, PipelineConfig.ModuleWrapper):
iname += f" {dep_mname}.{dep_dname}"
connections_dump += f" |{iname}\n"
else:
output[dep_dname] = f"{mname}.output({dname})"
# Get the output dependencies.
output_dump = "\noutput\n"
for name in sorted(output.keys()):
output_dump += f" |output({name}) : {output[name]}\n"
return param_dump + input_dump + output_dump + connections_dump
def __getitem__(self, key):
if isinstance(key, tvm.ir.module.IRModule):
if key not in self.mod_wrapper:
self.mod_wrapper[key] = self.ModuleWrapper(key)
return self.mod_wrapper[key]
if isinstance(key, str):
if key == "input":
return self.input_bindings
if key == "output":
return self.output_bindings
if key == "param_group":
return self.param_group_bindings
raise RuntimeError(f"{key} not found!")
raise RuntimeError(f'The key type "{type(key)}" is not supported!')
def get_config(self):
"""Get the configuration information in dictionary form, this configuration
will be used to create pipeline executor.
"""
# Use topological sort to get the correct order of modules.
self.dag_topology_sort()
mconfig = {}
module_connection = {}
for mod in self.mod_wrapper:
# Generate pipeline configuration.
mconf = {}
output_conf = []
module = self.mod_wrapper[mod]
for _, binding in module.output_bindings.bindings.items():
dep_conf = []
output = {}
if binding.bindings:
for dep in binding.bindings:
dep_item = {}
_, dname = dep.get_name()
if dep.is_pipeline_executor_interface():
dep_item["global_output_index"] = int(dname)
else:
dep_item["mod_idx"] = dep.get_owner_idx()
dep_item["input_name"] = dname
dep_conf.append(dep_item)
                # The value of output_idx starts from 0.
output["output_idx"] = int(binding.name)
output["dependencies"] = dep_conf
output_conf.append(output)
mconf["mod_idx"] = module.idx
mconf["cpu_affinity"] = module.cpu_affinity
mconf["output"] = output_conf
module_connection[mod] = {
"pipeline": mconf,
"target_host": module.target_host,
"mod_name": "default",
"build": module.build_func,
"params": module.params,
"target": module.target,
"fcompile": module.fcompile,
"dev": module.dev,
"export_cc": module.export_cc,
}
# Creating a map including pipeline inputs and subgraph inputs.
input_connection = []
for input_name in self.input_bindings.bindings:
input_dict = self.input_bindings.bindings[input_name].get_binding_dict()
if "interface_name" not in input_dict["connection"][0]:
raise RuntimeError("interface_name is missing in connection config!")
# Creating the map including global interfaces and subgraph interfaces.
input_map = {
"global_interface_name": input_dict["interface_name"],
"mod_idx": input_dict["connection"][0]["mod_idx"],
"module_interface_name": input_dict["connection"][0]["interface_name"],
}
input_connection.append(input_map)
# Create a map including global parameters groups and modules.
param_connection = []
for param_name in self.param_group_bindings.bindings:
param_dict = self.param_group_bindings.bindings[param_name].get_binding_dict()
param_map = {
"global_param_name": param_dict["interface_name"],
"mod_idx": param_dict["connection"][0]["mod_idx"],
}
param_connection.append(param_map)
mconfig["module_connection"] = module_connection
mconfig["input_connection"] = input_connection
mconfig["param_connection"] = param_connection
return mconfig
def dag_topology_sort(self):
"""Use topological sort to get order of pipeline modules."""
mlist = []
mod_wrapper = self.mod_wrapper.copy()
while mod_wrapper:
temp_list = []
for mod, wrapper in mod_wrapper.items():
if wrapper.is_root_mod():
temp_list.append(mod)
wrapper.remove_self_from_bindings()
for mod in temp_list:
mod_wrapper.pop(mod, None)
mlist += temp_list
mod_wrapper_sort = {}
        for i, mod in enumerate(mlist):
self.mod_wrapper[mod].set_idx_name(i)
mod_wrapper_sort[mod] = self.mod_wrapper[mod]
self.mod_wrapper = mod_wrapper_sort
def get_mod_idx(self, mod):
# Return the module index.
idx = self.mod_wrapper[mod].idx
return idx
def pipe_input(self, name):
# Return the input interface according to the name.
return self.input_bindings[name]
def pipe_output(self, idx):
        # Return the output interface according to the index.
return self.output_bindings[idx]
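    # A minimal, hypothetical end-to-end sketch of this class (module variables, target, and
    # device below are assumptions): register two relay modules, wire them, then build.
    #
    #   conf = PipelineConfig()
    #   conf[mod0].target = "llvm"
    #   conf[mod0].dev = tvm.cpu(0)
    #   conf[mod1].target = "llvm"
    #   conf[mod1].dev = tvm.cpu(0)
    #   conf["input"]["data"].connect(conf[mod0]["input"]["data"])
    #   conf[mod0]["output"][0].connect(conf[mod1]["input"]["data_n_0"])
    #   conf[mod1]["output"][0].connect(conf["output"][0])
    #   factory = build(conf)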
| https://github.com/zk-ml/tachikoma |