file_path | content | repo
---|---|---|
python/tvm/micro/base.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Base definitions for MicroTVM"""
import tvm
import tvm._ffi
tvm._ffi._init_api("tvm.micro", "tvm.micro.base")
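# NOTE: _init_api looks up all global functions registered from the C++ side
# under the "tvm.micro" prefix and attaches them as attributes of the
# tvm.micro.base module, making them callable from Python. The exact set of
# functions attached depends on how TVM was built.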
| https://github.com/zk-ml/tachikoma |
python/tvm/micro/build.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines top-level glue functions for building microTVM artifacts."""
import json
import logging
import os
import contextlib
import enum
from typing import Union
from .._ffi import libinfo
from .. import rpc as _rpc
_LOG = logging.getLogger(__name__)
STANDALONE_CRT_DIR = None
class MicroTVMTemplateProject(enum.Enum):
ZEPHYR = "zephyr"
ARDUINO = "arduino"
CRT = "crt"
@classmethod
def list(cls):
return list(map(lambda c: c.value, cls))
class CrtNotFoundError(Exception):
"""Raised when the standalone CRT dirtree cannot be found."""
class MicroTVMTemplateProjectNotFoundError(Exception):
"""Raised when the microTVM template project dirtree cannot be found."""
def get_standalone_crt_dir() -> str:
"""Find the standalone_crt directory.
Though the C runtime source lives in the tvm tree, it is intended to be distributed with any
binary build of TVM. This source tree is intended to be integrated into user projects to run
models targeted with --runtime=c.
Returns
-------
str :
The path to the standalone_crt
"""
global STANDALONE_CRT_DIR
if STANDALONE_CRT_DIR is None:
for path in libinfo.find_lib_path():
crt_path = os.path.join(os.path.dirname(path), "standalone_crt")
if os.path.isdir(crt_path):
STANDALONE_CRT_DIR = crt_path
break
else:
raise CrtNotFoundError()
return STANDALONE_CRT_DIR
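# Usage sketch (illustrative, not part of the original file): locate the CRT
# sources bundled with a binary TVM build so a firmware project can vendor
# them. The destination path below is a placeholder; the call raises
# CrtNotFoundError if TVM was built without the standalone CRT.
#
#     import shutil
#     from tvm.micro import get_standalone_crt_dir
#
#     crt_dir = get_standalone_crt_dir()
#     shutil.copytree(crt_dir, "my_project/crt", dirs_exist_ok=True)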
def get_microtvm_template_projects(platform: str) -> str:
"""Find microTVM template project directory for specific platform.
Parameters
----------
platform : str
Platform type which should be defined in MicroTVMTemplateProject.
Returns
-------
str :
Path to template project directory for platform.
"""
if platform not in MicroTVMTemplateProject.list():
raise ValueError(f"platform {platform} is not supported.")
if platform == MicroTVMTemplateProject.CRT.value:
return os.path.join(get_standalone_crt_dir(), "template", "host")
microtvm_template_projects = None
for path in libinfo.find_lib_path():
template_path = os.path.join(os.path.dirname(path), "microtvm_template_projects")
if os.path.isdir(template_path):
microtvm_template_projects = template_path
break
else:
raise MicroTVMTemplateProjectNotFoundError()
return os.path.join(microtvm_template_projects, platform)
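# Usage sketch (illustrative): resolve the bundled template project for a given
# platform. "crt" resolves inside the standalone CRT tree, while "zephyr" and
# "arduino" resolve under microtvm_template_projects; the returned directory
# can then be used as the template when generating a microTVM project.
#
#     template = get_microtvm_template_projects("crt")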
class AutoTvmModuleLoader:
"""MicroTVM AutoTVM Module Loader
Parameters
----------
template_project_dir : Union[os.PathLike, str]
project template path
project_options : dict
project generation option
project_dir: str
if use_existing is False: The path to save the generated microTVM Project.
if use_existing is True: The path to a generated microTVM Project for debugging.
use_existing: bool
skips the project generation and opens transport to the project at the project_dir address.
"""
def __init__(
self,
template_project_dir: Union[os.PathLike, str],
project_options: dict = None,
project_dir: Union[os.PathLike, str] = None,
use_existing: bool = False,
):
self._project_options = project_options
self._use_existing = use_existing
if isinstance(template_project_dir, (os.PathLike, str)):
self._template_project_dir = str(template_project_dir)
        else:
            raise TypeError(f"Incorrect type {type(template_project_dir)}.")
if isinstance(project_dir, (os.PathLike, str)):
self._project_dir = str(project_dir)
else:
self._project_dir = None
@contextlib.contextmanager
def __call__(self, remote_kw, build_result):
with open(build_result.filename, "rb") as build_file:
build_result_bin = build_file.read()
tracker = _rpc.connect_tracker(remote_kw["host"], remote_kw["port"])
remote = tracker.request(
remote_kw["device_key"],
priority=remote_kw["priority"],
session_timeout=remote_kw["timeout"],
session_constructor_args=[
"tvm.micro.compile_and_create_micro_session",
build_result_bin,
self._template_project_dir,
json.dumps(self._project_options),
self._project_dir,
self._use_existing,
],
)
system_lib = remote.get_function("runtime.SystemLib")()
yield remote, system_lib
def autotvm_build_func():
"""A dummy build function which causes autotvm to use a different export format."""
# A sentinel value for the output format.
autotvm_build_func.output_format = ".model-library-format"
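# Usage sketch (illustrative, hedged): wiring the loader into an AutoTVM
# measurement pipeline. The tracker host/port and device key below are
# placeholders.
#
#     import tvm
#     from tvm import autotvm
#
#     module_loader = AutoTvmModuleLoader(
#         template_project_dir=get_microtvm_template_projects("crt"),
#         project_options={"verbose": False},
#     )
#     builder = autotvm.LocalBuilder(
#         build_kwargs={"build_option": {"tir.disable_vectorize": True}},
#         do_fork=True,
#         build_func=tvm.micro.autotvm_build_func,  # selects .model-library-format export
#     )
#     runner = autotvm.RPCRunner(
#         key="my-board", host="127.0.0.1", port=9190, module_loader=module_loader
#     )
#     measure_option = autotvm.measure_option(builder=builder, runner=runner)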
| https://github.com/zk-ml/tachikoma |
python/tvm/micro/class_factory.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines a utility for representing deferred class instatiations as JSON."""
import importlib
import json
import typing
JsonSerializable = typing.Union[int, float, str, None, bool]
class SerializedFactoryError(Exception):
"""Raised when ClassFactory.from_json is invoked with an invalid JSON blob."""
class ClassFactory:
"""Describes a JSON-serializable class instantiation, for use with the RPC server."""
# When not None, the superclass from which all cls must derive.
SUPERCLASS = None
def __init__(
self,
cls: typing.Callable,
init_args: typing.List[JsonSerializable],
init_kw: typing.Dict[str, JsonSerializable],
):
self.cls = cls
self.init_args = init_args
self.init_kw = init_kw
def override_kw(self, **kw_overrides):
kwargs = self.init_kw
if kw_overrides:
kwargs = dict(kwargs)
for k, v in kw_overrides.items():
kwargs[k] = v
return self.__class__(self.cls, self.init_args, kwargs)
def instantiate(self):
return self.cls(*self.init_args, **self.init_kw)
@property
def to_json(self):
return json.dumps(
{
"cls": ".".join([self.cls.__module__, self.cls.__name__]),
"init_args": self.init_args,
"init_kw": self.init_kw,
}
)
EXPECTED_KEYS = ("cls", "init_args", "init_kw")
@classmethod
def from_json(cls, data):
"""Reconstruct a ClassFactory instance from its JSON representation.
Parameters
----------
data : str
The JSON representation of the ClassFactory.
Returns
-------
ClassFactory :
The reconstructed ClassFactory instance.
Raises
------
SerializedFactoryError :
If the JSON object represented by `data` is malformed.
"""
obj = json.loads(data)
if not isinstance(obj, dict):
raise SerializedFactoryError(f"deserialized json payload: want dict, got: {obj!r}")
for key in cls.EXPECTED_KEYS:
if key not in obj:
raise SerializedFactoryError(
f"deserialized json payload: expect key {key}, got: {obj!r}"
)
cls_package_name, cls_name = obj["cls"].rsplit(".", 1)
cls_package = importlib.import_module(cls_package_name)
cls_obj = getattr(cls_package, cls_name)
return cls(cls_obj, obj["init_args"], obj["init_kw"])
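# Round-trip sketch (illustrative): serialize a deferred instantiation, ship
# the JSON string across the RPC boundary, and reconstruct it on the far side.
#
#     factory = ClassFactory(dict, init_args=[], init_kw={"a": 1})
#     blob = factory.to_json  # note: to_json is a property, not a method
#     rebuilt = ClassFactory.from_json(blob)
#     assert rebuilt.instantiate() == {"a": 1}
#     assert rebuilt.override_kw(a=2).instantiate() == {"a": 2}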
| https://github.com/zk-ml/tachikoma |
python/tvm/micro/contrib/stm32/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Module container of STM32 code generator."""
from .emitter import CodeEmitter, get_input_tensor_name, get_output_tensor_name
| https://github.com/zk-ml/tachikoma |
python/tvm/micro/contrib/stm32/emitter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long
"""Code emission for the STM32 targets."""
import contextlib
import json
import os
import re
import shutil
import tarfile
import textwrap
from datetime import datetime
import numpy as np
import tvm
from tvm.contrib import utils
AI_API_VERSION_MAJOR = 1
AI_API_VERSION_MINOR = 0
AI_API_VERSION_MICRO = 0
AI_TOOLS_REVISION = "v1"
DBAR = "=" * 60
def _fix_name(node_name):
"""Replace ':' with '_' in names like 'InputImg:0'"""
return node_name.replace(":", "_")
def get_input_tensor_name(node_name):
return _fix_name(node_name)
def get_output_tensor_name(node_name, idx):
return _fix_name(node_name) + "_" + str(idx)
def _get_node_args_name(node_name):
return _fix_name(node_name) + "_args"
def _get_node_arg_types_name(node_name):
return _fix_name(node_name) + "_arg_type_ids"
def _get_type_size(dltype):
if dltype in ("uint64", "int64"):
return 8
if dltype in ("uint32", "int32", "float32"):
return 4
if dltype in ("uint16", "int16"):
return 2
if dltype in ("uint8", "int8"):
return 1
raise ValueError(f"Data type {dltype} is not supported")
C_TYPE_TO_DLTYPE = {
"uint64": "kDLUInt, 64, 1",
"int64": "kDLInt, 64, 1",
"float32": "kDLFloat, 32, 1",
"uint32": "kDLUInt, 32, 1",
"int32": "kDLInt, 32, 1",
"uint16": "kDLUInt, 16, 1",
"int16": "kDLInt, 16, 1",
"uint8": "kDLUInt, 8, 1",
"int8": "kDLInt, 8, 1",
}
def _get_type_data(dltype):
try:
return C_TYPE_TO_DLTYPE[dltype]
except KeyError:
raise ValueError(f"Data type {dltype} is not supported")
def _get_aligned_offset(offset, dltype):
align = _get_type_size(dltype)
if offset % align != 0:
offset = offset + (align - offset % align)
return offset
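# Worked example (illustrative): offsets are rounded up to the natural
# alignment of the element type, e.g. a float32 value placed after 10 bytes of
# data starts at offset 12 (4-byte alignment), while an already-aligned offset
# is left unchanged.
#
#     assert _get_aligned_offset(10, "float32") == 12
#     assert _get_aligned_offset(16, "int64") == 16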
def _get_num_tensor_elts(shape):
size = 1
for dim in shape:
size = size * dim
return size
def _get_tensor_size_bytes(dims, dltype):
size = _get_num_tensor_elts(dims)
return size * _get_type_size(dltype)
def _preprocess_code(src):
"""Hack the C code implementing the model."""
dst = "#include <stdio.h>\n" "#include <math.h>\n\n"
dst = dst + src
return dst
class CodeEmitter(object):
"""Code emitter class."""
DATA_ALIGNMENT_BYTES = 8
def __init__(self, include_activations=True, include_inputs=True, include_outputs=True):
"""Initialize the Emitter instance.
Parameters
----------
include_activations:
The Emitter allocates the storage for the activations data
and places it in a specific data section. If Falsr, the
main application is responsible for allocating the activations
storage. Default: True.
include_inputs/include_outputs:
The Emitter allocates the storage for the input/output data.
This storage is shared with the activations and placed in the
specific activations data section. If False, the main
application is responsible for allocating the input/output
data storage. Default: True.
Returns
-------
CodeEmitter object.
"""
# Static model: activations placed into a nn_data_act section
# Dynamic model: activations need to be malloc'ed by the
# applications.
self.activations_static = include_activations
# Inputs/outputs may be allocated within the activations or
# separately.
# TODO: Separate the inputs from activations inside TVM.
if include_inputs:
assert (
self.activations_static == True
), "###Error: Static inputs are not allowed without activations."
self.inputs_static = include_inputs
if include_outputs:
assert (
self.activations_static == True
), "###Error: Static outputs are not allowed without activations."
self.outputs_static = include_outputs
# Parsed graph
self._nodes = []
self._arg_nodes = []
self._outputs = []
self._attrs = {}
self._node_row_ptr = []
# Parameters
self._params = {}
# Filled by data_placement()
self._weights = {}
self._activations = {}
self._input_data = {}
self._output_data = {}
self._nodes_size = 0
self._weights_size = 0
self._activations_size = 0
self._quantization = {}
def _extract_quantization_info(self, quantization):
"""Build dictionary with quantization infos."""
for dl_tensor_name in self._input_data:
if dl_tensor_name in quantization:
self._quantization[dl_tensor_name] = quantization[dl_tensor_name]
# Matching outputs is more difficult because TVM does not preserve
# output tensor names.
# We only support models with a single output now.
        assert len(self._output_data) == 1, "Models with multiple outputs are not yet supported."
for dl_tensor_name in self._output_data:
for name in quantization:
if name not in self._input_data:
self._quantization["output"] = quantization[name]
break
def _get_node_arg_name(self, arg):
arg_nid = arg[0]
arg_idx = arg[1]
arg_node = self._nodes[arg_nid]
arg_name = self._nodes[arg_nid]["name"]
if arg_node["op"] == "null":
# parameter
dl_tensor_name = get_input_tensor_name(arg_name)
elif arg_node["name"] == "reshape_nop":
# Handle __nop
src = arg_node["inputs"][0]
dl_tensor_name = self._get_node_arg_name(src)
else:
# activation
dl_tensor_name = get_output_tensor_name(arg_name, arg_idx)
return dl_tensor_name
def _tensor_is_output(self, nid, idx):
for out in self._outputs:
out_nid = out[0]
out_idx = out[1]
if out_nid == nid and out_idx == idx:
return True
return False
def _get_tensor_from_node(self, nid, idx):
        # 'eid' is an index into the 'dltype', 'shape', etc. attribute lists.
eid = self._node_row_ptr[nid] + idx
dltype = self._attrs["dltype"][1][eid]
dims = self._attrs["shape"][1][eid]
storage_id = self._attrs["storage_id"][1][eid]
ndim = len(dims)
size = _get_tensor_size_bytes(dims, dltype)
tensor = {
"dltype": dltype,
"ndim": ndim,
"dims": dims,
"strides": None,
"storage_id": storage_id,
"byte_offset": 0,
"offset": 0,
"size": size,
}
return tensor
def _compute_data_placement(self):
"""Compute inputs, outputs, weight, activation sizes"""
self._inputs = self._arg_nodes.copy()
# weights:
offset = 0
for key in self._params:
# First, find the node in graph
nid = 0
for node in self._nodes:
if node["name"] == key:
break
nid += 1
dl_tensor_name = get_input_tensor_name(key)
tensor = self._get_tensor_from_node(nid, 0)
# Compute the offset
dltype = tensor["dltype"]
aligned_offset = _get_aligned_offset(offset, dltype)
tensor["offset"] = aligned_offset
for idx in self._arg_nodes:
node = self._nodes[idx]
node_name = node["name"]
if node_name == key:
self._inputs.remove(idx)
self._weights[dl_tensor_name] = tensor
# Next offset
offset = aligned_offset + tensor["size"]
self._weights_size = offset
# activations:
buffer_list_ = {}
nid = 0
for node in self._nodes:
if node["op"] == "null":
nid += 1
continue
if node["op"] != "tvm_op":
raise ValueError(f"Only TVM ops are supported")
node_name = node["name"]
node_attrs = node["attrs"]
func_name = node_attrs["func_name"]
num_outputs = int(node_attrs["num_outputs"])
if func_name == "__nop":
assert node_name == "reshape_nop", f"Unsupported __nop operator {node_name}."
assert num_outputs == 1
assert not self._tensor_is_output(nid, 0)
nid += 1
continue
for idx in range(num_outputs):
# Do not count the '_outputs'
if self._tensor_is_output(nid, idx):
continue
dl_tensor_name = get_output_tensor_name(node_name, idx)
tensor = self._get_tensor_from_node(nid, idx)
# Remember this tensor with the storage id
storage_id = tensor["storage_id"]
if storage_id not in buffer_list_:
buffer_list_[storage_id] = []
buffer_entry = buffer_list_[storage_id]
buffer_entry.append(tensor)
self._activations[dl_tensor_name] = tensor
self._nodes_size = self._nodes_size + 1
nid += 1
# Compute '_input_data'
offset = 0
for nid in self._inputs:
node = self._nodes[nid]
node_name = node["name"]
# Arthur: I suppose that input nodes only have a single
# output dependency
dl_tensor_name = get_input_tensor_name(node_name)
# This tensor is at some index inside '_input_data' dictionary
# depending on the '_inputs' list order. We refer to this position
# when generating the XXX.h file.
tensor = self._get_tensor_from_node(nid, 0)
if self.inputs_static:
# Remember this tensor with the storage id
storage_id = tensor["storage_id"]
if storage_id not in buffer_list_:
buffer_list_[storage_id] = []
buffer_entry = buffer_list_[storage_id]
buffer_entry.append(tensor)
else:
# Compute the offset
dltype = tensor["dltype"]
aligned_offset = _get_aligned_offset(offset, dltype)
tensor["offset"] = aligned_offset
self._input_data[dl_tensor_name] = tensor
# Next offset
offset = aligned_offset + tensor["size"]
# Compute '_output_data'
offset = 0
for output in self._outputs:
nid = output[0]
idx = output[1]
node = self._nodes[nid]
node_name = node["name"]
dl_tensor_name = get_output_tensor_name(node_name, idx)
tensor = self._get_tensor_from_node(nid, idx)
if self.outputs_static:
# Remember this tensor with the storage id
storage_id = tensor["storage_id"]
if storage_id not in buffer_list_:
buffer_list_[storage_id] = []
buffer_entry = buffer_list_[storage_id]
buffer_entry.append(tensor)
else:
# Compute the offset
dltype = tensor["dltype"]
aligned_offset = _get_aligned_offset(offset, dltype)
tensor["offset"] = aligned_offset
self._output_data[dl_tensor_name] = tensor
# Next offset
offset = aligned_offset + tensor["size"]
# Go over all storage IDs and compute offsets and _activations_size
offset = 0
for storage_id in buffer_list_:
buffer_entry = buffer_list_[storage_id]
new_offset = offset
for tensor in buffer_entry:
assert tensor["storage_id"] == storage_id
dltype = tensor["dltype"]
aligned_offset = _get_aligned_offset(offset, dltype)
tensor["offset"] = aligned_offset
size = tensor["size"]
if (aligned_offset + size) > new_offset:
new_offset = aligned_offset + size
offset = new_offset
self._activations_size = offset
def _parse_model(self, quantization=None):
"""Parse the module. Build internal data structures.
Parameters
----------
quantization: Dictionary
The quantization information for model inputs/outputs.
"""
for key in self._graph:
if key == "nodes":
self._nodes = self._graph["nodes"]
elif key == "arg_nodes":
self._arg_nodes = self._graph["arg_nodes"]
elif key == "node_row_ptr":
self._node_row_ptr = self._graph["node_row_ptr"]
elif key == "heads":
self._outputs = self._graph["heads"]
elif key == "attrs":
self._attrs = self._graph["attrs"]
elif key == "metadata":
continue
            else:
                raise ValueError(f"JSON key {key} is not supported")
# Build all tensor lists
self._compute_data_placement()
# Extract quantization info for inputs/outputs
if quantization is not None:
self._extract_quantization_info(quantization)
def parse_library_format(self, model_library_format_path, quantization=None):
"""Parse the module. Build internal data structures.
Parameters
----------
        model_library_format_path : str
            Path to the Model Library Format archive (.tar) to parse.
quantization: Dictionary
The quantization information for model inputs/outputs.
"""
temp_dir = utils.tempdir()
extract_path = temp_dir.relpath("extract")
os.mkdir(extract_path)
with tarfile.TarFile(model_library_format_path) as f:
f.extractall(extract_path)
with open(os.path.join(extract_path, "metadata.json")) as metadata_f:
metadata = json.load(metadata_f)
all_module_names = []
for name in metadata["modules"].keys():
all_module_names.append(name)
        assert len(metadata["modules"]) == 1, "Multiple modules are not supported."
        # Extract information from the Model Library Format
graph_file = os.path.join(
extract_path, "executor-config", "graph", f"{all_module_names[0]}.graph"
)
with open(graph_file, "r") as f:
# returns JSON object as a dictionary
graph_dict = json.load(f)
params_dict = {}
param_file = os.path.join(extract_path, "parameters", "default.params")
with open(param_file, "rb") as f:
params = tvm.runtime.load_param_dict(f.read())
# Map -> Python Dict
tmp_dict = {}
for (k, v) in params.items():
tmp_dict[k] = v
# Sort params for debugging
for k in sorted(tmp_dict.keys()):
params_dict[k] = tmp_dict[k]
src_dir = os.path.join(extract_path, "codegen", "host", "src")
# List of strings from Model Library Format C files
src_files = []
for filename in os.listdir(src_dir):
with open(os.path.join(src_dir, filename), "r") as fin:
src = fin.read()
src_files.append(src)
self._graph = graph_dict
self._params = params_dict
self._lib = src_files
self._parse_model(quantization)
def parse_module(self, module, quantization=None):
"""Parse the module. Build internal data structures.
Parameters
----------
module : TVM Runtime Module
The module to parse.
quantization: Dictionary
The quantization information for model inputs/outputs.
"""
graph = module.get_json()
if not isinstance(graph, (str,)):
try:
graph = graph._tvm_graph_json()
except AttributeError:
raise ValueError("Type %s is not supported" % type(graph))
# Sort params for debugging
params_dict = {}
tmp_params = module.get_params()
for k in sorted(tmp_params.keys()):
params_dict[k] = tmp_params[k]
self._graph = json.loads(graph)
self._params = params_dict
self._lib = module.get_lib()
self._parse_model(quantization)
def _emit_params_data(self, name, out_h, out_c):
"""Emits the network_data[c,h] files with parameters."""
name_upper = name.upper()
# XXX_data.h
out_h.write(
textwrap.dedent(
f"""\
#ifndef __{name_upper}_DATA_H_
#define __{name_upper}_DATA_H_
#include \"ai_runtime_api.h\"
AI_API_ENTRY
const ai_ptr ai_{name}_data_weights_get (void);
#endif /* __{name_upper}_DATA_H_ */
"""
)
)
# XXX_data.cc
out_c.write(
textwrap.dedent(
f"""
#include \"{name}_data.h\"
const ai_ptr ai_{name}_data_weights_get (void)
{{
AI_ALIGNED({self.DATA_ALIGNMENT_BYTES}) static const __attribute__ ((section(\".nn_weights\"))) uint8_t s_{name}_weights[] = {{
"""
)
)
# Weights are arranged in the order of 'params_'
offset = 0
for key in self._params:
data = self._params[key] # ND Array
npdata = data.asnumpy()
blob = npdata.tobytes()
out_c.write(f'// "{key}": \n')
out_c.write(f"\t")
count = 0
# Align by emitting garbage between un-aligned data
dl_tensor_name = get_input_tensor_name(key)
tensor = self._weights[dl_tensor_name]
tensor_offset = tensor["offset"]
tensor_size = tensor["size"]
while offset < tensor_offset:
count += 1
out_c.write("0x{:02X}, ".format(0))
if count == 12:
out_c.write("\n\t")
count = 0
offset += 1
for val in blob:
count += 1
out_c.write("0x{:02X}, ".format(val))
if count == 12:
out_c.write("\n\t")
count = 0
offset += tensor_size
out_c.write(f"\n")
out_c.write(
textwrap.dedent(
f"""\
}};
return (const ai_ptr)s_{name}_weights;
}}
"""
)
)
def _emit_open(self, name, out_h, out_c):
"""Emits the network.h file with a few network defines and
writes the header part of the network.c file."""
name_upper = name.upper()
input_size = len(self._input_data)
output_size = len(self._output_data)
# XXX.h
out_h.write(
textwrap.dedent(
f"""\
#ifndef __AI_{name_upper}_H__
#define __AI_{name_upper}_H__
#include \"ai_runtime_api.h\"
#define _{name_upper}_INPUTS_COUNT_ ({input_size})
#define _{name_upper}_OUTPUTS_COUNT_ ({output_size})
#define _{name_upper}_ACTIVATION_BYTES_ ({self._activations_size})
"""
)
)
# XXX.c
out_c.write(
textwrap.dedent(
f"""\
#include <stdio.h>
#include \"dlpack/dlpack.h\"
#include \"tvm/runtime/c_runtime_api.h\"
#include \"{name}.h\"
#include \"{name}_data.h\"
"""
)
)
def _emit_close(self, name, out_h, out_c):
"""Emits the ai_model_info structure."""
name_upper = name.upper()
# datetime object containing current date and time
now = datetime.now()
# dd/mm/YY H:M:S
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
# XXX.h
out_h.write(f"#endif /*__AI_{name_upper}_H__*/ \n")
# XXX.c
if self.activations_static:
out_c.write(
f'AI_ALIGNED({self.DATA_ALIGNMENT_BYTES}) __attribute__ ((section(".{name}.nn_data_act"))) uint8_t {name}_activations[{self._activations_size}];\n'
)
else:
out_c.write(f"AI_STATIC ai_ptr {name}_activations = NULL;")
# Emit network structure
num_inputs = len(self._input_data)
num_outputs = len(self._output_data)
tool_version = tvm.__version__
api_version = f"{AI_API_VERSION_MAJOR}.{AI_API_VERSION_MINOR}.{AI_API_VERSION_MICRO}.0"
out_c.write(
textwrap.dedent(
f"""
AI_API_ENTRY __attribute__ ((section(".nn_models"))) ai_model_info {name}_network = {{
.name = \"{name}\",
.datetime = \"{dt_string}\",
.revision = \"{AI_TOOLS_REVISION}\",
.tool_version = \"{tool_version}\",
.api_version = \"{api_version}\",
.n_nodes = {self._nodes_size},
.n_inputs = {num_inputs},
.n_outputs = {num_outputs},
.activations_size = {self._activations_size},
.params_size = {self._weights_size},
.activations = {name}_activations,
.inputs = _InputsList,
.outputs = _OutputsList,
.ai_get_params = &ai_{name}_data_weights_get,
.ai_create = &ai_{name}_create,
.ai_destroy = &ai_{name}_destroy,
.ai_run = &ai_{name}_run
}};
"""
)
)
def _emit_tensor_shape(self, dl_tensor_name, ndim, shape, strides, out_c):
out_c.write(f"AI_STATIC int64_t {dl_tensor_name}_shape[{ndim}] = {{{shape[1:-1]}}}; \n")
assert strides is None, f"###Error: non-compact tensors are not handled yet."
out_c.write(f"AI_STATIC int64_t {dl_tensor_name}_strides[{ndim}] = {{}}; \n")
def _emit_tensor_quant(self, dl_tensor_name, out_c):
if dl_tensor_name in self._quantization:
quantization = self._quantization[dl_tensor_name]
# At this time, TVM only supports quantization info with
# single output models.
elif dl_tensor_name in self._output_data and "output" in self._quantization.keys():
quantization = self._quantization["output"]
else:
quantization = None
if quantization is not None:
scale = quantization["scale"]
zero_point = quantization["zero_point"]
# Sometimes we get a scalar with ScaleAsNumpy.
            # This seems to mean the tensor is not quantized.
if not isinstance(scale, np.ndarray):
assert scale == 0.0, f"Non-quantized tensor with scale != 0.0"
assert (
not isinstance(zero_point, np.ndarray) and zero_point == 0
), f"Non-quantized tensor with zero_point != 0"
return None
scale_size = len(scale)
zero_point_size = len(zero_point)
assert len(scale) == len(
zero_point
), f"Inconsistent quantizations scale:{scale} vs zero-point:{zero_point}"
if len(scale) == 1:
quant_name = dl_tensor_name + "_quant"
out_c.write(f"AI_STATIC float {quant_name}_scale[{scale_size}] = {{ ")
for val in scale:
out_c.write(f"{val}, ")
out_c.write(f"}};\n")
out_c.write(f"AI_STATIC int32_t {quant_name}_zero_point[{zero_point_size}] = {{ ")
for val in zero_point:
out_c.write(f"{val}, ")
out_c.write(f"}};")
out_c.write(
textwrap.dedent(
f"""
AI_STATIC ai_quantization_info {quant_name} = {{
.scale = {quant_name}_scale,
.zero_point = {quant_name}_zero_point,
.dim = -1
}};
"""
)
)
return quant_name
return None
def _emit_tensor_init(self, dl_tensor_name, tensor, out_c):
"""Emits the tensor instantiation code."""
dltype = tensor["dltype"]
dims = tensor["dims"]
strides = tensor["strides"]
byte_offset = tensor["byte_offset"]
dtype = _get_type_data(dltype)
ndim = len(dims)
shape = str(dims)
self._emit_tensor_shape(dl_tensor_name, ndim, shape, strides, out_c)
# Quantization
quant_name = self._emit_tensor_quant(dl_tensor_name, out_c)
# Contents
#
# TODO: use the 'storage_id':
# " .ctx = {{ {} }}, \n".format(str(storage_id)[1:-1])
out_c.write(
textwrap.dedent(
f"""
AI_ALIGNED({self.DATA_ALIGNMENT_BYTES}) AI_STATIC ai_tensor {dl_tensor_name} = {{
.dltensor = {{
.data = (ai_ptr)(NULL),
.device = {{kDLCPU,0}},
.ndim = {ndim},
.dtype = {{{dtype}}},
.shape = {dl_tensor_name}_shape,
.strides = {dl_tensor_name}_strides,
.byte_offset = {byte_offset}
}},
"""
)
)
# Figure out quantization, if exists
if quant_name is not None:
out_c.write(f" .quant = &{quant_name} \n")
else:
out_c.write(f" .quant = NULL \n")
out_c.write(f"}}; \n")
def _emit_activation_buffers(self, name, out_c):
# pylint: disable=unused-argument
"""Emits activation tensors, including inputs/outputs."""
out_c.write(
textwrap.dedent(
f"""\
//
// Inputs:
//
"""
)
)
# shape/buffer
for dl_tensor_name in self._input_data:
tensor = self._input_data[dl_tensor_name]
self._emit_tensor_init(dl_tensor_name, tensor, out_c)
out_c.write(f"\n")
out_c.write(f"\n")
# tensor
idx = 0
out_c.write(f"AI_STATIC ai_tensor * _InputsList[] = {{ \n")
for dl_tensor_name in self._input_data:
out_c.write(f" &{dl_tensor_name}, // [{idx}]\n")
idx = idx + 1
out_c.write(f"}}; \n")
out_c.write(f"\n")
out_c.write(
textwrap.dedent(
f"""\
//
// Activations:
//
"""
)
)
for dl_tensor_name in self._activations:
tensor = self._activations[dl_tensor_name]
self._emit_tensor_init(dl_tensor_name, tensor, out_c)
out_c.write(f"\n")
# Outputs:
out_c.write(
textwrap.dedent(
f"""\
//
// Outputs:
//
"""
)
)
for dl_tensor_name in self._output_data:
tensor = self._output_data[dl_tensor_name]
self._emit_tensor_init(dl_tensor_name, tensor, out_c)
out_c.write(f"\n")
out_c.write(f"\n")
idx = 0
out_c.write(f"AI_STATIC ai_tensor * _OutputsList[] = {{ \n")
for dl_tensor_name in self._output_data:
out_c.write(f" &{dl_tensor_name}, // [{idx}]\n")
idx = idx + 1
out_c.write(f"}}; \n")
out_c.write(f"\n")
def _emit_params_buffers(self, name, out_c):
"""Emits all parameter tensors."""
out_c.write(
textwrap.dedent(
f"""
//
// Weights: {name}
//
"""
)
)
for dl_tensor_name in self._weights:
tensor = self._weights[dl_tensor_name]
self._emit_tensor_init(dl_tensor_name, tensor, out_c)
out_c.write(f"\n")
def _emit_network(self, name, out_c):
"""Emits prototypes for the network operator functions."""
out_c.write(
textwrap.dedent(
f"""
//
// Network: {name}
//
"""
)
)
for node in self._nodes:
if node["op"] == "null":
continue
assert node["op"] == "tvm_op", f"###Error: Only TVM ops are supported."
node_attrs = node["attrs"]
func_name = node_attrs["func_name"]
if func_name == "__nop":
continue
out_c.write(
f"TVM_DLL int32_t {func_name}(void * args, void * arg_type_ids, int32_t num_args); \n"
)
out_c.write(f"\n")
def _emit_tensor_activation(self, dl_tensor_name, tensor, out_c):
storage_id = tensor["storage_id"]
offset = tensor["offset"]
out_c.write(
textwrap.indent(
textwrap.dedent(
f"""
//
// {dl_tensor_name}: storage_id:{storage_id}
//
{dl_tensor_name}.dltensor.data = (ai_ptr)(activations + {offset});
"""
),
" ",
)
)
def _emit_activation_init(self, name, out_c):
"""Emits buffer initialization code for activation tensors."""
out_c.write(
textwrap.dedent(
f"""
// {DBAR}
// {name}_configure_activations
// {DBAR}
AI_STATIC AI_INLINE
ai_status {name}_configure_activations (
const ai_ptr activations
)
{{
if (activations == NULL) {{
TVMAPISetLastError (\"Non-null activations arena is required for this model.\");
return AI_STATUS_ERROR;
}}
"""
)
)
# Allocate inputs with the static model
if self.inputs_static:
for dl_tensor_name in self._input_data:
tensor = self._input_data[dl_tensor_name]
self._emit_tensor_activation(dl_tensor_name, tensor, out_c)
# Prepare activation buffers
for dl_tensor_name in self._activations:
tensor = self._activations[dl_tensor_name]
self._emit_tensor_activation(dl_tensor_name, tensor, out_c)
# Allocate outputs with the static model
if self.outputs_static:
for dl_tensor_name in self._output_data:
tensor = self._output_data[dl_tensor_name]
self._emit_tensor_activation(dl_tensor_name, tensor, out_c)
out_c.write(
textwrap.dedent(
f"""
return AI_STATUS_OK;
}}
"""
)
)
def _emit_params_init(self, name, out_c):
"""Emits buffer initialization code for params tensors."""
out_c.write(
textwrap.dedent(
f"""
// {DBAR}
// {name}_configure_weights
// {DBAR}
AI_STATIC AI_INLINE
ai_status {name}_configure_weights (
const ai_ptr weights
)
{{
if (weights == NULL) {{
TVMAPISetLastError(\"Non-null weights arena is required for this model.\");
return AI_STATUS_ERROR;
}}
"""
)
)
for dl_tensor_name in self._weights:
tensor = self._weights[dl_tensor_name]
offset = tensor["offset"]
out_c.write(
textwrap.indent(
textwrap.dedent(
f"""\
//
// {dl_tensor_name}
//
{dl_tensor_name}.dltensor.data = (ai_ptr)(weights + {offset});
"""
),
" ",
)
)
out_c.write(
textwrap.dedent(
f"""
return AI_STATUS_OK;
}}
"""
)
)
def _emit_init(self, name, out_c):
"""Emits buffer initialization code."""
self._emit_activation_init(name, out_c)
self._emit_params_init(name, out_c)
def _emit_run(self, name, out_h, out_c):
"""Emits the run function code."""
out_h.write(
textwrap.dedent(
f"""
AI_API_ENTRY
ai_status ai_{name}_run (
ai_tensor *inputs[],
ai_tensor *outputs[]
);
"""
)
)
out_c.write(
textwrap.dedent(
f"""
// {DBAR}
// ai_{name}_run
// {DBAR}
AI_API_ENTRY
ai_status ai_{name}_run (
ai_tensor *inputs[],
ai_tensor *outputs[]
)
{{
"""
)
)
# Execute nodes one by one
nid = 0
for node in self._nodes:
node_name = node["name"]
node_name_upper = node_name.upper()
nid += 1
if node["op"] == "null":
continue
assert node["op"] == "tvm_op", f"###Error: Only TVM ops are supported."
node_attrs = node["attrs"]
func_name = node_attrs["func_name"]
if func_name == "__nop":
continue
out_c.write(f" // \n")
out_c.write(f" // {func_name}\n")
out_c.write(f" // \n")
            # Prepare the TVM packed function - this is the one called below.
            # "__nop" has already been skipped above; skip "__copy" the same way.
            if func_name == "__copy":
                print(" exec: __copy")
                continue
# Get function from the TVM module
#
# void * args : arg_values.data()
# void * arg_type_ids : arg_tcodes.data()
# int32_t num_args : arg_values.size()
dl_args_name = _get_node_args_name(node_name)
dl_arg_types_name = _get_node_arg_types_name(node_name)
num_inputs = len(node["inputs"])
num_outputs = int(node_attrs["num_outputs"])
num_args = num_inputs + num_outputs
out_c.write(f" TVMValue {dl_args_name}[{num_args}]; \n")
out_c.write(f" int32_t {dl_arg_types_name}[{num_args}]; \n")
curr_idx = 0
for arg in node["inputs"]:
dl_tensor_name = self._get_node_arg_name(arg)
#
# If this input is not an activation or a parameter => find the input
#
if dl_tensor_name not in self._weights and dl_tensor_name not in self._activations:
                    assert dl_tensor_name in self._input_data, "Tensor {} is not registered.".format(
dl_tensor_name
)
input_idx = 0
for dl_entry_name in self._input_data:
if dl_entry_name == dl_tensor_name:
break
input_idx += 1
out_c.write(
f" {dl_args_name}[{curr_idx}].v_handle = &inputs[{input_idx}]->dltensor; \n"
)
else:
out_c.write(
f" {dl_args_name}[{curr_idx}].v_handle = &{dl_tensor_name}.dltensor; \n"
)
out_c.write(f" {dl_arg_types_name}[{curr_idx}] = kTVMNDArrayHandle; \n")
curr_idx += 1
for idx in range(num_outputs):
dl_tensor_name = get_output_tensor_name(node_name, idx)
# If this output is not an activation => find the output
if dl_tensor_name not in self._activations:
assert dl_tensor_name in self._output_data
output_idx = 0
for dl_exit_name in self._output_data:
if dl_exit_name == dl_tensor_name:
break
output_idx += 1
out_c.write(
f" {dl_args_name}[{curr_idx}].v_handle = &outputs[{output_idx}]->dltensor; \n"
)
else:
out_c.write(
f" {dl_args_name}[{curr_idx}].v_handle = &{dl_tensor_name}.dltensor; \n"
)
out_c.write(f" {dl_arg_types_name}[{curr_idx}] = kTVMNDArrayHandle; \n")
out_c.write(f"\n")
curr_idx += 1
# call this function
out_c.write(
textwrap.dedent(
f"""
#if (_VERBOSE_ > 0)
printf (\" {func_name} ... \\r\\n\");
#endif
if ({func_name} ({dl_args_name}, {dl_arg_types_name}, {num_args})) {{
TVMAPISetLastError("Invalid handle");
return AI_STATUS_ERROR;
}}
#if (_VERBOSE_ > 0)
printf (\" {func_name} Done.\\r\\n\");
#endif
"""
)
)
out_c.write(f"\n")
out_c.write(
textwrap.dedent(
f"""
return AI_STATUS_OK;
}}
"""
)
)
out_c.write(f"\n")
def _emit_create_destroy(self, name, out_h, out_c):
"""Emits the create/destroy functions."""
out_h.write(
textwrap.dedent(
f"""
AI_API_ENTRY
ai_status ai_{name}_create (
const ai_ptr weights,
const ai_ptr activations
);
"""
)
)
out_h.write(
textwrap.dedent(
f"""
AI_API_ENTRY
ai_status ai_{name}_destroy ();
"""
)
)
out_c.write(
textwrap.dedent(
f"""
// {DBAR}
// ai_{name}_create
// {DBAR}
AI_API_ENTRY
ai_status ai_{name}_create(
const ai_ptr weights,
const ai_ptr activations
)
{{
ai_status status = AI_STATUS_OK;
status = {name}_configure_weights (weights);
if (status != AI_STATUS_OK) {{
return status;
}}
status = {name}_configure_activations (activations);
if (status != AI_STATUS_OK) {{
return status;
}}
return AI_STATUS_OK;
}}
"""
)
)
out_c.write(
textwrap.dedent(
f"""
// {DBAR}
// ai_{name}_destroy
// {DBAR}
AI_API_ENTRY
ai_status ai_{name}_destroy ()
{{
return AI_STATUS_OK;
}}
"""
)
)
def emit_code(self, dest_dir, model_name):
"""Emits the C code implementing the model."""
# Build the directory structure
if os.path.exists(dest_dir):
raise ValueError(f"emit_code.Error: {dest_dir} exists.")
# Make a new one
os.makedirs(dest_dir)
# Fix the model name
model_name = re.sub("[^0-9a-zA-Z_]+", "_", model_name)
model_name = model_name.lower()
# Write the C code: we can parse the string
if isinstance(self._lib, list):
# List of strings from Model Library Format C files
for idx, src in enumerate(self._lib):
code = _preprocess_code(src)
filename = os.path.join(dest_dir, f"{model_name}_lib{idx}.c")
with open(filename, "w") as fout:
fout.write(code)
else:
# a TVM RuntimeGraphFactory
src = self._lib.get_source(fmt="c")
code = _preprocess_code(src)
filename = os.path.join(dest_dir, f"{model_name}_lib.c")
with open(filename, "w") as fout:
fout.write(code)
# Save params as binary data
saved_params = tvm.runtime.save_param_dict(self._params)
params_name = os.path.join(dest_dir, model_name + ".params")
with open(params_name, "wb") as f:
f.write(saved_params)
# Write the .json
graph_name = os.path.join(dest_dir, model_name + ".json")
json_string = json.dumps(self._graph, indent=4)
with open(graph_name, "w") as f:
print(json_string, file=f)
# emit X_data[c,h]
data_h_name = os.path.join(dest_dir, model_name + "_data.h")
data_c_name = os.path.join(dest_dir, model_name + "_data.c")
model_h_name = os.path.join(dest_dir, model_name + ".h")
model_c_name = os.path.join(dest_dir, model_name + ".c")
with contextlib.ExitStack() as exit_stack:
# emit X[c,h]
data_h = exit_stack.enter_context(open(data_h_name, "w"))
data_c = exit_stack.enter_context(open(data_c_name, "w"))
out_h = exit_stack.enter_context(open(model_h_name, "w"))
out_c = exit_stack.enter_context(open(model_c_name, "w"))
self._emit_params_data(model_name, data_h, data_c)
self._emit_open(model_name, out_h, out_c)
self._emit_params_buffers(model_name, out_c)
self._emit_activation_buffers(model_name, out_c)
self._emit_network(model_name, out_c)
self._emit_init(model_name, out_c)
self._emit_create_destroy(model_name, out_h, out_c)
self._emit_run(model_name, out_h, out_c)
self._emit_close(model_name, out_h, out_c)
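# Usage sketch (illustrative, hedged): drive the emitter from a Model Library
# Format archive produced by tvm.micro.export_model_library_format. Paths and
# the model name below are placeholders.
#
#     emitter = CodeEmitter()
#     emitter.parse_library_format("./model.tar", quantization=None)
#     emitter.emit_code("./generated", "my_network")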
| https://github.com/zk-ml/tachikoma |
python/tvm/micro/debugger.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines functions for controlling debuggers for micro TVM binaries."""
import atexit
import abc
import errno
import logging
import os
import shlex
import signal
import subprocess
import sys
import termios
import threading
import time
import psutil
from .._ffi import register_func
from . import class_factory
from . import transport
from .transport import base  # provides IoTimeoutError, raised by _Transport below
from .transport.file_descriptor import FdTransport
_LOG = logging.getLogger(__name__)
class Debugger(metaclass=abc.ABCMeta):
"""An interface for controlling micro TVM debuggers."""
@abc.abstractmethod
def start(self):
"""Start the debugger, but do not block on it.
The runtime will continue to be driven in the background.
"""
raise NotImplementedError()
@abc.abstractmethod
def stop(self):
"""Terminate the debugger."""
raise NotImplementedError()
class GdbDebugger(Debugger):
"""Handles launching, suspending signals, and potentially dealing with terminal issues."""
# Number of seconds to wait in stop() for a graceful shutdown. After this time has elapsed,
# the debugger is kill()'d.
_GRACEFUL_SHUTDOWN_TIMEOUT_SEC = 5.0
# The instance of GdbDebugger that's currently started.
_STARTED_INSTANCE = None
@classmethod
def _stop_all(cls):
if cls._STARTED_INSTANCE:
cls._STARTED_INSTANCE.stop()
def __init__(self):
super(GdbDebugger, self).__init__()
self._is_running = False
self._is_running_lock = threading.RLock()
self._child_exited_event = threading.Event()
self._signals_reset_event = threading.Event()
@abc.abstractmethod
def popen_kwargs(self):
raise NotImplementedError()
def _internal_stop(self):
if not self._is_running:
return
os.kill(os.getpid(), signal.SIGUSR1)
self._signals_reset_event.wait()
termios.tcsetattr(sys.stdin.fileno(), termios.TCSAFLUSH, self.old_termios)
try:
children = psutil.Process(self.popen.pid).children(recursive=True)
for c in children:
c.terminate()
_, alive = psutil.wait_procs(children, timeout=self._GRACEFUL_SHUTDOWN_TIMEOUT_SEC)
for a in alive:
a.kill()
except psutil.NoSuchProcess:
pass
finally:
self.__class__._STARTED_INSTANCE = None
self._is_running = False
self._child_exited_event.set()
def _wait_for_child(self):
self.popen.wait()
with self._is_running_lock:
self._internal_stop()
@classmethod
def _sigusr1_handler(cls, signum, stack_frame): # pylint: disable=unused-argument
assert (
cls._STARTED_INSTANCE is not None
), "overridden sigusr1 handler should not be invoked when GDB not started"
signal.signal(signal.SIGINT, cls._STARTED_INSTANCE.old_sigint_handler)
signal.signal(signal.SIGUSR1, cls._STARTED_INSTANCE.old_sigusr1_handler)
cls._STARTED_INSTANCE._signals_reset_event.set()
@classmethod
def _sigint_handler(cls, signum, stack_frame): # pylint: disable=unused-argument
assert (
cls._STARTED_INSTANCE is not None
), "overridden sigint handler should not be invoked when GDB not started"
with cls._STARTED_INSTANCE._is_running_lock:
exists = cls._STARTED_INSTANCE._is_running
if exists:
try:
os.killpg(cls._STARTED_INSTANCE.child_pgid, signal.SIGINT)
except ProcessLookupError:
pass
def start(self):
with self._is_running_lock:
assert not self._is_running
assert not self._STARTED_INSTANCE
kwargs = self.popen_kwargs()
self.did_start_new_session = kwargs.setdefault("start_new_session", True)
self.old_termios = termios.tcgetattr(sys.stdin.fileno())
self.popen = subprocess.Popen(**kwargs)
self._is_running = True
self.old_sigint_handler = signal.signal(signal.SIGINT, self._sigint_handler)
self.old_sigusr1_handler = signal.signal(signal.SIGUSR1, self._sigusr1_handler)
self.__class__._STARTED_INSTANCE = self
try:
self.child_pgid = os.getpgid(self.popen.pid)
except Exception:
self.stop()
raise
with self._is_running_lock:
self._is_child_alive = True
t = threading.Thread(target=self._wait_for_child)
t.daemon = True
t.start()
def stop(self):
self._child_exited_event.wait()
atexit.register(GdbDebugger._stop_all)
class GdbTransportDebugger(GdbDebugger):
"""A debugger that uses a single GDB subprocess as both the transport and the debugger.
Opens pipes for the target's stdin and stdout, launches GDB and configures GDB's target
arguments to read and write from the pipes using /dev/fd.
"""
def __init__(self, args, **popen_kw):
super(GdbTransportDebugger, self).__init__()
self.args = args
self.popen_kw = popen_kw
def popen_kwargs(self):
stdin_read, stdin_write = os.pipe()
stdout_read, stdout_write = os.pipe()
os.set_inheritable(stdin_read, True)
os.set_inheritable(stdout_write, True)
sysname = os.uname()[0]
if sysname == "Darwin":
args = [
"lldb",
"-O",
f"target create {self.args[0]}",
"-O",
f"settings set target.input-path /dev/fd/{stdin_read}",
"-O",
f"settings set target.output-path /dev/fd/{stdout_write}",
]
if len(self.args) > 1:
args.extend(
["-O", "settings set target.run-args {}".format(" ".join(self.args[1:]))]
)
elif sysname == "Linux":
args = [
"gdb",
"-ex",
f"file {self.args[0]}",
"-ex",
(
f"set args {' '.join(shlex.quote(a) for a in self.args[1:])} "
f"</dev/fd/{stdin_read} >/dev/fd/{stdout_write}"
),
]
else:
raise NotImplementedError(f"System {sysname} is not yet supported")
self.fd_transport = FdTransport(
stdout_read, stdin_write, transport.debug_transport_timeouts()
)
self.fd_transport.open()
return {
"args": args,
"pass_fds": [stdin_read, stdout_write],
}
def _internal_stop(self):
self.fd_transport.close()
super(GdbTransportDebugger, self)._internal_stop()
class _Transport(transport.Transport):
def __init__(self, gdb_transport_debugger):
self.gdb_transport_debugger = gdb_transport_debugger
def timeouts(self):
return transport.debug_transport_timeouts()
def open(self):
pass # Pipes opened by parent class.
def write(self, data, timeout_sec):
end_time = time.monotonic() + timeout_sec if timeout_sec is not None else None
while True:
try:
return self.gdb_transport_debugger.fd_transport.write(data, timeout_sec)
except OSError as exc:
# NOTE: this error sometimes happens when writes are initiated before the child
# process launches.
                    if exc.errno == errno.EAGAIN:
                        if end_time is None or time.monotonic() < end_time:
                            time.sleep(0.1)  # sleep to avoid excessive CPU usage
                            continue
                        raise base.IoTimeoutError()
                    raise exc
def read(self, n, timeout_sec):
end_time = time.monotonic() + timeout_sec if timeout_sec is not None else None
while True:
try:
return self.gdb_transport_debugger.fd_transport.read(n, timeout_sec)
except OSError as exc:
# NOTE: this error sometimes happens when reads are initiated before the child
# process launches.
                    if exc.errno == errno.EAGAIN:
                        if end_time is None or time.monotonic() < end_time:
                            time.sleep(0.1)  # sleep to avoid excessive CPU usage
                            continue
                        raise base.IoTimeoutError()
                    raise exc
def close(self):
pass # Pipes closed by parent class (DebugWrapperTransport calls stop() next).
def transport(self):
return self._Transport(self)
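# Usage sketch (illustrative, hedged): debug a host-built CRT binary while
# using the same GDB/LLDB child process as the transport. The binary path is
# a placeholder.
#
#     debugger = GdbTransportDebugger(args=["./build/microtvm_host_binary"])
#     debugger.start()
#     t = debugger.transport()  # Transport backed by the child's stdio pipes
#     ...
#     debugger.stop()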
class GdbRemoteDebugger(GdbDebugger):
"""A Debugger that invokes GDB and attaches to a remote GDBserver-based target."""
def __init__(
self, gdb_binary, remote_hostport, debug_binary, wrapping_context_manager=None, **popen_kw
):
super(GdbRemoteDebugger, self).__init__()
self.gdb_binary = gdb_binary
self.remote_hostport = remote_hostport
self.debug_binary = debug_binary
self.wrapping_context_manager = wrapping_context_manager
self.popen_kw = popen_kw
def popen_kwargs(self):
kwargs = {
"args": [
self.gdb_binary,
"-iex",
f"file {self.debug_binary}",
"-iex",
f"target remote {self.remote_hostport}",
],
}
kwargs.update(self.popen_kw)
return kwargs
def start(self):
if self.wrapping_context_manager is not None:
self.wrapping_context_manager.__enter__()
super(GdbRemoteDebugger, self).start()
def stop(self):
try:
super(GdbRemoteDebugger, self).stop()
finally:
if self.wrapping_context_manager is not None:
self.wrapping_context_manager.__exit__(None, None, None)
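# Usage sketch (illustrative, hedged): attach GDB to a GDBserver-style stub,
# e.g. one exposed by OpenOCD on localhost:3333. All values below are
# placeholders.
#
#     debugger = GdbRemoteDebugger(
#         gdb_binary="arm-none-eabi-gdb",
#         remote_hostport="localhost:3333",
#         debug_binary="./build/zephyr/zephyr.elf",
#     )
#     debugger.start()
#     ...
#     debugger.stop()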
GLOBAL_DEBUGGER = None
class DebuggerFactory(class_factory.ClassFactory):
SUPERCLASS = Debugger
def launch_debugger(debugger_factory, *args, **kw):
global GLOBAL_DEBUGGER
if GLOBAL_DEBUGGER is not None:
stop_debugger()
GLOBAL_DEBUGGER = debugger_factory.instantiate(*args, **kw)
GLOBAL_DEBUGGER.start()
@register_func("tvm.micro.debugger.launch_debugger")
def _launch_debugger(debugger_factory_json):
launch_debugger(DebuggerFactory.from_json(debugger_factory_json))
@register_func("tvm.micro.debugger.stop_debugger")
def stop_debugger():
global GLOBAL_DEBUGGER
if GLOBAL_DEBUGGER is not None:
try:
GLOBAL_DEBUGGER.stop()
finally:
GLOBAL_DEBUGGER = None
class RpcDebugger(Debugger):
"""A Debugger instance that launches the actual debugger on a remote TVM RPC server."""
def __init__(self, rpc_session, factory, wrapping_context_manager=None):
super(RpcDebugger, self).__init__()
self._factory = factory
self.launch_debugger = rpc_session.get_function("tvm.micro.debugger.launch_debugger")
self.stop_debugger = rpc_session.get_function("tvm.micro.debugger.stop_debugger")
self.wrapping_context_manager = wrapping_context_manager
def start(self):
if self.wrapping_context_manager is not None:
self.wrapping_context_manager.__enter__()
try:
self.launch_debugger(self._factory.to_json)
except Exception:
if self.wrapping_context_manager is not None:
self.wrapping_context_manager.__exit__(None, None, None)
raise
try:
input("Press [Enter] when debugger is set")
except Exception:
self.stop()
raise
self._is_running = True
def stop(self):
try:
self.stop_debugger()
finally:
if self.wrapping_context_manager is not None:
self.wrapping_context_manager.__exit__(None, None, None)
| https://github.com/zk-ml/tachikoma |
python/tvm/micro/model_library_format.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines functions for exporting to Model Library Format."""
import datetime
import json
import os
import pathlib
import re
import tarfile
import typing
import tvm
from tvm.ir.type import TupleType
from tvm.micro import get_standalone_crt_dir
from .._ffi import get_global_func
from ..contrib import utils
from ..driver import build_module
from ..relay.backend import executor_factory
from ..relay.backend.name_transforms import to_c_variable_style, prefix_generated_name
from ..relay import param_dict
from ..tir import expr
# This should be kept identical to runtime::symbol::tvm_module_main
MAIN_FUNC_NAME_STR = "__tvm_main__"
STANDALONE_CRT_URL = "./runtime"
METADATA_FILE = "metadata.json"
class UnsupportedInModelLibraryFormatError(Exception):
"""Raised when export_model_library_format does not support the given Module tree."""
def generate_c_interface_header(
module_name, inputs, outputs, pools, io_pool_allocations, devices, workspace_size, include_path
):
"""Generate C Interface header to be included in MLF"""
mangled_name = to_c_variable_style(prefix_generated_name(module_name))
metadata_header = os.path.join(include_path, f"{mangled_name}.h")
interface_c_create = tvm._ffi.get_global_func("runtime.InterfaceCCreate")
interface_c_module = interface_c_create(
module_name, inputs, outputs, pools, io_pool_allocations, devices, workspace_size
)
with open(metadata_header, "w") as header_file:
header_file.write(interface_c_module.get_source())
return metadata_header
# List of type_key for modules which are ephemeral and do not need to be exported.
EPHEMERAL_MODULE_TYPE_KEYS = ("metadata_module",)
def _populate_codegen_dir(
mods: typing.Union[
typing.List[executor_factory.ExecutorFactoryModule],
typing.List[tvm.runtime.Module],
],
codegen_dir: str,
):
"""Populate the codegen sub-directory as part of a Model Library Format export.
Parameters
----------
mods : List[tvm.relay.backend.executor_factory.ExecutorFactoryModule], List[tvm.runtime.Module]
A list of the return value of tvm.relay.build, which
will be exported into Model Library Format.
codegen_dir : str
Path to the codegen directory on disk.
"""
dso_modules = []
for mod in mods:
if isinstance(mod, executor_factory.ExecutorFactoryModule):
lib = mod.lib
elif isinstance(mod, tvm.runtime.Module):
lib = mod
else:
            raise RuntimeError(f"Unsupported module type: {type(mod)}")
dso_modules = lib._collect_dso_modules()
non_dso_modules = lib._collect_from_import_tree(lambda m: m not in dso_modules)
# Filter ephemeral modules which cannot be exported.
dso_modules = [m for m in dso_modules if m.type_key not in EPHEMERAL_MODULE_TYPE_KEYS]
non_dso_modules = [
m for m in non_dso_modules if m.type_key not in EPHEMERAL_MODULE_TYPE_KEYS
]
if non_dso_modules:
raise UnsupportedInModelLibraryFormatError(
f"Don't know how to export non-c or non-llvm modules; found: {non_dso_modules!r}"
)
mod_indices = {"lib": 0, "src": 0}
host_codegen_dir = os.path.join(codegen_dir, "host")
lib_name = (
f"{mod.libmod_name}_lib"
if isinstance(mod, executor_factory.ExecutorFactoryModule)
else "lib"
)
for dso_mod in dso_modules:
if dso_mod.type_key == "c":
assert dso_mod.format in ["c", "cc", "cpp"]
ext = dso_mod.format
index = mod_indices["src"]
mod_indices["src"] += 1
parent_dir = os.path.join(host_codegen_dir, "src")
file_name = os.path.join(parent_dir, f"{lib_name}{index}.{ext}")
elif dso_mod.type_key == "llvm":
index = mod_indices["lib"]
mod_indices["lib"] += 1
parent_dir = os.path.join(host_codegen_dir, "lib")
file_name = os.path.join(parent_dir, f"{lib_name}{index}.o")
else:
            assert (
                False
            ), f"do not expect module with type_key={dso_mod.type_key} from _collect_dso_modules"
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
dso_mod.save(file_name)
def _build_memory_map(mod):
ret = dict()
if isinstance(mod, executor_factory.GraphExecutorFactoryModule):
ret["sids"] = _build_sid_map(mod.graph_json)
ret["functions"] = _build_function_memory_map(mod.function_metadata)
return ret
def _build_sid_map(graph_json):
"""Build a simpler storage id info map from graph JSON.
Parameters
----------
graph_json : str
String representation of the graph_json created from tvm.relay.build().
Returns
-------
list :
A list with one entry per storage id describing that memory.
"""
graph = json.loads(graph_json)
seen_storage_ids = set()
memory_map = []
for node_id, storage_id in enumerate(graph["attrs"]["storage_id"][1]):
if storage_id in seen_storage_ids:
continue
seen_storage_ids.add(storage_id)
num_elements = 1
for dim in graph["attrs"]["shape"][1][storage_id]:
num_elements *= dim
dltype = graph["attrs"]["dltype"][1][storage_id]
m = re.match(r"^[a-zA-Z]+([0-9]+)$", dltype)
assert m, f"Exported graph contains unknown dltype {dltype}"
elem_bits = int(m.group(1))
map_entry = {
"storage_id": storage_id,
"size_bytes": (num_elements * elem_bits + 7) // 8,
}
if node_id in graph["arg_nodes"]:
map_entry["input_binding"] = graph["nodes"][node_id]["name"]
memory_map.append(map_entry)
return memory_map
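# Worked sketch (hypothetical minimal graph JSON): a single float32 input of
# shape [1, 4] occupies one storage id of (4 elements * 32 bits + 7) // 8 = 16
# bytes, and its input binding name is recorded because node 0 is an arg node.
#
#     graph_json = json.dumps({
#         "nodes": [{"name": "x"}],
#         "arg_nodes": [0],
#         "attrs": {
#             "storage_id": ["list_int", [0]],
#             "shape": ["list_shape", [[1, 4]]],
#             "dltype": ["list_str", ["float32"]],
#         },
#     })
#     _build_sid_map(graph_json)
#     # -> [{"storage_id": 0, "size_bytes": 16, "input_binding": "x"}]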
def _build_function_memory_map(function_metadata):
"""Build a simple map that shows how much workspace is required to execute
each primitive function. The main_func describes how much memory is required
to execute the main control code.
Parameters
----------
function_metadata : Map<String, FunctionInfo>
This contains all the compiled metadata on a function basis
Returns
-------
    dict :
        This will have two entries:
        1.) A list with one entry per function describing the local memory it uses.
        2.) A global memory requirement if all functions are executed sequentially.
"""
device_max_workspace = dict()
main_func_metadata = function_metadata[MAIN_FUNC_NAME_STR]
func_entries = []
target_local_entries = dict()
for func_name, finfo in function_metadata.items():
# Skip a few unsupported cases:
# 1. The main function metadata is exported elsewhere.
# 2. BYOC operator implementations do not currently export useful FunctionInfo.
if func_name == MAIN_FUNC_NAME_STR or not finfo.tir_primfuncs:
continue
if func_name not in target_local_entries.keys():
target_local_entries[func_name] = list()
for target in dict(finfo.workspace_sizes).keys():
workspace_size = finfo.workspace_sizes[target]
target_entry = {
"device": int(target.get_target_device_type()),
"workspace_size_bytes": int(workspace_size),
}
target_local_entries[func_name].append(target_entry)
if workspace_size >= device_max_workspace.get(int(target.get_target_device_type()), 0):
device_max_workspace[int(target.get_target_device_type())] = workspace_size
for func_name, target_entries_ in target_local_entries.items():
func_entry = {
"function_name": str(func_name),
"workspace": target_entries_,
}
func_entries.append(func_entry)
target_main_entries = dict()
def _create_empty_entry(target_device_type):
return {
"device": int(target_device_type),
"workspace_size_bytes": 0,
"constants_size_bytes": 0,
"io_size_bytes": 0,
}
for target in dict(main_func_metadata.workspace_sizes).keys():
main_func_local_workspace = main_func_metadata.workspace_sizes[target]
target_main_entries[int(target.get_target_device_type())] = _create_empty_entry(
int(target.get_target_device_type())
)
target_main_entries[int(target.get_target_device_type())]["workspace_size_bytes"] = int(
device_max_workspace.get(int(target.get_target_device_type()), 0)
) + int(main_func_local_workspace)
for target in dict(main_func_metadata.constant_sizes).keys():
if int(target.get_target_device_type()) not in target_main_entries.keys():
target_main_entries[int(target.get_target_device_type())] = _create_empty_entry(
int(target.get_target_device_type())
)
target_main_entries[int(target.get_target_device_type())]["constants_size_bytes"] = int(
main_func_metadata.constant_sizes[target]
)
for target in dict(main_func_metadata.io_sizes).keys():
if int(target.get_target_device_type()) not in target_main_entries.keys():
target_main_entries[int(target.get_target_device_type())] = _create_empty_entry(
int(target.get_target_device_type())
)
target_main_entries[int(target.get_target_device_type())]["io_size_bytes"] = int(
main_func_metadata.io_sizes[target]
)
ret = {
"operator_functions": func_entries,
"main": list(target_main_entries.values()),
}
return ret
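# For reference, the structure returned above looks roughly like this (all values
# illustrative, not taken from a real build):
# {
#     "operator_functions": [
#         {"function_name": "fused_nn_conv2d",
#          "workspace": [{"device": 1, "workspace_size_bytes": 2048}]},
#     ],
#     "main": [{"device": 1, "workspace_size_bytes": 4096,
#               "constants_size_bytes": 128, "io_size_bytes": 64}],
# }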
def _get_main_relay_func(mod: executor_factory.ExecutorFactoryModule):
main_func = mod.function_metadata[MAIN_FUNC_NAME_STR]
target = list(main_func.relay_primfuncs.keys())[0]
return main_func.relay_primfuncs[target]
def _convert_tuple_to_outputs(ret_type, offset=0):
outputs = []
added_fields = len(ret_type.fields)
for output_index in range(added_fields):
next_output = offset + len(outputs)
if isinstance(ret_type.fields[output_index], TupleType):
outputs.extend(_convert_tuple_to_outputs(ret_type.fields[output_index], next_output))
else:
outputs.append(f"output{next_output}")
return outputs
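# Illustrative sketch: flattening a nested tuple return type into sequentially
# numbered output names. The TensorType/TupleType values here are assumptions.
def _example_convert_tuple_to_outputs():
    t = tvm.ir.TensorType((1,), "float32")
    return _convert_tuple_to_outputs(TupleType([t, TupleType([t, t])]))
    # -> ["output0", "output1", "output2"]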
def _get_inputs_and_outputs_from_module(mod):
inputs = [str(input_var.name) for input_var in mod.executor_codegen_metadata.inputs]
outputs = list(mod.executor_codegen_metadata.outputs)
return inputs, outputs
def _get_pools_from_module(mod):
return list(dict(mod.executor_codegen_metadata.pool_inputs).values())
def _get_io_pool_allocation_from_module(mod):
return dict(mod.executor_codegen_metadata.io_pool_allocations)
def _should_generate_interface_header(mod):
return "interface-api" in mod.executor and mod.executor["interface-api"] == "c"
def _make_tar(source_dir, tar_file_path, modules):
"""Build a tar file from source_dir."""
with tarfile.open(tar_file_path, "w") as tar_f:
def reset(tarinfo):
tarinfo.uid = tarinfo.gid = 0
tarinfo.uname = tarinfo.gname = "root"
return tarinfo
tar_f.add(str(source_dir), arcname=".", filter=reset)
for mod in modules:
is_aot = isinstance(mod, executor_factory.AOTExecutorFactoryModule)
if is_aot and str(mod.runtime) == "crt":
tar_f.add(get_standalone_crt_dir(), arcname=STANDALONE_CRT_URL)
break
_GENERATED_VERSION = 7
def _is_module_names_unique(mods: typing.List[executor_factory.ExecutorFactoryModule]):
"""Check if built modules have unique names.
Parameters
----------
mods : List[tvm.relay.backend.executor_factory.ExecutorFactoryModule]
A list of the return value of tvm.relay.build,
which will be exported into Model Library Format.
"""
all_names = []
for mod in mods:
all_names.append(mod.libmod_name)
return len(set(all_names)) == len(all_names)
def _export_graph_model_library_format(
mods: typing.List[executor_factory.ExecutorFactoryModule], tempdir: pathlib.Path
):
"""Export a tvm.relay.build artifact in Model Library Format.
Parameters
----------
mods : List[tvm.relay.backend.executor_factory.ExecutorFactoryModule]
A list of the return value of tvm.relay.build,
which will be exported into Model Library Format.
tempdir : pathlib.Path
Temporary directory to populate with Model Library Format contents.
"""
assert _is_module_names_unique(mods), "Multiple modules should have unique names."
metadata = {
"version": _GENERATED_VERSION,
}
metadata["modules"] = {}
for mod in mods:
is_aot = isinstance(mod, executor_factory.AOTExecutorFactoryModule)
executor = ["aot"] if is_aot else ["graph"]
module_name = mod.libmod_name
metadata["modules"][module_name] = {
"model_name": module_name,
"export_datetime": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%SZ"),
"memory": _build_memory_map(mod),
"target": [str(t) for t in mod.target],
"executors": executor,
"style": "full-model",
}
if is_aot and (str(mod.runtime) == "crt"):
standalone_crt = {
"short_name": "tvm_standalone_crt",
"url": f"{STANDALONE_CRT_URL}",
"url_type": "mlf_path",
"version_spec": f"{tvm.__version__}",
}
external_dependencies = [standalone_crt]
metadata["modules"][module_name]["external_dependencies"] = external_dependencies
with open(tempdir / METADATA_FILE, "w") as json_f:
json.dump(metadata, json_f, indent=2, sort_keys=True)
codegen_dir = tempdir / "codegen"
codegen_dir.mkdir()
_populate_codegen_dir(mods, codegen_dir)
parameters_dir = tempdir / "parameters"
parameters_dir.mkdir()
src_dir = tempdir / "src"
src_dir.mkdir()
graph_config_dir = tempdir / "executor-config" / "graph"
for mod in mods:
if _should_generate_interface_header(mod):
include_path = codegen_dir / "host" / "include"
if not include_path.exists():
include_path.mkdir()
inputs, outputs = _get_inputs_and_outputs_from_module(mod)
devices = mod.get_devices()
pools = _get_pools_from_module(mod)
io_pool_allocations = _get_io_pool_allocation_from_module(mod)
workspace_size = int(
metadata["modules"][mod.libmod_name]["memory"]["functions"]["main"][0][
"workspace_size_bytes"
]
)
generate_c_interface_header(
mod.libmod_name,
inputs,
outputs,
pools,
io_pool_allocations,
devices,
workspace_size,
include_path,
)
is_aot = isinstance(mod, executor_factory.AOTExecutorFactoryModule)
param_filename = parameters_dir / f"{mod.libmod_name}.params"
with open(param_filename, "wb") as f:
f.write(param_dict.save_param_dict(mod.params))
with open(src_dir / f"{mod.libmod_name}.relay", "w") as f:
f.write(str(mod.ir_mod))
if not is_aot:
if not graph_config_dir.exists():
graph_config_dir.mkdir(parents=True)
with open(graph_config_dir / f"{mod.libmod_name}.graph", "w") as f:
f.write(mod.get_executor_config())
class NonStaticShapeError(Exception):
"""Raised when a shape has elements other than IntImm."""
def _shape_to_size(shape, dtype):
    # Check the match before dereferencing it; as previously written, an unknown
    # dtype raised AttributeError before the assert message could fire.
    m = re.match(r"((float)|(int))(?P<width_bits>[0-9]+)", dtype)
    assert m is not None, f"don't know how to compute size of type {dtype}"
    bits_per_item = int(m.group("width_bits"))
total_bits = bits_per_item
for s in shape:
total_bits *= s
return (total_bits + 7) // 8
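# Illustrative check of the computation above (shapes and dtypes are assumptions):
def _example_shape_to_size():
    assert _shape_to_size([2, 3], "float32") == 24  # 192 bits -> 24 bytes
    assert _shape_to_size([5], "int4") == 3  # 20 bits round up to 3 whole bytes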
def _write_tir_and_build_operator_memory_map(src_dir, targets, ir_module_by_target):
def _eval_shape(param_name, buffer_shape):
shape = []
for x in buffer_shape:
if not isinstance(x, expr.IntImm):
raise NonStaticShapeError(
f"Parameter {param_name} has shape with non-IntImm elements: {buffer_shape}"
)
shape.append(x.value)
return shape
memory_map = {}
for target in targets:
# TODO(mbs): The device type is not unique, better would be to use target.kind.name
target_device_type = target.get_target_device_type()
ir_mod = ir_module_by_target[target]
printer = get_global_func("tir.ModelLibraryFormatPrinter")(False, None, False)
with open(src_dir / f"tir-{target_device_type}.txt", "w") as f:
f.write(printer["print"](ir_mod))
for v in ir_mod.get_global_vars():
map_entry = []
for p, b in ir_mod[v.name_hint].buffer_map.items():
shape = _eval_shape(p.name, b.shape)
buffer_size_bytes = _shape_to_size(shape, str(b.dtype))
# NOTE: cannot tell what is an input or output at this point.
map_entry.append(
{
"size_bytes": buffer_size_bytes,
"shape": [int(x) for x in b.shape],
"dtype": b.dtype,
"input_binding": printer["get_var_name"](p),
}
)
memory_map[v.name_hint] = map_entry
return memory_map
def _export_operator_model_library_format(mod: build_module.OperatorModule, tempdir):
"""Export the result of tvm.build() in Model Library Format.
Parameters
----------
mod : runtime.Module
The Module returned from tvm.build().
tempdir : str
Path to the .tar archive to generate.
"""
targets = []
for target in mod.ir_module_by_target.keys():
if str(target.kind) not in ("llvm", "c"):
raise UnsupportedInModelLibraryFormatError(
f"Operator has non-DSO-exportable target {target!s}, which is not yet supported in "
"Model Library Format"
)
targets.append(target)
src_dir = tempdir / "src"
src_dir.mkdir()
memory_map = _write_tir_and_build_operator_memory_map(src_dir, targets, mod.ir_module_by_target)
metadata = {
"version": _GENERATED_VERSION,
"model_name": mod.name,
"export_datetime": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%SZ"),
"memory": memory_map,
"target": [str(t) for t in targets],
"executors": [],
"style": "operator",
}
with open(tempdir / METADATA_FILE, "w") as metadata_f:
json.dump(metadata, metadata_f)
codegen_dir = tempdir / "codegen"
codegen_dir.mkdir()
    _populate_codegen_dir([mod], codegen_dir)
ExportableModule = typing.Union[
build_module.OperatorModule,
executor_factory.AOTExecutorFactoryModule,
executor_factory.GraphExecutorFactoryModule,
]
def export_model_library_format(
mods: typing.Union[ExportableModule, typing.List[ExportableModule]],
file_name: typing.Union[str, pathlib.Path],
):
"""Export the build artifact in Model Library Format.
This function creates a .tar archive containing the build artifacts in a standardized
layout. It's intended to allow downstream automation to build TVM artifacts against the C
runtime.
Parameters
----------
    mods : ExportableModule, List[ExportableModule]
The return value of tvm.build or tvm.relay.build.
file_name : str
Path to the .tar archive to generate.
Returns
-------
file_name : str
The path to the generated .tar archive.
"""
modules = mods
if not isinstance(mods, list):
        modules = [mods]
operator_module_type = all(isinstance(mod, build_module.OperatorModule) for mod in modules)
graph_module_type = all(
isinstance(
mod,
(
executor_factory.AOTExecutorFactoryModule,
executor_factory.GraphExecutorFactoryModule,
),
)
for mod in modules
)
file_name = pathlib.Path(file_name)
tempdir = utils.tempdir()
if operator_module_type:
if len(modules) != 1:
raise RuntimeError("Multiple operator is not supported.")
_export_operator_model_library_format(modules[0], tempdir.path)
elif graph_module_type:
_export_graph_model_library_format(modules, tempdir.path)
else:
raise NotImplementedError(
f"Don't know how to export module of type {modules[0].__class__!r}"
)
_make_tar(tempdir.path, file_name, modules)
return file_name
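# Usage sketch (illustrative; `relay_mod`, `params`, and the output path are
# assumptions, not defined in this module):
#
#     factory = tvm.relay.build(relay_mod, target="c", params=params)
#     export_model_library_format(factory, "./model.tar")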
| https://github.com/zk-ml/tachikoma |
python/tvm/micro/project.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines glue wrappers around the Project API which mate to TVM interfaces."""
import pathlib
from typing import Union
from .. import __version__
from ..contrib import utils
from .build import get_standalone_crt_dir
from .model_library_format import ExportableModule, export_model_library_format
from .project_api import client
from .transport import Transport, TransportTimeouts
class ProjectTransport(Transport):
"""A Transport implementation that uses the Project API client."""
def __init__(self, api_client, options):
self._api_client = api_client
self._options = options
self._timeouts = None
def timeouts(self):
assert self._timeouts is not None, "Transport not yet opened"
return self._timeouts
def open(self):
reply = self._api_client.open_transport(self._options)
self._timeouts = TransportTimeouts(**reply["timeouts"])
def close(self):
if not self._api_client.is_shutdown:
self._api_client.close_transport()
self._api_client.shutdown()
def write(self, data, timeout_sec):
self._api_client.write_transport(data, timeout_sec)
def read(self, n, timeout_sec):
return self._api_client.read_transport(n, timeout_sec)["data"]
class TemplateProjectError(Exception):
"""Raised when the Project API server given to GeneratedProject reports is_template=True."""
class GeneratedProject:
"""Defines a glue interface to interact with a generated project through the API server."""
@classmethod
def from_directory(cls, project_dir: Union[pathlib.Path, str], options: dict):
return cls(client.instantiate_from_dir(project_dir), options)
def __init__(self, api_client, options):
self._api_client = api_client
self._options = options
self._info = self._api_client.server_info_query(__version__)
if self._info["is_template"]:
raise TemplateProjectError()
def build(self):
self._api_client.build(self._options)
def flash(self):
self._api_client.flash(self._options)
def transport(self):
return ProjectTransport(self._api_client, self._options)
def info(self):
return self._info
@property
def options(self):
return self._options
@options.setter
def options(self, options):
self._options = options
class NotATemplateProjectError(Exception):
"""Raised when the API server given to TemplateProject reports is_template=false."""
class TemplateProject:
"""Defines a glue interface to interact with a template project through the API Server."""
@classmethod
def from_directory(cls, template_project_dir):
return cls(client.instantiate_from_dir(template_project_dir))
def __init__(self, api_client):
self._api_client = api_client
self._info = self._api_client.server_info_query(__version__)
if not self._info["is_template"]:
raise NotATemplateProjectError()
def _check_project_options(self, options: dict):
"""Check if options are valid ProjectOptions"""
available_options = [option["name"] for option in self.info()["project_options"]]
if options and not set(options.keys()).issubset(available_options):
raise ValueError(
f"""options:{list(options)} include non valid ProjectOptions.
Here is a list of available options:{list(available_options)}."""
)
def generate_project_from_mlf(self, model_library_format_path, project_dir, options: dict):
"""Generate a project from MLF file."""
self._check_project_options(options)
self._api_client.generate_project(
model_library_format_path=str(model_library_format_path),
standalone_crt_dir=get_standalone_crt_dir(),
project_dir=project_dir,
options=options,
)
return GeneratedProject.from_directory(project_dir, options)
def info(self):
return self._info
def generate_project(self, graph_executor_factory, project_dir, options):
"""Generate a project given GraphRuntimeFactory."""
model_library_dir = utils.tempdir()
model_library_format_path = model_library_dir.relpath("model.tar")
export_model_library_format(graph_executor_factory, model_library_format_path)
return self.generate_project_from_mlf(model_library_format_path, project_dir, options)
def generate_project(
template_project_dir: Union[pathlib.Path, str],
module: ExportableModule,
generated_project_dir: Union[pathlib.Path, str],
options: dict = None,
):
"""Generate a project for an embedded platform that contains the given model.
Parameters
----------
    template_project_dir : pathlib.Path or str
        Path to a template project containing a microTVM Project API server.
    module : ExportableModule
        A runtime.Module exportable as Model Library Format. The value returned from tvm.relay.build
        or tvm.build.
    generated_project_dir : pathlib.Path or str
        Path to a directory to be created and filled with the built project.
    options : dict
        If given, Project API options given to the microTVM API server found in both
        template_project_dir and generated_project_dir.
Returns
-------
GeneratedProject :
A class that wraps the generated project and which can be used to further interact with it.
"""
template = TemplateProject.from_directory(str(template_project_dir))
return template.generate_project(module, str(generated_project_dir), options)
def generate_project_from_mlf(
template_project_dir: Union[pathlib.Path, str],
project_dir: Union[pathlib.Path, str],
mlf_path: Union[pathlib.Path, str],
options: dict,
):
"""Generate a project from a platform template and an existing Model Library Format archive.
Parameters
----------
    template_project_dir : pathlib.Path or str
Path to a template project containing a microTVM Project API server.
project_dir : pathlib.Path or str
Path to a directory where the project will be created.
mlf_path : pathlib.Path or str
Path to the Model Library Format archive that will be used when creating
the new project. The archive file will be copied to project_dir.
options : dict
Project API options given to the microTVM API server for the specified platform.
Returns
-------
GeneratedProject :
A class that wraps the generated project and which can be used to further interact with it.
"""
template = TemplateProject.from_directory(str(template_project_dir))
return template.generate_project_from_mlf(str(mlf_path), str(project_dir), options)
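# Usage sketch (illustrative; the template and output paths are assumptions):
#
#     project = generate_project_from_mlf(
#         "/path/to/template_project", "./generated_project", "./model.tar", {}
#     )
#     project.build()
#     project.flash()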
| https://github.com/zk-ml/tachikoma |
python/tvm/micro/project_api/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""MicroTVM Project API Client and Server"""
| https://github.com/zk-ml/tachikoma |
python/tvm/micro/project_api/client.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Project API client.
"""
import base64
import io
import json
import logging
import platform
import os
import pathlib
import subprocess
import sys
import typing
from . import server
_LOG = logging.getLogger(__name__)
class ProjectAPIErrorBase(Exception):
"""Base class for all Project API errors."""
class ConnectionShutdownError(ProjectAPIErrorBase):
"""Raised when a request is made but the connection has been closed."""
class MalformedReplyError(ProjectAPIErrorBase):
"""Raised when the server responds with an invalid reply."""
class MismatchedIdError(ProjectAPIErrorBase):
"""Raised when the reply ID does not match the request."""
class ProjectAPIServerNotFoundError(ProjectAPIErrorBase):
"""Raised when the Project API server can't be found in the repo."""
class UnsupportedProtocolVersionError(ProjectAPIErrorBase):
"""Raised when the protocol version returned by the API server is unsupported."""
class RPCError(ProjectAPIErrorBase):
def __init__(self, request, error):
        super().__init__()
self.request = request
self.error = error
def __str__(self):
return f"Calling project API method {self.request['method']}:" "\n" f"{self.error}"
class ProjectAPIClient:
"""A client for the Project API."""
def __init__(
self,
read_file: typing.BinaryIO,
write_file: typing.BinaryIO,
testonly_did_write_request: typing.Optional[typing.Callable] = None,
):
self.read_file = io.TextIOWrapper(read_file, encoding="UTF-8", errors="strict")
self.write_file = io.TextIOWrapper(
write_file, encoding="UTF-8", errors="strict", write_through=True
)
self.testonly_did_write_request = testonly_did_write_request
self.next_request_id = 1
@property
def is_shutdown(self):
return self.read_file.closed
def shutdown(self):
if self.is_shutdown: # pylint: disable=using-constant-test
return
self.read_file.close()
self.write_file.close()
def _request_reply(self, method, params):
if self.is_shutdown: # pylint: disable=using-constant-test
raise ConnectionShutdownError("connection already closed")
request = {
"jsonrpc": "2.0",
"method": method,
"params": params,
"id": self.next_request_id,
}
self.next_request_id += 1
request_str = json.dumps(request)
self.write_file.write(request_str)
_LOG.debug("send -> %s", request_str)
self.write_file.write("\n")
if self.testonly_did_write_request:
self.testonly_did_write_request() # Allow test to assert on server processing.
reply_line = self.read_file.readline()
_LOG.debug("recv <- %s", reply_line)
if not reply_line:
self.shutdown()
raise ConnectionShutdownError("got EOF reading reply from API server")
reply = json.loads(reply_line)
if reply.get("jsonrpc") != "2.0":
raise MalformedReplyError(
f"Server reply should include 'jsonrpc': '2.0'; "
f"saw jsonrpc={reply.get('jsonrpc')!r}"
)
if reply["id"] != request["id"]:
raise MismatchedIdError(
f"Reply id ({reply['id']}) does not equal request id ({request['id']}"
)
if "error" in reply:
raise server.JSONRPCError.from_json(f"calling method {method}", reply["error"])
if "result" not in reply:
raise MalformedReplyError(f"Expected 'result' key in server reply, got {reply!r}")
return reply["result"]
def server_info_query(self, tvm_version: str):
reply = self._request_reply("server_info_query", {"tvm_version": tvm_version})
if reply["protocol_version"] != server.ProjectAPIServer._PROTOCOL_VERSION:
raise UnsupportedProtocolVersionError(
f'microTVM API Server supports protocol version {reply["protocol_version"]}; '
f"want {server.ProjectAPIServer._PROTOCOL_VERSION}"
)
return reply
def generate_project(
self,
model_library_format_path: str,
standalone_crt_dir: str,
project_dir: str,
options: dict = None,
):
return self._request_reply(
"generate_project",
{
"model_library_format_path": model_library_format_path,
"standalone_crt_dir": standalone_crt_dir,
"project_dir": project_dir,
"options": (options if options is not None else {}),
},
)
def build(self, options: dict = None):
return self._request_reply("build", {"options": (options if options is not None else {})})
def flash(self, options: dict = None):
return self._request_reply("flash", {"options": (options if options is not None else {})})
def open_transport(self, options: dict = None):
return self._request_reply(
"open_transport", {"options": (options if options is not None else {})}
)
def close_transport(self):
return self._request_reply("close_transport", {})
def read_transport(self, n, timeout_sec):
reply = self._request_reply("read_transport", {"n": n, "timeout_sec": timeout_sec})
reply["data"] = base64.b85decode(reply["data"])
return reply
def write_transport(self, data, timeout_sec):
return self._request_reply(
"write_transport",
{"data": str(base64.b85encode(data), "utf-8"), "timeout_sec": timeout_sec},
)
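# For reference, one exchange produced by _request_reply looks like this on the
# wire (newline-delimited JSON-RPC 2.0; values illustrative):
#   -> {"jsonrpc": "2.0", "method": "build", "params": {"options": {}}, "id": 1}
#   <- {"jsonrpc": "2.0", "id": 1, "result": null}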
# NOTE: windows support untested
SERVER_LAUNCH_SCRIPT_FILENAME = (
    f"launch_microtvm_api_server.{'sh' if platform.system() != 'Windows' else 'bat'}"
)
SERVER_PYTHON_FILENAME = "microtvm_api_server.py"
def instantiate_from_dir(project_dir: typing.Union[pathlib.Path, str], debug: bool = False):
"""Launch server located in project_dir, and instantiate a Project API Client
connected to it."""
args = None
project_dir = pathlib.Path(project_dir)
python_script = project_dir / SERVER_PYTHON_FILENAME
if python_script.is_file():
args = [sys.executable, str(python_script)]
launch_script = project_dir / SERVER_LAUNCH_SCRIPT_FILENAME
if launch_script.is_file():
args = [str(launch_script)]
if args is None:
raise ProjectAPIServerNotFoundError(
f"No Project API server found in project directory: {project_dir}"
"\n"
f"Tried: {SERVER_LAUNCH_SCRIPT_FILENAME}, {SERVER_PYTHON_FILENAME}"
)
api_server_read_fd, tvm_write_fd = os.pipe()
tvm_read_fd, api_server_write_fd = os.pipe()
args.extend(["--read-fd", str(api_server_read_fd), "--write-fd", str(api_server_write_fd)])
if debug:
args.append("--debug")
api_server_proc = subprocess.Popen( # pylint: disable=unused-variable
args, bufsize=0, pass_fds=(api_server_read_fd, api_server_write_fd), cwd=project_dir
)
os.close(api_server_read_fd)
os.close(api_server_write_fd)
return ProjectAPIClient(
os.fdopen(tvm_read_fd, "rb", buffering=0), os.fdopen(tvm_write_fd, "wb", buffering=0)
)
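# Usage sketch (illustrative; the project directory and version string are
# assumptions):
#
#     api_client = instantiate_from_dir("./my_project")
#     info = api_client.server_info_query("0.9.0")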
| https://github.com/zk-ml/tachikoma |
python/tvm/micro/project_api/server.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines a basic Project API server template.
This file is meant to be imported or copied into Project API servers, so it should not have any
imports or dependencies outside of things strictly required to run the API server.
"""
import abc
import argparse
import base64
import collections
import enum
import io
import json
import logging
import os
import pathlib
import re
import select
import sys
import time
import traceback
import typing
_LOG = logging.getLogger(__name__)
_ProjectOption = collections.namedtuple(
"ProjectOption", ("name", "choices", "default", "type", "required", "optional", "help")
)
class ProjectOption(_ProjectOption):
"""Class used to keep the metadata associated to project options."""
def __new__(cls, name, **kw):
"""Override __new__ to force all options except name to be specified as kwargs."""
assert "name" not in kw
assert (
"required" in kw or "optional" in kw
), "at least one of 'required' or 'optional' must be specified."
assert "type" in kw, "'type' field must be specified."
kw["name"] = name
for param in ["choices", "default", "required", "optional"]:
kw.setdefault(param, None)
return super().__new__(cls, **kw)
def replace(self, attributes):
"""Update attributes associated to the project option."""
updated_option = self
return updated_option._replace(**attributes)
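# Illustrative sketch: declaring a project option. Every value below is an example
# assumption, not an option defined by this module.
_EXAMPLE_OPTION = ProjectOption(
    "example_flag",
    optional=["build"],  # API methods for which this option may be supplied.
    type="bool",
    default=False,
    help="An example boolean flag.",
)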
ServerInfo = collections.namedtuple(
"ServerInfo", ("platform_name", "is_template", "model_library_format_path", "project_options")
)
# Timeouts supported by the underlying C++ MicroSession.
#
# session_start_retry_timeout_sec : float
# Number of seconds to wait for the device to send a kSessionStartReply after sending the
# initial session start message. After this time elapses another
# kSessionTerminated-kSessionStartInit train is sent. 0 disables this.
# session_start_timeout_sec : float
# Total number of seconds to wait for the session to be established. After this time, the
# client gives up trying to establish a session and raises an exception.
# session_established_timeout_sec : float
# Number of seconds to wait for a reply message after a session has been established. 0
# disables this.
TransportTimeouts = collections.namedtuple(
"TransportTimeouts",
[
"session_start_retry_timeout_sec",
"session_start_timeout_sec",
"session_established_timeout_sec",
],
)
class ErrorCode(enum.IntEnum):
"""Enumerates error codes which can be returned. Includes JSON-RPC standard and custom codes."""
# Custom (in reserved error code space).
SERVER_ERROR = -32000 # A generic error was raised while processing the request.
# JSON-RPC standard
PARSE_ERROR = -32700
INVALID_REQUEST = -32600
METHOD_NOT_FOUND = -32601
INVALID_PARAMS = -32602
INTERNAL_ERROR = -32603
class JSONRPCError(Exception):
"""An error class with properties that meet the JSON-RPC error spec."""
def __init__(self, code, message, data, client_context=None):
Exception.__init__(self)
self.code = code
self.message = message
self.data = data
self.client_context = client_context
def to_json(self):
return {
"code": self.code,
"message": self.message,
"data": self.data,
}
def __str__(self):
data_str = ""
if self.data:
if isinstance(self.data, dict) and self.data.get("traceback"):
data_str = f'\n{self.data["traceback"]}'
else:
data_str = f"\n{self.data!r}"
return f"JSON-RPC error # {self.code}: {self.message}" + data_str
@classmethod
def from_json(cls, client_context, json_error):
"""Convert an encapsulated ServerError into JSON-RPC compliant format."""
found_server_error = False
try:
if ErrorCode(json_error["code"]) == ErrorCode.SERVER_ERROR:
found_server_error = True
        except ValueError:
            # Non-standard error code: fall through and return a generic JSONRPCError.
            # (Calling ServerError.from_json here would fail its code assertion.)
            pass
if found_server_error:
return ServerError.from_json(client_context, json_error)
return cls(
json_error["code"],
json_error["message"],
json_error.get("data", None),
client_context=client_context,
)
class ServerError(JSONRPCError):
"""Superclass for JSON-RPC errors which occur while processing valid requests."""
@classmethod
def from_exception(cls, exc, **kw):
to_return = cls(**kw)
to_return.set_traceback(traceback.TracebackException.from_exception(exc).format())
return to_return
def __init__(self, message=None, data=None, client_context=None):
if self.__class__ == ServerError:
assert message is not None, "Plain ServerError must have message="
else:
assert (
message is None
), f"ServerError subclasses must not supply message=; got {message!r}"
message = self.__class__.__name__
super(ServerError, self).__init__(ErrorCode.SERVER_ERROR, message, data)
self.client_context = client_context
def __str__(self):
context_str = f"{self.client_context}: " if self.client_context is not None else ""
super_str = super(ServerError, self).__str__()
return context_str + super_str
def set_traceback(self, traceback): # pylint: disable=redefined-outer-name
"""Format a traceback to be embedded in the JSON-RPC format."""
if self.data is None:
self.data = {}
if "traceback" not in self.data:
# NOTE: TVM's FFI layer reorders Python stack traces several times and strips
# intermediary lines that start with "Traceback". This logic adds a comment to the first
# stack frame to explicitly identify the first stack frame line that occurs on the
# server.
traceback_list = list(traceback)
# The traceback list contains one entry per stack frame, and each entry contains 1-2
# lines:
# File "path/to/file", line 123, in <method>:
# <copy of the line>
# We want to place a comment on the first line of the outermost frame to indicate this
# is the server-side stack frame.
first_frame_list = traceback_list[1].split("\n")
self.data["traceback"] = (
traceback_list[0]
+ f"{first_frame_list[0]} # <--- Outermost server-side stack frame\n"
+ "\n".join(first_frame_list[1:])
+ "".join(traceback_list[2:])
)
@classmethod
def from_json(cls, client_context, json_error):
assert json_error["code"] == ErrorCode.SERVER_ERROR
for sub_cls in cls.__subclasses__():
if sub_cls.__name__ == json_error["message"]:
return sub_cls(
data=json_error.get("data"),
client_context=client_context,
)
return cls(
json_error["message"], data=json_error.get("data"), client_context=client_context
)
class TransportClosedError(ServerError):
"""Raised when a transport can no longer be used due to underlying I/O problems."""
class IoTimeoutError(ServerError):
"""Raised when the I/O operation could not be completed before the timeout.
Specifically:
- when no data could be read before the timeout
- when some of the write data could be written before the timeout
Note the asymmetric behavior of read() vs write(), since in one case the total length of the
data to transfer is known.
"""
class UnsupportedTVMVersionError(ServerError):
"""Raised when the version of TVM supplied to server_info_query is unsupported."""
class ProjectAPIHandler(metaclass=abc.ABCMeta):
"""The interface class for all Project API implementations.
Extend this class in your microtvm_api_server.py and implement each function defined here.
"""
@abc.abstractmethod
def server_info_query(self, tvm_version: str) -> ServerInfo:
"""Initial request issued by TVM to retrieve metadata about this API server and project.
        Should this API server not support the given tvm_version, it should raise
        UnsupportedTVMVersionError (see Raises below).
Parameters
----------
tvm_version : str
The value of tvm.__version__.
Returns
-------
ServerInfo :
A ServerInfo namedtuple containing the metadata needed by TVM.
Raises
------
UnsupportedTVMVersionError :
When tvm_version indicates a known-unsupported version of TVM.
"""
raise NotImplementedError()
@abc.abstractmethod
def generate_project(
self,
model_library_format_path: pathlib.Path,
standalone_crt_dir: pathlib.Path,
project_dir: pathlib.Path,
options: dict,
):
"""Generate a project from the given artifacts, copying ourselves to that project.
Parameters
----------
model_library_format_path : pathlib.Path
Path to the Model Library Format tar archive.
standalone_crt_dir : pathlib.Path
Path to the root directory of the "standalone_crt" TVM build artifact. This contains the
TVM C runtime.
project_dir : pathlib.Path
Path to a nonexistent directory which should be created and filled with the generated
project.
options : dict
Dict mapping option name to ProjectOption.
"""
raise NotImplementedError()
@abc.abstractmethod
def build(self, options: dict):
"""Build the project, enabling the flash() call to made.
Parameters
----------
options : Dict[str, ProjectOption]
ProjectOption which may influence the build, keyed by option name.
"""
raise NotImplementedError()
@abc.abstractmethod
def flash(self, options: dict):
"""Program the project onto the device.
Parameters
----------
options : Dict[str, ProjectOption]
ProjectOption which may influence the programming process, keyed by option name.
"""
raise NotImplementedError()
@abc.abstractmethod
def open_transport(self, options: dict) -> TransportTimeouts:
"""Open resources needed for the transport layer.
This function might e.g. open files or serial ports needed in write_transport or
read_transport.
        Calling this function enables the write_transport and read_transport calls. If the
        transport is already open, this method is a no-op.
Parameters
----------
options : Dict[str, ProjectOption]
ProjectOption which may influence the programming process, keyed by option name.
"""
raise NotImplementedError()
@abc.abstractmethod
def close_transport(self):
"""Close resources needed to operate the transport layer.
This function might e.g. close files or serial ports needed in write_transport or
read_transport.
Calling this function disables the write_transport and read_transport calls. If the
transport is not open, this method is a no-op.
"""
raise NotImplementedError()
@abc.abstractmethod
# pylint: disable=unidiomatic-typecheck
def read_transport(self, n: int, timeout_sec: typing.Union[float, type(None)]) -> bytes:
"""Read data from the transport.
Parameters
----------
n : int
The exact number of bytes to read from the transport.
timeout_sec : Union[float, None]
            Number of seconds to wait for at least one byte to be read before timing out. If
            timeout_sec is 0, read should attempt to service the request in a non-blocking fashion.
            If timeout_sec is None, read should block until all `n` bytes of data can be returned.
Returns
-------
bytes :
Data read from the channel. Should be exactly `n` bytes long.
Raises
------
TransportClosedError :
When the transport layer determines that the transport can no longer send or receive
data due to an underlying I/O problem (i.e. file descriptor closed, cable removed, etc).
IoTimeoutError :
When `timeout_sec` elapses without receiving any data.
"""
raise NotImplementedError()
@abc.abstractmethod
def write_transport(self, data: bytes, timeout_sec: float):
"""Write data to the transport.
This function should either write all bytes in `data` or raise an exception.
Parameters
----------
data : bytes
The data to write over the channel.
timeout_sec : Union[float, None]
Number of seconds to wait for all bytes to be written before timing out. If timeout_sec
is 0, write should attempt to service the request in a non-blocking fashion. If
timeout_sec is None, write should block until it has written all data.
Raises
------
TransportClosedError :
When the transport layer determines that the transport can no longer send or receive
data due to an underlying I/O problem (i.e. file descriptor closed, cable removed, etc).
IoTimeoutError :
            When `timeout_sec` elapses before any of `data` could be written.
"""
raise NotImplementedError()
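# Minimal handler skeleton (illustrative sketch only; a real implementation lives
# in each platform's microtvm_api_server.py and must fill in every method):
#
#     class Handler(ProjectAPIHandler):
#         def server_info_query(self, tvm_version):
#             return ServerInfo(platform_name="example", is_template=True,
#                               model_library_format_path=None, project_options=[])
#         def generate_project(self, model_library_format_path, standalone_crt_dir,
#                              project_dir, options): ...
#         def build(self, options): ...
#         def flash(self, options): ...
#         def open_transport(self, options): ...
#         def close_transport(self): ...
#         def read_transport(self, n, timeout_sec): ...
#         def write_transport(self, data, timeout_sec): ...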
class ProjectAPIServer:
"""Base class for Project API Servers.
This API server implements communication using JSON-RPC 2.0:
https://www.jsonrpc.org/specification
Suggested use of this class is to import this module or copy this file into Project Generator
implementations, then instantiate it with server.start().
This RPC server is single-threaded, blocking, and one-request-at-a-time. Don't get anxious.
"""
_PROTOCOL_VERSION = 1
def __init__(
self, read_file: typing.BinaryIO, write_file: typing.BinaryIO, handler: ProjectAPIHandler
):
"""Initialize a new ProjectAPIServer.
Parameters
----------
read_file : BinaryIO
A file-like object used to read binary data from the client.
write_file : BinaryIO
A file-like object used to write binary data to the client.
handler : ProjectAPIHandler
A class which extends the abstract class ProjectAPIHandler and implements the server RPC
functions.
"""
self._read_file = io.TextIOWrapper(read_file, encoding="UTF-8", errors="strict")
self._write_file = io.TextIOWrapper(
write_file, encoding="UTF-8", errors="strict", write_through=True
)
self._handler = handler
def serve_forever(self):
"""Serve requests until no more are available."""
has_more = True
while has_more:
has_more = self.serve_one_request()
def serve_one_request(self):
"""Read, process, and reply to a single request from read_file.
When errors occur reading the request line or loading the request into JSON, they are
propagated to the caller (the stream is then likely corrupted and no further requests
        should be served). When errors occur past this point, they are caught and sent back to the
client.
        Returns
        -------
bool :
True when more data could be read from read_file, False otherwise.
"""
try:
line = self._read_file.readline()
_LOG.debug("read request <- %s", line)
if not line:
return False
request = json.loads(line)
except EOFError:
_LOG.error("EOF")
return False
except Exception as exc: # pylint: disable=broad-except
_LOG.error("Caught error reading request", exc_info=1)
return False
did_validate = False
try:
self._validate_request(request)
did_validate = True
self._dispatch_request(request)
except JSONRPCError as exc:
if isinstance(exc, ServerError):
exc.set_traceback(traceback.TracebackException.from_exception(exc).format())
request_id = None if not did_validate else request.get("id")
self._reply_error(request_id, exc)
return did_validate
except Exception as exc: # pylint: disable=broad-except
message = "validating request"
if did_validate:
message = f"calling method {request['method']}"
exc = ServerError.from_exception(exc, message=message)
request_id = None if not isinstance(request, dict) else request.get("id")
self._reply_error(request_id, exc)
return did_validate
return True
VALID_METHOD_RE = re.compile("^[a-zA-Z0-9_]+$")
def _validate_request(self, request):
if not isinstance(request, dict):
raise JSONRPCError(
ErrorCode.INVALID_REQUEST, f"request: want dict; got {request!r}", None
)
jsonrpc = request.get("jsonrpc")
if jsonrpc != "2.0":
raise JSONRPCError(
ErrorCode.INVALID_REQUEST, f'request["jsonrpc"]: want "2.0"; got {jsonrpc!r}', None
)
method = request.get("method")
if not isinstance(method, str):
raise JSONRPCError(
ErrorCode.INVALID_REQUEST, f'request["method"]: want str; got {method!r}', None
)
if not self.VALID_METHOD_RE.match(method):
raise JSONRPCError(
ErrorCode.INVALID_REQUEST,
f'request["method"]: should match regex {self.VALID_METHOD_RE.pattern}; '
f"got {method!r}",
None,
)
params = request.get("params")
if not isinstance(params, dict):
raise JSONRPCError(
ErrorCode.INVALID_REQUEST, f'request["params"]: want dict; got {type(params)}', None
)
request_id = request.get("id")
# pylint: disable=unidiomatic-typecheck
if not isinstance(request_id, (str, int, type(None))):
raise JSONRPCError(
ErrorCode.INVALID_REQUEST,
f'request["id"]: want str, number, null; got {request_id!r}',
None,
)
def _dispatch_request(self, request):
method = request["method"]
interface_method = getattr(ProjectAPIHandler, method, None)
if interface_method is None:
raise JSONRPCError(
ErrorCode.METHOD_NOT_FOUND, f'{request["method"]}: no such method', None
)
has_preprocessing = True
dispatch_method = getattr(self, f"_dispatch_{method}", None)
if dispatch_method is None:
dispatch_method = getattr(self._handler, method)
has_preprocessing = False
request_params = request["params"]
params = {}
for var_name, var_type in typing.get_type_hints(interface_method).items():
if var_name in ("self", "return"):
continue
# NOTE: types can only be JSON-compatible types, so var_type is expected to be of type
# 'type'.
if var_name not in request_params:
raise JSONRPCError(
ErrorCode.INVALID_PARAMS,
f'method {request["method"]}: parameter {var_name} not given',
None,
)
param = request_params[var_name]
if not has_preprocessing and not isinstance(param, var_type):
raise JSONRPCError(
ErrorCode.INVALID_PARAMS,
f'method {request["method"]}: parameter {var_name}: want {var_type!r}, '
f"got {type(param)!r}",
None,
)
params[var_name] = param
extra_params = [p for p in request["params"] if p not in params]
if extra_params:
raise JSONRPCError(
ErrorCode.INVALID_PARAMS,
f'{request["method"]}: extra parameters: {", ".join(extra_params)}',
None,
)
return_value = dispatch_method(**params)
self._write_reply(request["id"], result=return_value)
def _write_reply(self, request_id, result=None, error=None):
reply_dict = {
"jsonrpc": "2.0",
"id": request_id,
}
if error is not None:
assert (
result is None
), f"Want either result= or error=, got result={result!r} and error={error!r})"
reply_dict["error"] = error
else:
reply_dict["result"] = result
reply_str = json.dumps(reply_dict)
_LOG.debug("write reply -> %r", reply_dict)
self._write_file.write(reply_str)
self._write_file.write("\n")
def _reply_error(self, request_id, exception):
self._write_reply(request_id, error=exception.to_json())
def _dispatch_generate_project(
self, model_library_format_path, standalone_crt_dir, project_dir, options
):
return self._handler.generate_project(
pathlib.Path(model_library_format_path),
pathlib.Path(standalone_crt_dir),
pathlib.Path(project_dir),
options,
)
def _dispatch_server_info_query(self, tvm_version):
query_reply = self._handler.server_info_query(tvm_version)
to_return = query_reply._asdict()
if to_return["model_library_format_path"] is not None:
to_return["model_library_format_path"] = str(to_return["model_library_format_path"])
to_return.setdefault("protocol_version", self._PROTOCOL_VERSION)
to_return["project_options"] = [o._asdict() for o in query_reply.project_options]
return to_return
def _dispatch_open_transport(self, options):
reply = self._handler.open_transport(options)
return {"timeouts": reply._asdict()}
def _dispatch_read_transport(self, n, timeout_sec):
reply_data = self._handler.read_transport(n, timeout_sec)
return {"data": str(base64.b85encode(reply_data), "utf-8")}
def _dispatch_write_transport(self, data, timeout_sec):
self._handler.write_transport(base64.b85decode(data), timeout_sec)
def _await_nonblocking_ready(rlist, wlist, timeout_sec=None, end_time=None):
if end_time is None:
return True
if timeout_sec is None:
timeout_sec = max(0, end_time - time.monotonic())
rlist, wlist, xlist = select.select(rlist, wlist, rlist + wlist, timeout_sec)
if not rlist and not wlist and not xlist:
raise IoTimeoutError()
return True
def read_with_timeout(fd, n, timeout_sec): # pylint: disable=invalid-name
"""Read data from a file descriptor, with timeout.
This function is intended as a helper function for implementations of ProjectAPIHandler
read_transport. Tested on Linux and OS X. Not tested on Windows.
Parameters
----------
fd : int
File descriptor to read from. Must be opened in non-blocking mode (e.g. with O_NONBLOCK)
if timeout_sec is not None.
n : int
Maximum number of bytes to read.
timeout_sec : float or None
If not None, maximum number of seconds to wait before raising IoTimeoutError.
Returns
-------
bytes :
If at least one byte was received before timeout_sec, returns a bytes object with length
in [1, n]. If timeout_sec is None, returns the equivalent of os.read(fd, n).
Raises
------
IoTimeoutException :
When timeout_sec is not None and that number of seconds elapses before any data is read.
"""
end_time = None if timeout_sec is None else time.monotonic() + timeout_sec
while True:
_await_nonblocking_ready([fd], [], end_time=end_time)
try:
to_return = os.read(fd, n)
break
except BlockingIOError:
pass
# When EOF is reached, close the file.
if not to_return:
os.close(fd)
raise TransportClosedError()
return to_return
def write_with_timeout(fd, data, timeout_sec): # pylint: disable=invalid-name
"""Write data to a file descriptor, with timeout.
This function is intended as a helper function for implementations of ProjectAPIHandler
write_transport. Tested on Linux and OS X. Not tested on Windows.
Parameters
----------
fd : int
File descriptor to read from. Must be opened in non-blocking mode (e.g. with O_NONBLOCK)
if timeout_sec is not None.
data : bytes
Data to write.
timeout_sec : float or None
If not None, maximum number of seconds to wait before raising IoTimeoutError.
Returns
-------
int :
The number of bytes written to the file descriptor, if any bytes were written. A value
in [1, len(data)]. If timeout_sec is None, returns the equivalent of os.write(fd, data).
Raises
------
IoTimeoutException :
When timeout_sec is not None and that number of seconds elapses before any data is read.
"""
end_time = None if timeout_sec is None else time.monotonic() + timeout_sec
num_written = 0
while data:
try:
_await_nonblocking_ready([], [fd], end_time=end_time)
except IoTimeoutError as exc:
if num_written:
return num_written
raise exc
num_written_this_cycle = os.write(fd, data)
if not num_written_this_cycle:
os.close(fd)
            raise TransportClosedError()
data = data[num_written_this_cycle:]
num_written += num_written_this_cycle
return num_written
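# Usage sketch (illustrative; the device path is an assumption). Both helpers
# expect a non-blocking file descriptor when a timeout is used:
#
#     fd = os.open("/dev/ttyUSB0", os.O_RDWR | os.O_NONBLOCK)
#     write_with_timeout(fd, b"ping", timeout_sec=5.0)
#     reply = read_with_timeout(fd, 128, timeout_sec=5.0)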
def default_project_options(**kw) -> typing.List[ProjectOption]:
"""Get default Project Options
    Attributes of any default option can be updated. Here is an example
    where the `optional` attribute of the `verbose` option needs to be updated:
default_project_options(verbose={"optional": ["build"]})
This will update the `optional` attribute of `verbose` ProjectOption
to be `["build"]`.
Returns
-------
options: List[ProjectOption]
A list of default ProjectOption with modifications.
"""
options = [
ProjectOption(
"verbose",
optional=["generate_project"],
type="bool",
default=False,
help="Run build with verbose output.",
),
ProjectOption(
"project_type",
required=["generate_project"],
type="str",
help="Type of project to generate.",
),
ProjectOption(
"board",
required=["generate_project"],
type="str",
help="Name of the board to build for.",
),
ProjectOption(
"cmsis_path",
optional=["generate_project"],
type="str",
default=None,
help="Path to the CMSIS directory.",
),
ProjectOption(
"warning_as_error",
optional=["generate_project"],
type="bool",
default=False,
help="Treat warnings as errors and raise an Exception.",
),
ProjectOption(
"compile_definitions",
optional=["generate_project"],
type="str",
default=None,
help="Extra definitions added project compile.",
),
ProjectOption(
"extra_files_tar",
optional=["generate_project"],
type="str",
default=None,
help="If given, during generate_project, "
"uncompress the tarball at this path into the project dir.",
),
]
for name, config in kw.items():
option_found = False
for ind, option in enumerate(options):
if option.name == name:
options[ind] = option.replace(config)
option_found = True
break
if not option_found:
raise ValueError("Option {} was not found in default ProjectOptions.".format(name))
return options
def main(handler: ProjectAPIHandler, argv: typing.List[str] = None):
"""Start a Project API server.
Parameters
----------
argv : list[str]
Command-line parameters to this program. If not given, sys.argv is used.
handler : ProjectAPIHandler
Handler class that implements the API server RPC calls.
"""
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description="Generic TVM Project API server entry point")
parser.add_argument(
"--read-fd",
type=int,
required=True,
help="Numeric file descriptor where RPC requests should be read.",
)
parser.add_argument(
"--write-fd",
type=int,
required=True,
help="Numeric file descriptor where RPC replies should be written.",
)
parser.add_argument(
"--debug", action="store_true", help="When given, configure logging at DEBUG level."
)
    args = parser.parse_args(argv)
logging.basicConfig(level="DEBUG" if args.debug else "INFO", stream=sys.stderr)
read_file = os.fdopen(args.read_fd, "rb", buffering=0)
write_file = os.fdopen(args.write_fd, "wb", buffering=0)
server = ProjectAPIServer(read_file, write_file, handler)
server.serve_forever()
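# Typical entry point for a project's microtvm_api_server.py (illustrative;
# `Handler` stands in for a concrete ProjectAPIHandler subclass):
#
#     if __name__ == "__main__":
#         main(Handler())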
| https://github.com/zk-ml/tachikoma |
python/tvm/micro/session.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines a top-level glue class that operates the Transport and Flasher classes."""
import json
import logging
import sys
import os
import pathlib
import shutil
from typing import Union
from ..error import register_error
from .._ffi import get_global_func, register_func
from ..contrib import graph_executor
from ..contrib import utils
from ..contrib.debugger import debug_executor
from ..rpc import RPCSession
from . import project
from .transport import IoTimeoutError
from .transport import TransportLogger
try:
from .base import _rpc_connect
except ImportError:
raise ImportError("micro tvm is not enabled. Set USE_MICRO to ON in config.cmake")
@register_error
class SessionTerminatedError(Exception):
"""Raised when a transport read operation discovers that the remote session is terminated."""
class Session:
"""MicroTVM Device Session
Parameters
----------
    transport_context_manager : ContextManager[transport.Transport]
        A context manager yielding the transport used to communicate with the device, e.g. the
        value returned by GeneratedProject.transport().
    Example
    --------
    .. code-block:: python
        with tvm.micro.Session(project.transport()) as sess:
            graph_mod = tvm.micro.create_local_graph_executor(
                graph_json, sess.get_system_lib(), sess.device
            )
"""
def __init__(
self,
transport_context_manager=None,
session_name="micro-rpc",
timeout_override=None,
):
"""Configure a new session.
Parameters
----------
transport_context_manager : ContextManager[transport.Transport]
            On entry, this context manager should establish a transport between this TVM
            instance and the device.
session_name : str
Name of the session, used for debugging.
timeout_override : TransportTimeouts
If given, TransportTimeouts that govern the way Receive() behaves. If not given, this is
determined by calling has_flow_control() on the transport.
"""
self.transport_context_manager = transport_context_manager
self.session_name = session_name
self.timeout_override = timeout_override
self._rpc = None
self._graph_executor = None
self._enable_rpc_logger = False
self._exit_called = False
def get_system_lib(self):
return self._rpc.get_function("runtime.SystemLib")()
def create_aot_executor(self):
return self._rpc.get_function("tvm.aot_executor.create")(
self.get_system_lib(), self.device, "default"
)
def _wrap_transport_read(self, n, timeout_microsec):
try:
return self.transport.read(
n, float(timeout_microsec) / 1e6 if timeout_microsec is not None else None
)
except IoTimeoutError:
return bytes([])
def _wrap_transport_write(self, data, timeout_microsec):
self.transport.write(
data, float(timeout_microsec) / 1e6 if timeout_microsec is not None else None
)
return len(data) # TODO(areusch): delete
def __enter__(self):
"""Initialize this session and establish an RPC session with the on-device RPC server.
Returns
-------
Session :
Returns self.
"""
self.transport = TransportLogger(
self.session_name, self.transport_context_manager, level=logging.DEBUG
).__enter__()
try:
timeouts = self.timeout_override
if timeouts is None:
timeouts = self.transport.timeouts()
self._rpc = RPCSession(
_rpc_connect(
self.session_name,
self._wrap_transport_write,
self._wrap_transport_read,
int(timeouts.session_start_retry_timeout_sec * 1e6),
int(timeouts.session_start_timeout_sec * 1e6),
int(timeouts.session_established_timeout_sec * 1e6),
self._cleanup,
self._enable_rpc_logger,
)
)
self.device = self._rpc.cpu(0)
return self
except:
self.transport.__exit__(*sys.exc_info())
raise
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Tear down this session and associated RPC session resources."""
if not self._exit_called:
self._exit_called = True
self.transport.__exit__(exc_type, exc_value, exc_traceback)
shutdown_func = self._rpc._sess.get_function("CloseRPCConnection")
shutdown_func()
def _cleanup(self):
self.__exit__(None, None, None)
def lookup_remote_linked_param(mod, storage_id, template_tensor, device):
"""Lookup a parameter that has been pre-linked into a remote (i.e. over RPC) Module.
    This function signature matches the lookup function expected by the graph executor factory;
    see create_local_graph_executor below, which passes this function when creating the executor.
Parameters
----------
mod : tvm.runtime.Module
The remote Module containing the pre-linked parameters.
storage_id : int
        An integer identifying the pre-linked parameter to find.
template_tensor : DLTensor
A DLTensor containing metadata that should be filled-in to the returned NDArray. This
function should mostly not inspect this, and just pass it along to
NDArrayFromRemoteOpaqueHandle.
device : Device
The remote CPU device to be used with the returned NDArray.
Returns
-------
tvm.nd.NDArray :
NDArray containing the pre-linked parameter.
"""
try:
lookup_linked_param = mod.get_function("_lookup_linked_param")
except AttributeError:
return None
remote_data = lookup_linked_param(storage_id)
if remote_data is None:
return None
return get_global_func("tvm.rpc.NDArrayFromRemoteOpaqueHandle")(
mod, remote_data, template_tensor, device, None
)
def create_local_graph_executor(graph_json_str, mod, device):
"""Create a local graph executor driving execution on the remote CPU device given.
Parameters
----------
graph_json_str : str
A string containing the graph representation.
mod : tvm.runtime.Module
The remote module containing functions in graph_json_str.
device : tvm.runtime.Device
The remote CPU execution device.
Returns
-------
tvm.contrib.GraphExecutor :
A local graph executor instance that executes on the remote device.
"""
device_type_id = [device.device_type, device.device_id]
fcreate = get_global_func("tvm.graph_executor.create")
return graph_executor.GraphModule(
fcreate(graph_json_str, mod, lookup_remote_linked_param, *device_type_id)
)
def create_local_debug_executor(graph_json_str, mod, device, dump_root=None):
"""Create a local debug runtime driving execution on the remote CPU device given.
Parameters
----------
graph_json_str : str
A string containing the graph representation.
mod : tvm.runtime.Module
The remote module containing functions in graph_json_str.
device : tvm.runtime.Device
The remote CPU execution device.
dump_root : Optional[str]
If given, passed as dump_root= to GraphModuleDebug.
Returns
-------
tvm.contrib.GraphExecutor :
A local graph executor instance that executes on the remote device.
"""
device_type_id = [device.device_type, device.device_id]
fcreate = get_global_func("tvm.graph_executor_debug.create")
return debug_executor.GraphModuleDebug(
fcreate(graph_json_str, mod, lookup_remote_linked_param, *device_type_id),
[device],
graph_json_str,
dump_root=dump_root,
)
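# Usage sketch (assumes an open Session `sess` and graph JSON from a
# --runtime=c build; the variable names are illustrative):
#
#     graph_mod = create_local_graph_executor(
#         graph_json_str, sess.get_system_lib(), sess.device)
#     graph_mod.run()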
@register_func("tvm.micro.compile_and_create_micro_session")
def compile_and_create_micro_session(
mod_src_bytes: bytes,
template_project_dir: str,
    project_options: str = None,
project_dir: Union[os.PathLike, str] = None,
use_existing: bool = False,
):
"""Compile the given libraries and sources into a MicroBinary, then invoke create_micro_session.
Parameters
----------
mod_src_bytes : bytes
The content of a tarfile which contains the TVM-generated sources which together form the
SystemLib. This tar is expected to be created by export_library. The tar will be extracted
into a directory and the sources compiled into a MicroLibrary using the Compiler.
template_project_dir: str
The path to a template microTVM Project API project which is used to generate the embedded
project that is built and flashed onto the target device.
    project_options: str
        JSON-encoded options for the microTVM API Server contained in template_project_dir.
    project_dir: Union[os.PathLike, str]
        If use_existing is False, the path where the generated microTVM project is saved.
        If use_existing is True, the path to an already-generated microTVM project, for
        debugging.
    use_existing: bool
        If True, skip project generation and open a transport to the existing project at
        project_dir.
"""
if use_existing:
project_dir = pathlib.Path(project_dir)
assert project_dir.is_dir(), f"{project_dir} does not exist."
build_dir = project_dir / "generated-project" / "build"
shutil.rmtree(build_dir)
generated_project = project.GeneratedProject.from_directory(
project_dir / "generated-project",
options=json.loads(project_options),
)
else:
if project_dir:
temp_dir = utils.tempdir(custom_path=project_dir, keep_for_debug=True)
else:
temp_dir = utils.tempdir()
model_library_format_path = temp_dir / "model.tar.gz"
with open(model_library_format_path, "wb") as mlf_f:
mlf_f.write(mod_src_bytes)
try:
template_project = project.TemplateProject.from_directory(template_project_dir)
generated_project = template_project.generate_project_from_mlf(
model_library_format_path,
str(temp_dir / "generated-project"),
options=json.loads(project_options),
)
except Exception as exception:
logging.error("Project Generate Error: %s", str(exception))
raise exception
generated_project.build()
generated_project.flash()
transport = generated_project.transport()
rpc_session = Session(transport_context_manager=transport)
# RPC exit is called by cleanup function.
rpc_session.__enter__()
return rpc_session._rpc._sess
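# Usage sketch (the model path and empty options are illustrative assumptions):
#
#     with open("model.tar", "rb") as f:
#         remote_sess = compile_and_create_micro_session(
#             f.read(),
#             tvm.micro.get_microtvm_template_projects("crt"),
#             project_options=json.dumps({}),
#         )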
| https://github.com/zk-ml/tachikoma |
python/tvm/micro/testing/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Allows the tools specified below to be imported directly from tvm.micro.testing"""
from .evaluation import tune_model, create_aot_session, predict_labels_aot
from .utils import get_supported_boards, get_target
| https://github.com/zk-ml/tachikoma |
python/tvm/micro/testing/aot_test_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This file provides utilities for running AOT tests, especially for Corstone.
"""
import logging
import itertools
import shutil
import pytest
import tvm
from tvm.testing.aot import AOTTestRunner
pytest.importorskip("tvm.micro")
_LOG = logging.getLogger(__name__)
AOT_DEFAULT_RUNNER = AOTTestRunner()
# AOT Test Runner using the Arm® Corstone™-300 Reference Systems
# see: https://developer.arm.com/ip-products/subsystem/corstone/corstone-300
AOT_CORSTONE300_RUNNER = AOTTestRunner(
makefile="corstone300",
prologue="""
uart_init();
""",
includes=["uart.h"],
pass_config={
"relay.ext.cmsisnn.options": {
"mcpu": "cortex-m55",
}
},
)
AOT_USMP_CORSTONE300_RUNNER = AOTTestRunner(
makefile="corstone300",
prologue="""
uart_init();
""",
includes=["uart.h"],
pass_config={
"relay.ext.cmsisnn.options": {
"mcpu": "cortex-m55",
},
"tir.usmp.enable": True,
},
)
def parametrize_aot_options(test):
"""Parametrize over valid option combinations"""
requires_arm_eabi = pytest.mark.skipif(
shutil.which("arm-none-eabi-gcc") is None, reason="ARM embedded toolchain unavailable"
)
interface_api = ["packed", "c"]
use_unpacked_api = [True, False]
test_runner = [AOT_DEFAULT_RUNNER, AOT_CORSTONE300_RUNNER]
all_combinations = itertools.product(interface_api, use_unpacked_api, test_runner)
# Filter out packed operators with c interface
valid_combinations = filter(
lambda parameters: not (parameters[0] == "c" and not parameters[1]),
all_combinations,
)
# Only use reference system for C interface and unpacked API calls
valid_combinations = filter(
lambda parameters: not (
parameters[2] == AOT_CORSTONE300_RUNNER
and (parameters[0] == "packed" or not parameters[1])
),
valid_combinations,
)
# Skip reference system tests if running in i386 container
marked_combinations = map(
lambda parameters: pytest.param(*parameters, marks=[requires_arm_eabi])
if parameters[2] == AOT_CORSTONE300_RUNNER
else parameters,
valid_combinations,
)
func = pytest.mark.parametrize(
["interface_api", "use_unpacked_api", "test_runner"],
marked_combinations,
)(test)
return tvm.testing.skip_if_32bit(reason="Reference system unavailable in i386 container")(func)
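# Example sketch: a test decorated with the parametrization above receives one
# argument per parametrized name.
#
#     @parametrize_aot_options
#     def test_identity(interface_api, use_unpacked_api, test_runner):
#         ...  # build and check an AOT test case for this combination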
| https://github.com/zk-ml/tachikoma |
python/tvm/micro/testing/evaluation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Provides high-level functions for instantiating and timing AOT models. Used
by autotuning tests in tests/micro, and may be used for more performance
tests in the future.
"""
import logging
from io import StringIO
from pathlib import Path
from contextlib import ExitStack
import tempfile
import shutil
import tvm
from tvm.relay.op.contrib import cmsisnn
def tune_model(
platform,
board,
target,
mod,
params,
num_trials,
tuner_cls=tvm.autotvm.tuner.GATuner,
project_options=None,
):
"""Autotunes a model with microTVM and returns a StringIO with the tuning logs"""
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
tasks = tvm.autotvm.task.extract_from_program(mod["main"], {}, target)
assert len(tasks) > 0
assert isinstance(params, dict)
project_options = {
"board": board,
"project_type": "host_driven",
**(project_options or {}),
}
module_loader = tvm.micro.AutoTvmModuleLoader(
template_project_dir=tvm.micro.get_microtvm_template_projects(platform),
project_options=project_options,
)
builder = tvm.autotvm.LocalBuilder(
n_parallel=1,
build_kwargs={"build_option": {"tir.disable_vectorize": True}},
do_fork=False,
build_func=tvm.micro.autotvm_build_func,
runtime=tvm.relay.backend.Runtime("crt", {"system-lib": True}),
)
runner = tvm.autotvm.LocalRunner(number=1, repeat=1, timeout=100, module_loader=module_loader)
measure_option = tvm.autotvm.measure_option(builder=builder, runner=runner)
results = StringIO()
for task in tasks:
tuner = tuner_cls(task)
tuner.tune(
n_trial=num_trials,
measure_option=measure_option,
callbacks=[
tvm.autotvm.callback.log_to_file(results),
tvm.autotvm.callback.progress_bar(num_trials, si_prefix="M"),
],
si_prefix="M",
)
# Note that we might not find a working schedule at all, in which case
# tuner.best_flops would equal zero. This is not good, but checking for
# this case will happen elsewhere.
return results
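# Usage sketch (board and target values are illustrative; see
# tvm.micro.testing.utils.get_target and get_supported_boards):
#
#     tune_logs = tune_model("zephyr", board, target, mod, params, num_trials=10)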
def create_aot_session(
platform,
board,
target,
mod,
params,
build_dir=Path(tempfile.mkdtemp()),
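    # note: this default is evaluated once, at function definition time, so
    # calls that omit build_dir all share the same temporary directory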
tune_logs=None,
timeout_override=None,
use_cmsis_nn=False,
project_options=None,
use_existing=False,
):
"""AOT-compiles and uploads a model to a microcontroller, and returns the RPC session"""
executor = tvm.relay.backend.Executor("aot")
crt_runtime = tvm.relay.backend.Runtime("crt", {"system-lib": True})
with ExitStack() as stack:
config = {"tir.disable_vectorize": True}
if use_cmsis_nn:
config["relay.ext.cmsisnn.options"] = {"mcpu": target.mcpu}
stack.enter_context(tvm.transform.PassContext(opt_level=3, config=config))
if use_cmsis_nn:
mod = cmsisnn.partition_for_cmsisnn(mod, params, mcpu=target.mcpu)
if tune_logs is not None:
stack.enter_context(tvm.autotvm.apply_history_best(tune_logs))
lowered = tvm.relay.build(
mod,
target=target,
params=params,
runtime=crt_runtime,
executor=executor,
)
parameter_size = len(tvm.runtime.save_param_dict(lowered.get_params()))
print(f"Model parameter size: {parameter_size}")
project_options = {
"board": board,
"project_type": "host_driven",
# {} shouldn't be the default value for project options ({}
# is mutable), so we use this workaround
**(project_options or {}),
}
if use_existing:
shutil.rmtree(build_dir / "project" / "build")
project = tvm.micro.GeneratedProject.from_directory(
build_dir / "project",
options=project_options,
)
else:
project = tvm.micro.generate_project(
str(tvm.micro.get_microtvm_template_projects(platform)),
lowered,
build_dir / "project",
project_options,
)
project.build()
project.flash()
return tvm.micro.Session(project.transport(), timeout_override=timeout_override)
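# Usage sketch (continues the tune_model example above; arguments are illustrative):
#
#     session = create_aot_session(
#         "zephyr", board, target, mod, params, tune_logs=tune_logs)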
def predict_labels_aot(session, aot_executor, input_data, runs_per_sample=1):
"""Predicts labels for each sample in input_data using host-driven AOT.
Returns an iterator of (label, runtime) tuples. This function can only
be used with models for which the output is the confidence for each class."""
assert aot_executor.get_num_inputs() == 1
assert aot_executor.get_num_outputs() == 1
assert runs_per_sample > 0
for counter, sample in enumerate(input_data):
logging.info("Evaluating sample %d", counter)
aot_executor.get_input(0).copyfrom(sample)
result = aot_executor.module.time_evaluator("run", session.device, number=runs_per_sample)()
predicted_label = aot_executor.get_output(0).numpy().argmax()
runtime = result.mean
yield predicted_label, runtime
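# Usage sketch (assumes `session` from create_aot_session and preprocessed
# `samples`; wrapping with AotModule is an assumption about the host-side API):
#
#     with session:
#         aot_executor = tvm.runtime.executor.aot_executor.AotModule(
#             session.create_aot_executor())
#         for label, runtime in predict_labels_aot(session, aot_executor, samples):
#             print(label, runtime)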
| https://github.com/zk-ml/tachikoma |
python/tvm/micro/testing/pytest_plugin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,redefined-outer-name
""" microTVM testing fixtures used to deduce testing argument
values from testing parameters """
import pathlib
import os
import datetime
import pytest
from tvm.contrib.utils import tempdir
from .utils import get_supported_platforms, get_supported_boards
def pytest_addoption(parser):
"""Adds more pytest arguments"""
parser.addoption(
"--platform",
choices=get_supported_platforms(),
help=("microTVM platform for tests."),
)
parser.addoption(
"--board",
choices=list(get_supported_boards("zephyr").keys())
+ list(get_supported_boards("arduino").keys()),
help=(
"microTVM boards for tests. Board refers to instances"
"of microcontrollers/emulators defined in a platform."
),
)
parser.addoption(
"--test-build-only",
action="store_true",
default=False,
help="Only run tests that don't require physical hardware.",
)
parser.addoption(
"--microtvm-debug",
action="store_true",
default=False,
help=(
"If set true, it will keep the project directory for debugging."
"Also, it will enable debug level logging in project generation."
),
)
def pytest_generate_tests(metafunc):
"""Hooks into pytest to add platform and board fixtures to tests that
require them. To make sure that "platform" and "board" are treated as
parameters for the appropriate tests (and included in the test names),
we add them as function level parametrizations. This prevents data
from being overwritten in Junit XML files if multiple platforms
or boards are tested."""
for argument in ["platform", "board"]:
if argument in metafunc.fixturenames:
value = metafunc.config.getoption(f"--{argument}", default=None)
if not value:
raise ValueError(
f"Test {metafunc.function.__name__} in module {metafunc.module.__name__} "
f"requires a --{argument} argument, but none was given."
)
metafunc.parametrize(argument, [metafunc.config.getoption(f"--{argument}")])
@pytest.fixture(scope="session")
def microtvm_debug(request):
return request.config.getoption("--microtvm-debug")
def pytest_collection_modifyitems(config, items):
if config.getoption("--test-build-only"):
skip_hardware_tests = pytest.mark.skip(reason="--test-build-only was passed")
for item in items:
if "requires_hardware" in item.keywords:
item.add_marker(skip_hardware_tests)
@pytest.fixture
def workspace_dir(request, board, microtvm_debug):
"""Creates workspace directory for each test."""
parent_dir = pathlib.Path(os.path.dirname(request.module.__file__))
board_workspace = (
parent_dir / f"workspace_{board}" / datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
)
board_workspace_base = str(board_workspace)
number = 1
while board_workspace.exists():
board_workspace = pathlib.Path(board_workspace_base + f"-{number}")
number += 1
if not os.path.exists(board_workspace.parent):
os.makedirs(board_workspace.parent)
keep_for_debug = microtvm_debug if microtvm_debug else None
test_temp_dir = tempdir(custom_path=board_workspace, keep_for_debug=keep_for_debug)
return test_temp_dir
@pytest.fixture(autouse=True)
def skip_by_board(request, board):
"""Skip test if board is in the list."""
if request.node.get_closest_marker("skip_boards"):
if board in request.node.get_closest_marker("skip_boards").args[0]:
pytest.skip("skipped on this board: {}".format(board))
def pytest_configure(config):
config.addinivalue_line(
"markers",
"skip_boards(board): skip test for the given board",
)
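# Example sketch: skipping a test on specific boards with the marker registered
# above (the board name is illustrative).
#
#     @pytest.mark.skip_boards(["due"])
#     def test_feature(board):
#         ...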
| https://github.com/zk-ml/tachikoma |
python/tvm/micro/testing/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines the test methods used with microTVM."""
from functools import lru_cache
import json
import logging
from pathlib import Path
import tarfile
import time
from typing import Union
import tvm
from tvm import relay
from tvm.micro.project_api.server import IoTimeoutError
# Timeout in seconds for AOT transport.
TIMEOUT_SEC = 10
@lru_cache(maxsize=None)
def get_supported_platforms():
return ["arduino", "zephyr"]
@lru_cache(maxsize=None)
def get_supported_boards(platform: str):
template = Path(tvm.micro.get_microtvm_template_projects(platform))
with open(template / "boards.json") as f:
return json.load(f)
def get_target(platform: str, board: str) -> tvm.target.Target:
"""Intentionally simple function for making Targets for microcontrollers.
If you need more complex arguments, one should call target.micro directly. Note
that almost all, but not all, supported microcontrollers are Arm-based."""
model = get_supported_boards(platform)[board]["model"]
return tvm.target.target.micro(model, options=["-device=arm_cpu"])
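# Example sketch (the board name is illustrative; valid names come from
# get_supported_boards):
#
#     target = get_target("zephyr", "qemu_x86")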
def check_tune_log(log_path: Union[Path, str]):
"""Read the tuning log and check each result."""
with open(log_path, "r") as f:
lines = f.readlines()
for line in lines:
if len(line) > 0:
tune_result = json.loads(line)
assert tune_result["result"][0][0] < 1000000000.0
def aot_transport_init_wait(transport):
"""Send init message to microTVM device until it receives wakeup sequence."""
while True:
try:
aot_transport_find_message(transport, "wakeup", timeout_sec=TIMEOUT_SEC)
break
except IoTimeoutError:
transport.write(b"init%", timeout_sec=TIMEOUT_SEC)
def aot_transport_find_message(transport, expression: str, timeout_sec: int) -> str:
"""Read transport message until it finds the expression."""
timeout = timeout_sec
start_time = time.monotonic()
while True:
data = _read_line(transport, timeout)
logging.debug("new line: %s", data)
if expression in data:
return data
timeout = max(0, timeout_sec - (time.monotonic() - start_time))
def _read_line(transport, timeout_sec: int) -> str:
data = bytearray()
while True:
new_data = transport.read(1, timeout_sec=timeout_sec)
logging.debug("read data: %s", new_data)
for item in new_data:
data.append(item)
if str(chr(item)) == "\n":
return data.decode(encoding="utf-8")
def mlf_extract_workspace_size_bytes(mlf_tar_path: Union[Path, str]) -> int:
"""Extract an MLF archive file and read workspace size from metadata file."""
workspace_size = 0
with tarfile.open(mlf_tar_path, "r:*") as tar_file:
tar_members = [ti.name for ti in tar_file.getmembers()]
assert "./metadata.json" in tar_members
with tar_file.extractfile("./metadata.json") as f:
metadata = json.load(f)
for mod_name in metadata["modules"].keys():
workspace_size += metadata["modules"][mod_name]["memory"]["functions"]["main"][0][
"workspace_size_bytes"
]
return workspace_size
def get_conv2d_relay_module():
"""Generate a conv2d Relay module for testing."""
data_shape = (1, 3, 64, 64)
weight_shape = (8, 3, 5, 5)
data = relay.var("data", relay.TensorType(data_shape, "int8"))
weight = relay.var("weight", relay.TensorType(weight_shape, "int8"))
y = relay.nn.conv2d(
data,
weight,
padding=(2, 2),
channels=8,
kernel_size=(5, 5),
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="int32",
)
f = relay.Function([data, weight], y)
mod = tvm.IRModule.from_expr(f)
mod = relay.transform.InferType()(mod)
return mod
| https://github.com/zk-ml/tachikoma |
python/tvm/micro/transport.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines abstractions and implementations of the RPC transport used with micro TVM."""
import abc
import logging
import string
import typing
from .project_api.server import IoTimeoutError, TransportTimeouts
from .project_api.server import TransportClosedError
_ = TransportClosedError # work around pylint unused-import error
_LOG = logging.getLogger(__name__)
def debug_transport_timeouts(session_start_retry_timeout_sec=0):
return TransportTimeouts(
session_start_retry_timeout_sec=session_start_retry_timeout_sec,
session_start_timeout_sec=0,
session_established_timeout_sec=0,
)
class Transport(metaclass=abc.ABCMeta):
"""The abstract Transport class used for micro TVM."""
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.close()
@abc.abstractmethod
def timeouts(self):
"""Return TransportTimeouts suitable for use with this transport.
See the TransportTimeouts documentation in python/tvm/micro/session.py.
"""
raise NotImplementedError()
@abc.abstractmethod
def open(self):
"""Open any resources needed to send and receive RPC protocol data for a single session."""
raise NotImplementedError()
@abc.abstractmethod
def close(self):
"""Release resources associated with this transport."""
raise NotImplementedError()
@abc.abstractmethod
def read(self, n, timeout_sec):
"""Read up to n bytes from the transport.
Parameters
----------
n : int
Maximum number of bytes to read from the transport.
timeout_sec : Union[float, None]
Number of seconds to wait for all `n` bytes to be received before timing out. The
transport can wait additional time to account for transport latency or bandwidth
limitations based on the selected configuration and number of bytes being received. If
timeout_sec is 0, read should attempt to service the request in a non-blocking fashion.
If timeout_sec is None, read should block until at least 1 byte of data can be returned.
Returns
-------
bytes :
Data read from the channel. Less than `n` bytes may be returned, but 0 bytes should
never be returned. If returning less than `n` bytes, the full timeout_sec, plus any
internally-added timeout, should be waited. If a timeout or transport error occurs,
an exception should be raised rather than simply returning empty bytes.
Raises
------
TransportClosedError :
When the transport layer determines that the transport can no longer send or receive
data due to an underlying I/O problem (i.e. file descriptor closed, cable removed, etc).
IoTimeoutError :
When `timeout_sec` elapses without receiving any data.
"""
raise NotImplementedError()
@abc.abstractmethod
def write(self, data, timeout_sec):
"""Write data to the transport channel.
Parameters
----------
data : bytes
The data to write over the channel.
timeout_sec : Union[float, None]
Number of seconds to wait for at least one byte to be written before timing out. The
transport can wait additional time to account for transport latency or bandwidth
limitations based on the selected configuration and number of bytes being received. If
timeout_sec is 0, write should attempt to service the request in a non-blocking fashion.
If timeout_sec is None, write should block until at least 1 byte of data can be
returned.
Returns
-------
int :
The number of bytes written to the underlying channel. This can be less than the length
of `data`, but cannot be 0 (raise an exception instead).
Raises
------
TransportClosedError :
When the transport layer determines that the transport can no longer send or receive
data due to an underlying I/O problem (i.e. file descriptor closed, cable removed, etc).
IoTimeoutError :
When `timeout_sec` elapses without receiving any data.
"""
raise NotImplementedError()
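# A minimal in-memory Transport sketch, shown here for illustration only (it is
# not part of the public API): bytes written are echoed back to later reads.
class LoopbackTransport(Transport):
    """Loopback Transport useful for exercising code that consumes Transports."""

    def __init__(self):
        self._buffer = bytearray()

    def timeouts(self):
        return debug_transport_timeouts()

    def open(self):
        pass

    def close(self):
        pass

    def read(self, n, timeout_sec):
        if not self._buffer:
            raise IoTimeoutError()
        data = bytes(self._buffer[:n])
        del self._buffer[:n]
        return data

    def write(self, data, timeout_sec):
        self._buffer.extend(data)
        return len(data)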
class TransportLogger(Transport):
"""Wraps a Transport implementation and logs traffic to the Python logging infrastructure."""
def __init__(self, name, child, logger=None, level=logging.INFO):
self.name = name
self.child = child
self.logger = logger or _LOG
self.level = level
# Construct PRINTABLE to exclude whitespace from string.printable.
PRINTABLE = string.digits + string.ascii_letters + string.punctuation
@classmethod
def _to_hex(cls, data):
lines = []
if not data:
lines.append("")
return lines
for i in range(0, (len(data) + 15) // 16):
chunk = data[i * 16 : (i + 1) * 16]
hex_chunk = " ".join(f"{c:02x}" for c in chunk)
ascii_chunk = "".join((chr(c) if chr(c) in cls.PRINTABLE else ".") for c in chunk)
lines.append(f"{i * 16:04x} {hex_chunk:47} {ascii_chunk}")
if len(lines) == 1:
lines[0] = lines[0][6:]
return lines
def timeouts(self):
return self.child.timeouts()
def open(self):
self.logger.log(self.level, "%s: opening transport", self.name)
self.child.open()
def close(self):
self.logger.log(self.level, "%s: closing transport", self.name)
return self.child.close()
def read(self, n, timeout_sec):
timeout_str = f"{timeout_sec:5.2f}s" if timeout_sec is not None else " None "
try:
data = self.child.read(n, timeout_sec)
except IoTimeoutError:
self.logger.log(
self.level,
"%s: read {%s} %4d B -> [IoTimeoutError %s]",
self.name,
timeout_str,
n,
timeout_str,
)
raise
except Exception as err:
self.logger.log(
self.level,
"%s: read {%s} %4d B -> [err: %s]",
self.name,
timeout_str,
n,
err.__class__.__name__,
exc_info=1,
)
raise err
hex_lines = self._to_hex(data)
if len(hex_lines) > 1:
self.logger.log(
self.level,
"%s: read {%s} %4d B -> [%3d B]:\n%s",
self.name,
timeout_str,
n,
len(data),
"\n".join(hex_lines),
)
else:
self.logger.log(
self.level,
"%s: read {%s} %4d B -> [%3d B]: %s",
self.name,
timeout_str,
n,
len(data),
hex_lines[0],
)
return data
def write(self, data, timeout_sec):
timeout_str = f"{timeout_sec:5.2f}s" if timeout_sec is not None else " None "
try:
self.child.write(data, timeout_sec)
except IoTimeoutError:
self.logger.log(
self.level,
"%s: write {%s} <- [%3d B]: [IoTimeoutError %s]",
self.name,
timeout_str,
len(data),
timeout_str,
)
raise
except Exception as err:
self.logger.log(
self.level,
"%s: write {%s} <- [%3d B]: [err: %s]",
self.name,
timeout_str,
len(data),
err.__class__.__name__,
exc_info=1,
)
raise err
hex_lines = self._to_hex(data)
if len(hex_lines) > 1:
self.logger.log(
self.level,
"%s: write {%s} <- [%3d B]:\n%s",
self.name,
timeout_str,
len(data),
"\n".join(hex_lines),
)
else:
self.logger.log(
self.level,
"%s: write {%s} <- [%3d B]: %s",
self.name,
timeout_str,
len(data),
hex_lines[0],
)
TransportContextManager = typing.ContextManager[Transport]
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/api.py | from __future__ import annotations
import typing
from dataclasses import dataclass, field
import numpy as np
import tvm
from tvm import relay, ir
from .extool import *
from .types import *
from . import runtime, topi
VisitorT = typing.Callable[ [RelayExpr, ParametersT], None ]
TransformerT = typing.Callable[
[RelayExpr, ParametersT], typing.Optional[RelayExpr]]
@dataclass
class Trace:
name: str
""" Trace Name """
expr: RelayExpr
params: ParametersT
input_vars: typing.List[Var] = field(init=False)
param_vars: typing.List[Var] = field(init=False)
def __post_init__(self):
self.input_vars = []
self.param_vars = []
for v in relay.analysis.free_vars(self.expr):
if v.name_hint in self.params:
self.param_vars.append(v)
else:
self.input_vars.append(v)
@property
def input_names(self) -> typing.List[str]:
        return [i.name_hint for i in self.input_vars]
def random_inputs(self) -> typing.Dict[str, np.ndarray]:
inputs = {}
for v in self.input_vars:
shape = v.type_annotation.concrete_shape
dtype = v.type_annotation.dtype
data = np.random.randn(*shape).astype(dtype)
inputs[v.name_hint] = data
return inputs
def calibrate(self,
data: typing.Optional[np.ndarray] = None,
data_dict: typing.Dict[str, np.ndarray] = {},
) -> typing.Dict[str, np.ndarray]:
self.infer_type()
        calibrate_outputs: typing.Dict[str, np.ndarray] = {
                k: v.numpy() for k, v in self.params.items()}
# set input data
for v in self.input_vars:
shape = v.type_annotation.concrete_shape
dtype = v.type_annotation.dtype
val = data_dict.get(v.name_hint, data)
if val is None:
print("input: {} use random data".format(
v.name_hint))
val = np.random.randn(*shape).astype(dtype)
calibrate_outputs[v.name_hint] = val
def _calibrate(expr: RelayExpr, params: ParametersT):
data = [ calibrate_outputs[e] for e in args(expr) ]
out = topi.execute(expr, data)
shape = list(expr.checked_type.concrete_shape)
assert list(out.shape) == shape
assert str(out.dtype) == expr.checked_type.dtype
calibrate_outputs[expr] = out
self.visit(_calibrate)
return calibrate_outputs
def run(self,
data: typing.Optional[np.ndarray] = None,
data_dict: typing.Dict[str, np.ndarray] = {},
device: tvm.runtime.Device = tvm.runtime.cpu(0),
) -> typing.List[np.ndarray]:
inputs = {k: v for k, v in self.params.items()}
for v in self.input_vars:
shape = v.type_annotation.concrete_shape
dtype = v.type_annotation.dtype
val = data_dict.get(v.name_hint, data)
assert val is not None
assert list(shape) == list(val.shape), (
"{}: {} vs. {}").format(
v.name_hint, shape, val.shape)
assert dtype == val.dtype
inputs[v.name_hint] = val
return runtime.infer(self.expr, inputs)
def eval(self, device) -> runtime.ValidateFunctionT:
return runtime.validator(
self.expr, self.params, self.name,
device=device)
def visit(self, callback: VisitorT):
def _visitor(expr: RelayExpr):
callback(expr, self.params)
visit(self.expr, _visitor)
def transform(self, callback: TransformerT) -> Trace:
def _tfm(expr: RelayExpr):
return callback(expr, self.params)
return Trace(callback.__name__,
transform(self.expr, _tfm), self.params)
    def infer_type(self) -> Trace:
return Trace("infer_type",
infer_type(self.expr), self.params)
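# Usage sketch (assumes `expr` and `params` come from a frontend import):
#
#     tr = Trace("origin", expr, params).infer_type()
#     outputs = tr.run(data_dict=tr.random_inputs())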
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/calibrate.py | from __future__ import annotations
import typing
import numpy as np
import tvm
from dataclasses import dataclass, field, InitVar
from .symbol import *
from . import runtime
from .transform import Transformer
from .types import *
@dataclass
class Calibrator(Transformer):
args: typing.List[Calibrator]
is_nd: bool = False
output: typing.List[np.ndarray] = field(default_factory=list)
def __call__(self,
            data: tvm.nd.NDArray | None = None,
data_dict: ParametersT = {}):
if self.is_input():
out = data_dict.get(self.name, data)
if out is None:
# use random input data
out = np.random.randn(*self.shape)
out = out.astype(self.dtype)
out = tvm.nd.array(out)
elif self.is_param():
out = self.params[self.name]
elif self.is_op(TUPLE_GET_ITEM_NAME):
out = self.args[0].raw_output[self.attrs["index"]]
assert isinstance(out, tvm.nd.NDArray), type(out)
else:
out = self.run({ a.name: a.raw_output \
for a in self.args })
if isinstance(out, tvm.nd.NDArray):
self.is_nd = True
self.output = [ out, ]
self._assert(out.dtype, self.dtype)
self._assert(out.shape, self.shape)
else:
self.is_nd = False
self.output = out
self._assert([o.dtype for o in out], self.dtype)
self._assert([o.shape for o in out], self.shape)
print(self.name, self.op_name, self.shape, self.dtype)
def run(self, args_data: typing.Dict[str, tvm.nd.NDArray]):
args = [ a.as_parameter() for a in self.args]
sym = self.clone(Symbol, args=args)
expr = symbol2expr(sym)
# data = { a.name: a.raw_output for a in self.args }
return runtime.infer(expr, args_data)
@property
def raw_output(self):
return self.output[0] if self.is_nd else self.output
def _assert(self, val, expect):
if isinstance(val, (list, tuple)):
assert len(val) == len(expect), (
"{} vs. {}").format(val, expect)
for v, e in zip(val, expect):
self._assert(v, e)
return
assert val == expect, "{} vs. {}".format(val, expect)
# @property
# def shape(self):
# return self.output[0].shape if self.is_nd \
# else [ o.shape for o in self.output ]
# @property
# def dtype(self):
# return self.output[0].shape if self.is_nd \
# else [ o.shape for o in self.output ]
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/data.py |
import numpy as np
import tvm
from tvm import relay, ir, runtime
from .extool import *
from .types import ParametersT
def random_inputs(expr: ir.expr.RelayExpr,
params: ParametersT = {}) -> ParametersT:
input_data = {k: v for k, v in params.items()}
for v in relay.analysis.free_vars(expr):
if v.name_hint in params:
continue
print(v.name_hint, v.type_annotation)
ty = v.type_annotation
# ty = v.checked_type
np_data = np.random.randn(
*ty.concrete_shape).astype(ty.dtype)
input_data[v.name_hint] = tvm.nd.array(np_data)
return input_data
def set_inputs(expr: ir.expr.RelayExpr,
params: ParametersT = {}) -> ParametersT:
free_vars = relay.analysis.free_vars(expr)
input_data = {k: v for k, v in params.items()}
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/dataset-old.py | """ Dataset Class Definition.
Customized datasets definition and customized interface
definition including ``metrics``, ``validate``,
``_load_data`` and ``iter_func``.
Only **crucial parts** of the custommized interface
implementation are elaborated.
"""
import mxnet as mx
from mxnet import gluon
from mxnet import nd
from gluoncv import data as gdata
from gluoncv.data.batchify import Tuple, Stack, Pad
from gluoncv.data.transforms.presets.yolo import YOLO3DefaultValTransform
from gluoncv.utils.metrics.voc_detection import VOC07MApMetric
from gluoncv.utils.metrics.coco_detection import COCODetectionMetric
from gluoncv.data.transforms.presets.ssd import SSDDefaultValTransform
import numpy as np
import requests
import tarfile
import os
from os import path
import math
import pickle
import logging
from . import conf
__all__ = ["DS_REG", "Dataset"]
# dataset_dir = path.expanduser("~/.mxnet/datasets")
src = "http://0.0.0.0:8827"
def extract_file(tar_path, target_path):
tar = tarfile.open(tar_path, "r")
if path.exists(path.join(target_path,
tar.firstmember.name)):
return
tar.extractall(target_path)
tar.close()
def download_files(category, files, base_url=src, root=conf.MRT_DATASET_ROOT):
logger = logging.getLogger("dataset")
root_dir = path.join(root, category)
os.makedirs(root_dir, exist_ok=True)
for df in files:
url = path.join(base_url, 'datasets', category, df)
fpath = path.join(root_dir, df)
if path.exists(fpath):
continue
fdir = path.dirname(fpath)
if not path.exists(fdir):
os.makedirs(fdir)
logger.info("Downloading dateset %s into %s from url[%s]",
df, root_dir, url)
r = requests.get(url)
if r.status_code != 200:
logger.error("Url response invalid status code: %s",
r.status_code)
exit()
r.raise_for_status()
with open(fpath, "wb") as fout:
fout.write(r.content)
return root_dir
DS_REG = {
# "voc": VOCDataset,
# "imagenet": ImageNetDataset,
# "cifar10": Cifar10Dataset,
# "quickdraw": QuickDrawDataset,
# "mnist": MnistDataset,
# "trec": TrecDataset,
# "coco": COCODataset,
}
def register_dataset(name):
def _wrapper(dataset):
dataset.name = name
if name in DS_REG:
raise NameError("Dataset " + name + " has been registered")
        DS_REG[name] = dataset
return dataset
return _wrapper
class Dataset:
""" Base dataset class, with pre-defined interface.
The dataset directory is located at the ``root`` directory containing
the dataset `name` directory. And the custom dataset should pass
the parameter location of root, or implement the derived class
of your data iterator, metrics and validate function.
    Notice:
        Our default imagenet dataset is organized in the ``record``
        binary format, which can amplify the throughput for image reads.
        Custom image datasets from third parties can be preprocessed by the
        ``im2rec`` procedure to transform the images into the record format.
        The transformation script is located at ``docs/mrt/im2rec.py``;
        for more details, refer to the script's helper documentation
        (print usage with the ``-h`` flag).
Parameters
==========
    input_shape: Tuple or List
        The input shape requested by the user; some datasets
        check its validity. Generally, a specific dataset performs
        checks on the input shape, such as the channel count for
        images.
        Example: imagenet's input shape looks like (N, C, H, W),
        where C must equal 3, H must equal W, and N is the batch
        size the user wants. A different H (W) requests that the
        dataset loader resize images.
    root: os.path.Path or path string
        The location where the dataset is stored, defined by the
        variable ``MRT_DATASET_ROOT`` in conf.py or a custom directory.
    **Custom Dataset Implementation (derive this class):**
    1. Register the dataset name into DS_REG so it can be accessed
       through the ``dataset`` package API; the related function is
       ``register_dataset``.
    2. Override the abstract methods defined in the base dataset class:
       _load_data(self) [Required]:
           Load data from disk and save the required `data_loader`
           into the member `data`.
       iter_func(self) [Optional]:
           Return the tuple (data, label) on each invocation, according
           to the member `data` loaded by `_load_data`. This function
           is optional since a naive version is already implemented
           that works whenever `data` supports `iter(data)`; override
           it if you need different behavior.
       metrics(self) [Required]:
           Return the metrics object for the dataset, including any
           auxiliary variables.
       validate(self, metrics, predict, label) [Required]:
           Calculate the accuracy of the model inference and return
           it as a formatted string.
Examples
========
>>> from mxnet import ndarray as nd
>>> @register_dataset("my_dataset")
>>> class MyDataset(Dataset):
... def _load_data(self):
... B = self.ishape[0]
... def _data_loader():
... for i in range(1000):
... yield nd.array([i + c for c in range(B)])
... self.data = _data_loader()
...
... # use the default `iter_func` defined in base class
...
... def metrics(self):
... return {"count": 0, "total": 0}
... def validate(self, metrics, predict, label):
... for idx in range(predict.shape[0]):
... res_label = predict[idx].asnumpy().argmax()
... data_label = label[idx].asnumpy()
... if res_label == data_label:
... metrics["acc"] += 1
... metrics["total"] += 1
... acc = 1. * metrics["acc"] / metrics["total"]
... return "{:6.2%}".format(acc)
>>>
>>> # usage
>>> md_cls = DS_REG["my_dataset"]
>>> ds = md_cls([8]) # batch size is 8
>>> data_iter_func = ds.iter_func()
>>> data_iter_func() # get the batch data
NDArray<[0, 1, 2, 3, 4, 5, 6, 7] @ctx(cpu)>
"""
name = None
""" Registered Dataset Name """
def __init__(self, input_shape, root=conf.MRT_DATASET_ROOT):
self.ishape = input_shape
if self.name is None:
raise RuntimeError("Dataset name not set")
# Dataset not to download the file, it's user's responsibility
self.root_dir = path.join(root, self.name)
# self.root_dir = download_files(
# self.name, self.download_deps, base_url, root) \
# if dataset_dir is None else dataset_dir
# for fname in self.download_deps:
# if fname.endswith(".tar") or fname.endswith(".tar.gz"):
# extract_file(
# path.join(self.root_dir, fname), self.root_dir)
self.data = None
self._load_data()
def metrics(self):
raise NotImplementedError(
"Derived " + self.name + " dataset not override the" +
" base `metric` function defined in Dataset")
def validate(self, metrics, predict, label):
raise NotImplementedError(
"Derived " + self.name + " dataset not override the" +
" base `validate` function defined in Dataset")
def _load_data(self):
""" Load data from disk.
Save the data loader into member `data` like:
.. code-block:: python
self.data = data_loader
And validate the input shape if necessary:
.. code-block:: python
N, C, H, W = self.ishape
assert C == 3 and H == W
"""
raise NotImplementedError(
"Derived " + self.name + " dataset not override the" +
" base `_load_data` function defined in Dataset")
def iter_func(self):
""" Returns (data, label) iterator function.
Get the iterator of `self.data` and iterate each batch sample
with `next` function manually. Call like this:
.. code-block:: python
data_iter_func = dataset.iter_func()
data, label = data_iter_func()
"""
data_iter = iter(self.data)
def _wrapper():
return next(data_iter)
return _wrapper
@register_dataset("coco")
class COCODataset(Dataset):
# download_deps = ['val2017.zip']
def _load_data(self):
""" Customized _load_data method introduction.
        COCO dataset only supports the NCHW layout, and the number of channels must be 3,
        i.e. (batch_size, 3, input_size, input_size).
        The validation dataset is created by the *MS COCO Detection Dataset* and uses
        SSDDefaultValTransform as the data preprocessing function.
"""
assert len(self.ishape) == 4
N, C, H, W = self.ishape
assert C == 3
self.val_dataset = gdata.COCODetection(
root=self.root_dir, splits='instances_val2017', skip_empty=False)
val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
self.data = gluon.data.DataLoader(
self.val_dataset.transform(SSDDefaultValTransform(W, H)),
batch_size=N, shuffle=False, batchify_fn=val_batchify_fn,
last_batch='rollover', num_workers=30)
def metrics(self):
""" Customized metrics method introduction.
COCODetectionMetric is used which is the detection metric for COCO bbox task.
"""
_, _, H, W = self.ishape
metric = COCODetectionMetric(
self.val_dataset, '_eval', cleanup=True, data_shape=(H, W))
metric.reset()
return metric
def validate(self, metrics, predict, label):
""" Customized validate method introduction.
The image height must be equal to the image width.
The model output is [id, score, bounding_box],
where bounding_box is of layout (x1, y1, x2, y2).
        The accuracy is computed as follows:
.. code-block:: python
map_name, mean_ap = metrics.get()
acc = {k: v for k,v in zip(map_name, mean_ap)}
            acc = float(acc['~~~~ MeanAP @ IoU=[0.50,0.95] ~~~~\\n']) / 100
"""
det_ids, det_scores, det_bboxes = [], [], []
gt_ids, gt_bboxes, gt_difficults = [], [], []
_, _, H, W = self.ishape
assert H == W
ids, scores, bboxes = predict
det_ids.append(ids)
det_scores.append(scores)
# clip to image size
det_bboxes.append(bboxes.clip(0, H))
gt_ids.append(label.slice_axis(axis=-1, begin=4, end=5))
gt_difficults.append(
label.slice_axis(axis=-1, begin=5, end=6) \
if label.shape[-1] > 5 else None)
gt_bboxes.append(label.slice_axis(axis=-1, begin=0, end=4))
metrics.update(det_bboxes, det_ids, det_scores,
gt_bboxes, gt_ids, gt_difficults)
names, values = metrics.get()
acc = {k:v for k,v in zip(names, values)}
acc = float(acc['~~~~ MeanAP @ IoU=[0.50,0.95] ~~~~\n']) / 100
return "{:6.2%}".format(acc)
@register_dataset("voc")
class VOCDataset(Dataset):
# name = "voc"
# download_deps = ["VOCtest_06-Nov-2007.tar"]
def _load_data(self):
""" Customized _load_data method introduction.
        VOC dataset only supports the NCHW layout, and the number of channels must be 3,
        i.e. (batch_size, 3, input_size, input_size).
        The validation dataset is created by the Pascal *VOC Detection Dataset* and uses
        YOLO3DefaultValTransform as the data preprocessing function.
"""
assert len(self.ishape) == 4
N, C, H, W = self.ishape
assert C == 3
val_dataset = gdata.VOCDetection(
root=path.join(self.root_dir, 'VOCdevkit'),
splits=[('2007', 'test')])
val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
self.data = gluon.data.DataLoader(
val_dataset.transform(YOLO3DefaultValTransform(W, H)),
N, False, batchify_fn=val_batchify_fn,
last_batch='discard', num_workers=30)
def metrics(self):
""" Customized metric method introduction.
        VOC07MApMetric is used, which is the mean average precision metric for the PASCAL VOC 07 dataset.
"""
metric = VOC07MApMetric(
iou_thresh=0.5, class_names=gdata.VOCDetection.CLASSES)
metric.reset()
return metric
def validate(self, metrics, predict, label):
""" Customized validate method introduction.
The image height must be equal to the image width.
The model output is [id, score, bounding_box],
where bounding_box is of layout (x1, y1, x2, y2).
        The accuracy is computed as follows:
.. code-block:: python
map_name, mean_ap = metrics.get()
acc = {k: v for k,v in zip(map_name, mean_ap)}['mAP']
"""
det_ids, det_scores, det_bboxes = [], [], []
gt_ids, gt_bboxes, gt_difficults = [], [], []
_, _, H, W = self.ishape
assert H == W
ids, scores, bboxes = predict
det_ids.append(ids)
det_scores.append(scores)
# clip to image size
det_bboxes.append(bboxes.clip(0, H))
gt_ids.append(label.slice_axis(axis=-1, begin=4, end=5))
gt_difficults.append(
label.slice_axis(axis=-1, begin=5, end=6) \
if label.shape[-1] > 5 else None)
gt_bboxes.append(label.slice_axis(axis=-1, begin=0, end=4))
metrics.update(det_bboxes, det_ids, det_scores,
gt_bboxes, gt_ids, gt_difficults)
map_name, mean_ap = metrics.get()
acc = {k:v for k,v in zip(map_name, mean_ap)}['mAP']
return "{:6.2%}".format(acc)
class VisionDataset(Dataset):
pass
# def metrics(self):
# """ Customized metric method introduction.
# Computes accuracy classification score and top k predictions accuracy.
# """
# return [mx.metric.Accuracy(),
# mx.metric.TopKAccuracy(5)]
# def validate(self, metrics, predict, label):
# """ Customized metric method introduction.
# The model output include score for 1000 classes.
# """
# metrics[0].update(label, predict)
# metrics[1].update(label, predict)
# _, top1 = metrics[0].get()
# _, top5 = metrics[1].get()
# return "top1={:6.2%} top5={:6.2%}".format(top1, top5)
@register_dataset("imagenet")
class ImageNetDataset(VisionDataset):
# name = "imagenet"
download_deps = ["rec/val.rec", "rec/val.idx"]
def _load_data(self):
""" Customized _load_data method introduction.
        ImageNet dataset only supports the NCHW layout, the number of channels must be 3,
        and the image height must equal the image width, i.e.
        (batch_size, 3, input_size, input_size).

        The data preprocessing uses:

        .. code-block:: python

            crop_ratio = 0.875
            resize = math.ceil(H / crop_ratio)
            mean_rgb = [123.68, 116.779, 103.939]
            std_rgb = [58.393, 57.12, 57.375]

        Use ImageRecordIter to iterate on image record io files.
"""
assert len(self.ishape) == 4
N, C, H, W = self.ishape
assert C == 3
assert H == W
crop_ratio = 0.875
resize = int(math.ceil(H / crop_ratio))
mean_rgb = [123.68, 116.779, 103.939]
std_rgb = [58.393, 57.12, 57.375]
rec_val = path.join(self.root_dir, self.download_deps[0])
rec_val_idx = path.join(self.root_dir, self.download_deps[1])
self.data = mx.io.ImageRecordIter(
path_imgrec = rec_val,
path_imgidx = rec_val_idx,
preprocess_threads = 24,
shuffle = False,
batch_size = N,
resize = resize,
data_shape = (3, H, W),
mean_r = mean_rgb[0],
mean_g = mean_rgb[1],
mean_b = mean_rgb[2],
std_r = std_rgb[0],
std_g = std_rgb[1],
std_b = std_rgb[2],
)
def iter_func(self):
def _wrapper():
data = self.data.next()
return data.data[0], data.label[0]
return _wrapper
@register_dataset("cifar10")
class Cifar10Dataset(VisionDataset):
# name = "cifar10"
# download_deps = ["cifar-10-binary.tar.gz"]
def _load_data(self):
""" Customized _load_data method introduction.
        Cifar10Dataset only supports the NCHW layout; the number of channels must be 3
        and the image height and width must equal 32, i.e. (batch_size, 3, 32, 32).

        The data preprocessing normalizes with:

        .. code-block:: python

            mean = [0.4914, 0.4822, 0.4465]
            std = [0.2023, 0.1994, 0.2010]
"""
N, C, H, W = self.ishape
assert C == 3 and H == W and H == 32
transform_test = gluon.data.vision.transforms.Compose([
gluon.data.vision.transforms.ToTensor(),
gluon.data.vision.transforms.Normalize([0.4914, 0.4822, 0.4465],
[0.2023, 0.1994, 0.2010])])
self.data = gluon.data.DataLoader(
gluon.data.vision.CIFAR10(root=self.root_dir,
train=False).transform_first(transform_test),
batch_size=N, shuffle=False, num_workers=4)
@register_dataset("quickdraw")
class QuickDrawDataset(VisionDataset):
name = "quickdraw"
def __init__(self, input_shape, is_train=False, **kwargs):
self.download_deps = [
"quickdraw_X.npy", "quickdraw_y.npy"] if is_train else \
["quickdraw_X_test.npy", "quickdraw_y_test.npy"]
self.is_train = is_train
super().__init__(input_shape, **kwargs)
def _load_data(self):
""" Customized _load_data method introduction.
        QuickDrawDataset only supports the NCHW layout; the number of channels must be 1
        and the image height and width must equal 28, i.e. (batch_size, 1, 28, 28).
"""
N, C, H, W = self.ishape
assert C == 1 and H == 28 and W == 28
X = nd.array(np.load(path.join(self.root_dir, self.download_deps[0])))
Y = nd.array(np.load(path.join(self.root_dir, self.download_deps[1])))
self.data = gluon.data.DataLoader(
mx.gluon.data.dataset.ArrayDataset(X, Y),
batch_size=N,
last_batch='discard',
shuffle=self.is_train,
num_workers=4)
@register_dataset("mnist")
class MnistDataset(VisionDataset):
# name = "mnist"
# there is no need to download the data from cortexlabs,
    # since mxnet has supplied the necessary download logic.
# download_deps = ["t10k-images-idx3-ubyte.gz",
# "t10k-labels-idx1-ubyte.gz",
# "train-images-idx3-ubyte.gz",
# "train-labels-idx1-ubyte.gz"]
def data_xform(self, data):
"""Move channel axis to the beginning,
cast to float32, and normalize to [0, 1].
"""
return nd.moveaxis(data, 2, 0).astype('float32') / 255
def _load_data(self):
""" Customized _load_data method introduction.
The MxNet gluon package will auto-download the mnist dataset.
        MnistDataset only supports the NCHW layout; the number of channels must be 1
        and the image height and width must equal 28, i.e. (batch_size, 1, 28, 28).
"""
val_data = mx.gluon.data.vision.MNIST(
root=self.root_dir, train=False).transform_first(
self.data_xform)
N, C, H, W = self.ishape
assert C == 1 and H == 28 and W == 28
self.data = mx.gluon.data.DataLoader(
val_data, shuffle=False, batch_size=N)
@register_dataset("trec")
class TrecDataset(Dataset):
# name = "trec"
download_deps = ["TREC.train.pk", "TREC.test.pk"]
def __init__(self, input_shape, is_train=False, **kwargs):
self.is_train = is_train
super().__init__(input_shape, **kwargs)
def _load_data(self):
""" Customized _load_data method introduction.
        The data is loaded from a pickled file on disk.
        TrecDataset only supports the (I, N) layout, where the sentence length I
        must equal 38 and N is the batch size.
"""
fname = path.join(
self.root_dir, self.download_deps[0] \
if self.is_train else self.download_deps[1])
I, N = self.ishape
assert I == 38
# (38, batch), (batch,)
with open(fname, "rb") as fin:
reader = pickle.load(fin)
def data_loader():
data, label = [], []
for x, y in reader:
if len(data) < self.ishape[1]:
data.append(x)
label.append(y)
else:
yield nd.transpose(nd.array(data)), nd.array(label)
data, label = [], []
yield nd.transpose(nd.array(data)), nd.array(label)
self.data = data_loader()
def metrics(self):
return {"acc": 0, "total": 0}
def validate(self, metrics, predict, label):
""" Customized validate method introduction.
        The model output is the score for each of the 6 classes.
        The accuracy is computed as follows:

        .. code-block:: python

            acc = 1. * metrics["acc"] / metrics["total"]
"""
for idx in range(predict.shape[0]):
res_label = predict[idx].asnumpy().argmax()
data_label = label[idx].asnumpy()
if res_label == data_label:
metrics["acc"] += 1
metrics["total"] += 1
acc = 1. * metrics["acc"] / metrics["total"]
return "{:6.2%}".format(acc)
@register_dataset("stdrandom")
class StdRandomDataset(Dataset):
def _load_data(self):
def data_loader():
N, I, C = self.ishape
assert I == 1 and C == 3
data, label = [], []
while True:
if len(data) < N:
x = np.random.uniform(low=0.0, high=1.0, size=(I,C))
y = np.random.uniform(low=0.0, high=1.0, size=(I))
data.append(x)
label.append(y)
else:
batch_data, batch_label = nd.array(data), nd.array(label)
yield batch_data, batch_label
data, label = [], []
self.data = data_loader()
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/dataset.py | import ast
import typing
from os import path
import tvm
from .types import *
class Dataset:
def next(self) -> typing.Optional[DataLabelT]:
""" get next data, None if end. """
raise RuntimeError("Base Dataset Error")
def reset(self):
""" reset dataset internal reader status. """
raise RuntimeError("Base Dataset Error")
class ImageNet(Dataset):
category_name = "imagenet_category.json"
def __init__(self):
base_dir = path.join(path.dirname(__file__), "datasets")
with open(path.join(base_dir, self.category_name)) as f:
            # the category file stores a Python dict literal; parse it safely
            self.synset = ast.literal_eval(f.read())
def label(self, index):
return self.synset.get(index, "unknown category")
def labels(self, indexes):
return [ self.label(i) for i in indexes ]
class MemoryDataset(Dataset):
def __init__(self, available_dls: typing.List[DataLabelT]):
self.data = available_dls
self._max = len(self.data)
self._index = 0
def next(self) -> typing.Optional[DataLabelT]:
if self._index < self._max:
self._index += 1
return self.data[self._index-1]
return None
def reset(self):
self._index = 0
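# Usage sketch (shapes and labels are illustrative):
#
#     import numpy as np
#     ds = MemoryDataset([(np.zeros((1, 3, 224, 224), "float32"), np.array([0]))])
#     dl = ds.next()   # -> (data, label), or None once exhausted
#     ds.reset()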
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/extool.py | from __future__ import annotations
import typing
import copy
from functools import wraps
import pprint
import tvm
from tvm import relay, ir
from tvm.ir.expr import *
from tvm.relay.expr import *
from .types import *
def update_expr_args(old: RelayExpr, expr_map) -> RelayExpr:
    try:
        # rebuild the expression with its children remapped through expr_map
        new = eval("relay." + op_name(old))(
            *[expr_map.get(a, a) for a in args(old)], **attrs(old))
    except Exception as e:
        print(op_name(old))
        raise e
    if isinstance(new, relay.TupleWrapper):
        new = new.tuple_value
    return new
def clone(expr: RelayExpr, **kwargs) -> RelayExpr:
    expr = copy.copy(expr)
    for k, v in kwargs.items():
        setattr(expr, k, v)
    return expr
_VisitorT = typing.Callable[ [RelayExpr], None ]
_TransformerT = typing.Callable[
[RelayExpr], typing.Optional[RelayExpr]]
""" Expr Transformer
Return new expr to transform old expr into updated one,
or just return None for expr visit.
"""
def transform(expr: RelayExpr, callback: _TransformerT) -> RelayExpr:
expr_list: typing.List[RelayExpr] = []
def _collect_expr(expr: RelayExpr):
# primitive ir operators, wrapper by CallNode
if isinstance(expr, ir.op.Op):
return
expr_list.append(expr)
relay.analysis.post_order_visit(expr, _collect_expr)
expr_map = {}
for i, sym in enumerate(expr_list):
out = update_expr_args(sym, expr_map)
# pre-clone symbol, to avoid misleading usage in callback
out = callback(out) or out
assert isinstance(out, RelayExpr)
expr_map[sym] = out
return expr_map[expr]
def infer_type(expr: RelayExpr) -> RelayExpr:
mod = relay.transform.InferType()(ir.IRModule.from_expr(expr))
return mod["main"].body
def visit(expr: RelayExpr, callback: _VisitorT):
expr_list: typing.List[RelayExpr] = []
def _collect_expr(expr: RelayExpr):
# primitive ir operators, wrapper by CallNode
if isinstance(expr, ir.op.Op):
return
expr_list.append(expr)
relay.analysis.post_order_visit(expr, _collect_expr)
for sym in expr_list:
callback(sym)
def simple_raw_print(expr: RelayExpr, params: ParametersT = {}):
    info = { "op": 0, "param": 0 }
    def _simple_visit(sym):
        if not is_operator(sym):
            print("{:68} /* attrs */ \t{}".format(
                sym.name_hint, attrs(sym)))
            if is_param(sym, params):
                info["param"] += utils.product(
                    attrs(sym)["shape"])
            return
        info["op"] += 1
        print("{:>20}{:30} /* attrs */ \t{}".format(
            op_name(sym),
            "(" + ", ".join([op_name(i) for i in args(sym)]) + ")",
            attrs(sym),
        ))
    visit(expr, _simple_visit)
print("="*50)
print("Operators: {} | Parameters: {}".format(
info["op"], info["param"]))
print("="*50)
def to_json(expr: RelayExpr):
    json_map = {}
    def _cast(expr: RelayExpr):
        # post-order visit guarantees that args are cast first
        data = {
            "op_name": op_name(expr),
            "args": [json_map[a] for a in args(expr)],
            "attrs": attrs(expr),
        }
        json_map[expr] = data
    visit(expr, _cast)
    return json_map[expr]
def filter_operators(*op_names: typing.List[str]):
def _pass(f):
@wraps(f)
def _wrapper(expr: RelayExpr, *args, **kw):
if op_name(expr) not in op_names:
return
return f(expr, *args, **kw)
return _wrapper
return _pass
VAR_NAME = "var"
TUPLE_NAME = "Tuple"
TUPLE_GET_ITEM_NAME = "TupleGetItem"
def op_name(expr: RelayExpr):
if isinstance(expr, Call):
return expr.op.name
elif isinstance(expr, TupleGetItem):
return TUPLE_GET_ITEM_NAME
elif isinstance(expr, Tuple):
return TUPLE_NAME
elif isinstance(expr, Var):
return VAR_NAME
assert False, type(expr)
def args(expr: RelayExpr) -> typing.List[RelayExpr]:
if isinstance(expr, Call):
return expr.args
elif isinstance(expr, TupleGetItem):
return [ expr.tuple_value ]
elif isinstance(expr, Tuple):
return expr.fields
elif isinstance(expr, Var):
return []
assert False, type(expr)
def attrs(expr: RelayExpr) -> dict:
if isinstance(expr, Call):
attrs = expr.attrs or {}
return {k: attrs[k] for k in attrs.keys()}
elif isinstance(expr, TupleGetItem):
return { "index": expr.index }
elif isinstance(expr, Tuple):
return {}
elif isinstance(expr, Var):
return {
"name_hint": expr.name_hint,
"shape": expr.type_annotation.concrete_shape,
"dtype": expr.type_annotation.dtype,
}
assert False, type(expr)
def is_operator(expr: RelayExpr, params: ParametersT = {}):
return not isinstance(expr, Var)
def is_variable(expr: RelayExpr, params: ParametersT = {}):
    return isinstance(expr, Var)
def is_param(expr: RelayExpr, params: ParametersT):
    return is_variable(expr) and expr.name_hint in params
def is_input(expr: RelayExpr, params: ParametersT):
    return is_variable(expr) and expr.name_hint not in params
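# Hedged usage sketch (added for illustration): count conv2d calls in a
# small relay expression using the `visit`/`filter_operators` helpers above.
if __name__ == "__main__":
    x = relay.var("x", shape=(1, 3, 8, 8), dtype="float32")
    w = relay.var("w", shape=(4, 3, 3, 3), dtype="float32")
    y = relay.nn.conv2d(x, w, kernel_size=(3, 3), channels=4)
    counter = {"conv2d": 0}
    @filter_operators("nn.conv2d")
    def _count(expr):
        counter["conv2d"] += 1
    visit(infer_type(y), _count)
    print(counter)  # {'conv2d': 1}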
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/fuse.py | from dataclasses import InitVar
from .symbol import *
@dataclass
class FusionOp(Symbol):
params: InitVar[ParametersT]
def __post_init__(self, params):
self._fuse_batch_norm()
@filter_operators("nn.batch_norm")
def _fuse_batch_norm(self):
X = self.args[0]
assert X.is_op("nn.conv2d"), str(self)
print(self)
return self
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/gluon.py | """ Gluon model zoo for MRT quantization.
Only **crucial parts** of the module are elaborated.
Current supported MxNet Model List:
resnet18_v1, resnet34_v1, resnet50_v1, resnet101_v1, resnet152_v1, resnet18_v2, resnet34_v2, resnet50_v2, resnet101_v2, resnet152_v2,
se_resnet18_v1, se_resnet34_v1, se_resnet50_v1, se_resnet101_v1, se_resnet152_v1, se_resnet18_v2, se_resnet34_v2, se_resnet50_v2, se_resnet101_v2, se_resnet152_v2,
vgg11, vgg13, vgg16, vgg19, vgg11_bn, vgg13_bn, vgg16_bn, vgg19_bn,
alexnet,
densenet121, densenet161, densenet169, densenet201,
squeezenet1.0, squeezenet1.1,
inceptionv3,
mobilenet1.0, mobilenet0.75, mobilenet0.5, mobilenet0.25, mobilenetv2_1.0, mobilenetv2_0.75, mobilenetv2_0.5, mobilenetv2_0.25,
ssd_300_vgg16_atrous_voc, ssd_300_vgg16_atrous_coco, ssd_300_vgg16_atrous_custom, ssd_512_vgg16_atrous_voc, ssd_512_vgg16_atrous_coco, ssd_512_vgg16_atrous_custom, ssd_512_resnet18_v1_voc, ssd_512_resnet18_v1_coco, ssd_512_resnet50_v1_voc, ssd_512_resnet50_v1_coco, ssd_512_resnet50_v1_custom, ssd_512_resnet101_v2_voc, ssd_512_resnet152_v2_voc, ssd_512_mobilenet1.0_voc, ssd_512_mobilenet1.0_coco, ssd_512_mobilenet1.0_custom,
faster_rcnn_resnet50_v1b_voc, faster_rcnn_resnet50_v1b_coco, faster_rcnn_fpn_resnet50_v1b_coco, faster_rcnn_fpn_bn_resnet50_v1b_coco, faster_rcnn_resnet50_v1b_custom, faster_rcnn_resnet101_v1d_voc, faster_rcnn_resnet101_v1d_coco, faster_rcnn_fpn_resnet101_v1d_coco, faster_rcnn_resnet101_v1d_custom,
mask_rcnn_resnet50_v1b_coco, mask_rcnn_fpn_resnet50_v1b_coco, mask_rcnn_resnet101_v1d_coco, mask_rcnn_fpn_resnet101_v1d_coco,
cifar_resnet20_v1, cifar_resnet56_v1, cifar_resnet110_v1, cifar_resnet20_v2, cifar_resnet56_v2, cifar_resnet110_v2,
cifar_wideresnet16_10, cifar_wideresnet28_10, cifar_wideresnet40_8,
cifar_resnext29_32x4d, cifar_resnext29_16x64d,
fcn_resnet50_voc, fcn_resnet101_coco, fcn_resnet101_voc, fcn_resnet50_ade, fcn_resnet101_ade,
psp_resnet101_coco, psp_resnet101_voc, psp_resnet50_ade, psp_resnet101_ade, psp_resnet101_citys,
deeplab_resnet101_coco, deeplab_resnet101_voc, deeplab_resnet152_coco, deeplab_resnet152_voc, deeplab_resnet50_ade, deeplab_resnet101_ade,
resnet18_v1b, resnet34_v1b, resnet50_v1b, resnet50_v1b_gn, resnet101_v1b_gn, resnet101_v1b, resnet152_v1b, resnet50_v1c, resnet101_v1c, resnet152_v1c, resnet50_v1d, resnet101_v1d, resnet152_v1d, resnet50_v1e, resnet101_v1e, resnet152_v1e, resnet50_v1s, resnet101_v1s, resnet152_v1s, resnext50_32x4d, resnext101_32x4d, resnext101_64x4d,
se_resnext50_32x4d, se_resnext101_32x4d, se_resnext101_64x4d,
senet_154,
darknet53,
yolo3_darknet53_coco, yolo3_darknet53_voc, yolo3_darknet53_custom,
yolo3_mobilenet1.0_coco, yolo3_mobilenet1.0_voc, yolo3_mobilenet1.0_custom,
nasnet_4_1056, nasnet_5_1538, nasnet_7_1920, nasnet_6_4032,
simple_pose_resnet18_v1b, simple_pose_resnet50_v1b, simple_pose_resnet101_v1b, simple_pose_resnet152_v1b, simple_pose_resnet50_v1d, simple_pose_resnet101_v1d, simple_pose_resnet152_v1d,
residualattentionnet56, residualattentionnet92, residualattentionnet128, residualattentionnet164, residualattentionnet200, residualattentionnet236, residualattentionnet452,
cifar_residualattentionnet56, cifar_residualattentionnet92, cifar_residualattentionnet452,
resnet18_v1b_0.89, resnet50_v1d_0.86, resnet50_v1d_0.48, resnet50_v1d_0.37, resnet50_v1d_0.11, resnet101_v1d_0.76, resnet101_v1d_0.73,
mobilenet1.0_int8,
resnet50_v1_int8,
ssd_300_vgg16_atrous_voc_int8,
ssd_512_mobilenet1.0_voc_int8,
ssd_512_resnet50_v1_voc_int8,
ssd_512_vgg16_atrous_voc_int8
"""
import os
import mxnet as mx
from mxnet import ndarray as nd
from mxnet.gluon.model_zoo import vision
import gluoncv as cv
from . import utils
def load_inception_v3(ctx):
return vision.inception_v3(pretrained=True, ctx=ctx, prefix="")
def save_inception_v3():
graph = load_inception_v3(mx.cpu())
sym = graph(mx.symbol.Variable('data'))
with open('./data/inception_v3.json', 'w') as fout:
fout.write(sym.tojson())
graph.save_params('./data/inception_v3.params')
def load_mobilenet1_0(ctx):
return vision.mobilenet1_0(pretrained=True, ctx=ctx, prefix="")
def save_mobilenet1_0():
graph = load_mobilenet1_0(mx.cpu())
sym = graph(mx.symbol.Variable('data'))
with open('./data/mobilenet1_0.json', 'w') as fout:
fout.write(sym.tojson())
graph.save_params('./data/mobilenet1_0.params')
def load_mobilenet_v2_1_0(ctx):
return vision.mobilenet_v2_1_0(pretrained=True, ctx=ctx, prefix="")
def save_mobilenet_v2_1_0():
graph = load_mobilenet_v2_1_0(mx.cpu())
sym = graph(mx.sym.var('data'))
with open('./data/mobilenet_v2_1_0.json', 'w') as fout:
fout.write(sym.tojson())
graph.save_parameters('./data/mobilenet_v2_1_0.params')
def load_resnet18_v1_yolo():
return cv.model_zoo.get_model('yolo3_resnet18_v1_voc',
pretrained=False, pretrained_base=True,
ctx=mx.gpu())
def get_model(name, ctx=mx.gpu(), **kwargs):
""" Returns a pre-defined model by name
Parameters
----------
name : str
Name of the model.
classes : int
Number of classes for the output layer.
ctx : mxnet.context()
Devices to get model.
Returns
-------
ret : HybridBlock
The model.
"""
return cv.model_zoo.get_model(
name, pretrained=True,
ctx=ctx, **kwargs)
def load_model(sym_fname, param_fname):
return mx.sym.load(sym_fname), nd.load(param_fname)
def save_model(name, data_dir=None, ctx=mx.gpu(), **kwargs):
""" Returns a pre-defined model by name
Parameters
----------
name : str
Name of the model.
data_dir : str
Directory to store the model.
ctx : mxnet.context()
Devices to get model.
Returns
-------
ret : tuple
The symbol path and the model path.
"""
net = get_model(name, ctx=ctx, **kwargs)
sym = net(mx.sym.var('data'))
if isinstance(sym, tuple):
sym = mx.sym.Group([*sym])
data_dir = utils.MRT_MODEL_ROOT if data_dir is None else data_dir
prefix = os.path.join(data_dir, name)
sym_path, prm_path = utils.extend_fname(prefix)
with open(sym_path, "w") as fout:
fout.write(sym.tojson())
net.collect_params().save(prm_path)
return sym_path, prm_path
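# Hedged usage sketch (added for illustration; requires mxnet and gluoncv,
# and writes under utils.MRT_MODEL_ROOT unless data_dir is given).
if __name__ == "__main__":
    sym_path, prm_path = save_model("resnet18_v1", ctx=mx.cpu())
    print(sym_path, prm_path)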
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/image.py | from os import path
import numpy as np
from PIL import Image
import tvm
from tvm.contrib.download import download_testdata
def get_real_image(im_height, im_width) -> np.ndarray:
repo_base = "https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/"
img_name = "elephant-299.jpg"
image_url = path.join(repo_base, img_name)
img_path = download_testdata(image_url, img_name, module="data")
image = Image.open(img_path).resize((im_height, im_width))
data = np.array(image).astype("float32")
data = np.reshape(data, (1, im_height, im_width, 3))
data = np.transpose(data, (0, 3, 1, 2))
return data
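# Hedged usage sketch (added for illustration): download the test image
# once and check the NCHW layout expected by typical ImageNet models.
if __name__ == "__main__":
    data = get_real_image(224, 224)
    print(data.shape, data.dtype)  # (1, 3, 224, 224) float32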
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/runtime.py | import typing
import numpy as np
import tvm
from tvm import relay, ir, runtime
from tvm.contrib import graph_executor
from tvm.ir import RelayExpr
from .types import *
from .dataset import Dataset
from .stats import Statistics
from . import symbol
__all__ = ["infer"]
def create_executor(
expr: RelayExpr, params: ParametersT, device=runtime.cpu(0),
opt_level=0
) -> relay.build_module.GraphExecutor:
target = "llvm"
with tvm.transform.PassContext(opt_level=opt_level):
lib = relay.build_module.build(
ir.IRModule.from_expr(expr),
target=target, params=params)
rt_mod: relay.build_module.GraphExecutor = \
graph_executor.GraphModule(lib["default"](device))
return rt_mod
OutputDataType = typing.List[np.ndarray]
def infer(expr: RelayExpr, params: ParametersT,
device=runtime.cpu(0)) -> OutputDataType:
# # target = "llvm"
# target = tvm.target.cuda()
# with tvm.transform.PassContext(opt_level=3):
# lib = relay.build_module.build(
# ir.IRModule.from_expr(expr),
# target=target,
# params=params)
# rt_mod: relay.build_module.GraphExecutor = graph_executor.GraphModule(lib["default"](device))
# # rt_mod.set_input("input", data)
# rt_mod.run()
# return [rt_mod.get_output(i).numpy() \
# for i in range(rt_mod.get_num_outputs())]
result = tvm.relay.create_executor(
"graph", mod=ir.IRModule.from_expr(expr),
device=device, target="llvm",
).evaluate()(**params)
return result
# if isinstance(result, tvm.runtime.NDArray):
# result = [ result, ]
# return [ r.numpy() for r in result ]
def validator(expr: RelayExpr, params: ParametersT, name: str,
device=runtime.cpu(0), ):
target = "llvm"
with tvm.transform.PassContext(opt_level=3):
lib = relay.build_module.build(
ir.IRModule.from_expr(expr),
target=target,
params=params)
mod: relay.build_module.GraphExecutor = graph_executor.GraphModule(lib["default"](device))
input_names = []
for v in relay.analysis.free_vars(expr):
if v.name_hint not in params:
input_names.append(v.name_hint)
assert len(input_names) == 1
assert mod.get_num_outputs() == 1
input_name = input_names[0]
    def _run(dl: DataLabelT) -> DataLabelT:
        data, label = dl
        mod.set_input(input_name, data)
        mod.run()
        return mod.get_output(0).numpy(), label
_run.__name__ = name
return _run
ValidateFunctionT = typing.Callable[[DataLabelT], DataLabelT]
def multiple_validate(
base_func: ValidateFunctionT,
dataset: Dataset, stats_type: typing.Type[Statistics],
*comp_funcs: typing.List[ValidateFunctionT],
max_iter_num: typing.Optional[int] = None,
):
all_funcs = [ base_func, ] + list(comp_funcs)
all_stats = [stats_type() for _ in all_funcs]
log_str = "Iteration: {:3d} | "
for func in all_funcs:
log_str += func.__name__ + ": {} | "
    num_iter = 0
    # max_iter_num=None means: iterate until the dataset is exhausted
    while max_iter_num is None or num_iter < max_iter_num:
        dl = dataset.next()
        if dl is None:
            break
        for func, stats in zip(all_funcs, all_stats):
            out_dl = func(dl)
            stats.merge(out_dl)
        msg = log_str.format(num_iter, *[s.info() for s in all_stats])
        print(msg)
        num_iter += 1
print("Multiple Validation Done!")
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/stats.py | from os import path
import enum
import tvm
import numpy as np
from .utils import PY_ROOT
from .types import *
class StatsConfig(enum.Enum):
NONE = enum.auto()
ALL = enum.auto()
ACCURACY = enum.auto()
""" enable accuracy info in stats, True by default. """
TIME = enum.auto()
""" enable time logger in stats. """
DL = enum.auto()
""" print current DataLabelT's info, this will suppress all other config. """
class Statistics:
def __init__(self):
self.stats_info = {}
    def reset(self):
        """ reset statistic status. """
        raise RuntimeError("Base Statistics Error")
    def merge(self, dl: DataLabelT):
        """ merge model output and update status. """
        raise RuntimeError("Base Statistics Error")
    def info(self) -> str:
        """ return statistic information. """
        raise RuntimeError("Base Statistics Error")
    def dl_info(self) -> str:
        """ return current DataLabel information. """
        raise RuntimeError("Base Statistics Error")
class ClassificationOutput(Statistics):
def __init__(self):
self.num_classes = None
self.data, self.label = None, None
self.top1_hit = 0
self.top5_hit = 0
self.dl_total = 0
self.dl_top1, self.top1_raw = [], []
self.dl_top5, self.top5_raw = [], []
def reset(self):
self.top1_hit = 0
self.top5_hit = 0
self.dl_total = 0
def merge(self, dl: DataLabelT):
data, label = dl
self.argsort = [ np.argsort(d).tolist() for d in data]
self.dl_top1 = [a[-1] for a in self.argsort]
self.dl_top5 = [a[-5:] for a in self.argsort]
self.top1_raw = [ data[i][b] \
for i, b in enumerate(self.dl_top1) ]
self.top5_raw = [ [data[i][a] for a in b] \
for i, b in enumerate(self.dl_top5) ]
assert len(data.shape) == 2
self.batch = data.shape[0]
assert len(label.shape) == 1
assert self.batch == label.shape[0]
if self.num_classes is None:
self.num_classes = data.shape[1]
else:
assert self.num_classes == data.shape[1]
label = label.tolist()
self.dl_total += self.batch
for d, l in zip(self.dl_top1, label):
self.top1_hit += (d == int(l))
for d, l in zip(self.dl_top5, label):
self.top5_hit += (int(l) in d)
def dl_info(self):
print("=" * 50)
print("Batch: {}, Class Number: {}".format(
self.batch, self.num_classes))
top1, top1_raw = self.dl_top1, self.top1_raw
top5, top5_raw = self.dl_top5, self.top5_raw
for i in range(self.batch):
print("{:5} Top1: {:3} | Raw: {}".format(
i, top1[i], top1_raw[i]))
print("{:5} Top5: {} | Raw: {}".format(
i, top5[i], top5_raw[i]))
# print("{:5} Top1: {:3} | Top5: {}".format(
# i, top1[i], top5[i]))
print("=" * 50)
def info(self):
return "{},{}".format(
(1. * self.top1_hit / self.dl_total),
(1. * self.top5_hit / self.dl_total))
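# Hedged usage sketch (added for illustration): accumulate one batch of
# fake logits/labels and print the running "top1,top5" accuracy string.
if __name__ == "__main__":
    scores = np.random.randn(4, 10)  # batch of 4 samples, 10 classes
    labels = np.array([0, 1, 2, 3])
    stats = ClassificationOutput()
    stats.merge((scores, labels))
    stats.dl_info()
    print("top1,top5:", stats.info())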
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/symbol.py | from __future__ import annotations
import typing
from dataclasses import dataclass, fields, is_dataclass
from functools import wraps
import json
import tvm
from tvm import relay, ir
from tvm.ir.expr import *
from tvm.relay.expr import *
from .utils import *
from .types import *
# __all__ = [
# "Symbol", "ParametersT",
# "is_operator", "is_variable", "is_input", "is_param",
# # symbol pass wrapper and some help functions
# "transform", "filter_operators", "visit",
# "simple_raw_print",
# # API with expr
# "expr2symbol", "symbol2expr",
# ]
VAR_NAME = "var"
TUPLE_GET_ITEM_NAME = "TupleGetItem"
TUPLE_NAME = "Tuple"
def is_operator(symbol: Symbol, params: ParametersT = {}):
return symbol.op_name != VAR_NAME
def is_variable(symbol: Symbol, params: ParametersT = {}):
return symbol.op_name == VAR_NAME
def is_input(symbol: Symbol, params: ParametersT):
return is_variable(symbol) and symbol.name not in params
def is_param(symbol: Symbol, params: ParametersT):
return is_variable(symbol) and symbol.name in params
_CopyAttrsT = typing.Union[typing.List[str], str]
@dataclass
class Symbol:
""" Uniform Symbol Representation for RelayExpr
RelayExpr has different format for operators, functions,
which is hard to apply uniform transformation pass.
Such as the `TupleGetItem`.
Abstract representation allows different definitions
for operators, which can be easier for graph
transformation. Like the `BatchNorm` op returns
a 3-tuple, whereas the return is first in cvm.
We need to consistently print symbol information such as name,
for the user's config about quantization layers.
"""
name: str
op_name: str
args: typing.List[Symbol]
attrs: typing.Dict[str, typing.Any]
def __hash__(self) -> int:
return hash(str(self))
@staticmethod
def variable(name):
return Symbol(name, VAR_NAME, [], { "name_hint": name })
def as_parameter(self) -> Symbol:
var = Symbol.variable(self.name)
var.attrs["shape"] = self.shape
var.attrs["dtype"] = self.dtype
return var
def is_op(self, op_name):
return self.op_name == op_name
def to_dict(self):
return dict((f.name, getattr(self, f.name)) \
for f in fields(self))
def clone(self, cls: typing.Type[Symbol] = None, **kw):
cls = cls or type(self)
assert is_dataclass(cls)
data = {}
for k in [f.name for f in fields(cls)]:
if k in [f.name for f in fields(self)]:
data[k] = getattr(self, k)
data.update(kw)
try:
new = cls(**data)
except Exception as e:
print("clone failed: ", cls.__name__,
self, list(data.keys()))
raise e
return new
@property
def shape(self):
return self.attrs["shape"]
@property
def dtype(self):
return self.attrs["dtype"]
def __eq__(self, other: Symbol):
return self.args == other.args and hash(self) == hash(other)
def __str__(self):
args_info= ["{}@{}".format(
i.name, i.attrs.get("shape", None)) \
for i in self.args ]
return "{} = {}({}) /* attrs */ \t{}".format(
self.name, self.op_name,
", ".join(args_info),
self.attrs)
def _topo_sort(symbol: Symbol, sym_list: typing.List[Symbol]):
if sym_list.count(symbol) > 0:
return
for c in symbol.args:
_topo_sort(c, sym_list)
sym_list.append(symbol)
_VisitorT = typing.Callable[[Symbol], None]
_TransformerT = typing.Callable[[Symbol], typing.Optional[Symbol]]
""" Symbol Transformer
Return new symbol to transform old symbol into updated one,
or just return None for symbol visit.
"""
def visit(symbol: Symbol, callback: _VisitorT):
""" Visitor mode, possible modify symbol itself. """
sym_list: typing.List[Symbol] = []
_topo_sort(symbol, sym_list)
for sym in sym_list:
callback(sym)
def transform(symbol: Symbol, callback: _TransformerT) -> Symbol:
""" Transform symbol from old to new, with inputs updated.
Only the return value indicates mutation, while changing
attributes in parameter passed in args does nothing.
"""
sym_list: typing.List[Symbol] = []
_topo_sort(symbol, sym_list)
sym_map = {}
for sym in sym_list:
args = [sym_map[c.name] for c in sym.args]
# pre-clone symbol, to avoid misleading usage in callback
sym = sym.clone(
args=args,
attrs={k: v for k, v in sym.attrs.items()})
out = callback(sym) or sym
assert isinstance(out, Symbol)
sym_map[sym.name] = out
return sym_map[symbol.name]
def filter_operators(*op_names: typing.List[str]):
def _pass(f):
@wraps(f)
def _wrapper(sym: Symbol) -> typing.Any:
if any([ sym.is_op(n) for n in op_names ]):
return f(sym)
return _wrapper
return _pass
def simple_raw_print(symbol: Symbol, params: ParametersT = {}):
info = { "op": 0, "param": 0 }
def _simple_visit(sym):
if is_param(sym, params):
info["param"] += product(params[sym.name].shape)
info["op"] += is_operator(sym)
print("{:30} = {:>15}{:30} /* attrs */ {}".format(
sym.name, sym.op_name,
"(" + ", ".join([i.name for i in sym.args]) + ")",
sym.attrs,
))
transform(symbol, _simple_visit)
print("="*50)
print("Operators: {} | Parameters: {}".format(
info["op"], info["param"]))
print("="*50)
# ==============================================================
# API from relay.Function to Symbol.
# ==============================================================
SUPPORTED_EXPR_TYPE = (
relay.expr.Var,
ir.op.Op, # Op are wrapped by Call.
relay.expr.Call,
relay.expr.TupleGetItem,
)
def expr_type(checked_type: ir.type.Type, key):
if isinstance(checked_type, ir.type.TupleType):
return [expr_type(f, key) for f in checked_type.fields]
return getattr(checked_type, key)
def expr2symbol(expr: RelayExpr) -> Symbol:
mod = relay.transform.InferType()(ir.IRModule.from_expr(expr))
expr = mod["main"].body
symbol_map = {}
def _cast_expr(node: RelayExpr):
if not isinstance(node, SUPPORTED_EXPR_TYPE):
raise RuntimeError(
"MRT not support expr type:{}".format(type(node)))
if isinstance(node, ir.op.Op):
return
if isinstance(node, relay.Var):
name = node.name_hint or N.n(prefix="input_")
symbol_map[node] = Symbol.variable(name)
elif isinstance(node, relay.Call):
args = [symbol_map[i] for i in node.args]
attrs = node.attrs or {}
attrs = {k: attrs[k] for k in attrs.keys()}
symbol_map[node] = Symbol(N.n(), node.op.name,
args, attrs)
elif isinstance(node, relay.TupleGetItem):
args = [ symbol_map[node.tuple_value], ]
symbol_map[node] = Symbol(N.n(), TUPLE_GET_ITEM_NAME,
args, { "index": node.index })
elif isinstance(node, relay.Tuple):
args = [ symbol_map[f] for f in node.fields ]
symbol_map[node] = Symbol(N.n(), TUPLE_NAME,
args, {})
dtype = expr_type(node.checked_type, "dtype")
shape = expr_type(node.checked_type, "concrete_shape")
# print(dtype, shape, type(shape))
symbol_map[node].attrs.update({
"shape": list(shape),
"dtype": dtype,
})
with N():
relay.analysis.post_order_visit(expr, _cast_expr)
return symbol_map[expr]
def symbol2expr(symbol: Symbol, expr_map={}) -> RelayExpr:
# operator creator don't need shape or dtype attrs,
# except for the variable.
def _remove_type(sym: Symbol):
if is_variable(sym):
return
if "shape" in sym.attrs:
del sym.attrs["shape"]
if "dtype" in sym.attrs:
del sym.attrs["dtype"]
return sym
symbol = transform(symbol, _remove_type)
expr_map.clear()
def _cast_symbol(sym: Symbol):
args = [expr_map[i] for i in sym.args]
if sym.is_op(TUPLE_NAME):
out = relay.Tuple(args)
else:
try:
out = eval("relay." + sym.op_name)(*args, **sym.attrs)
except Exception as e:
print(sym, [type(a) for a in args])
raise e
if isinstance(out, relay.TupleWrapper):
out = out.tuple_value
# relay.transform.InferTypeLocal(out)
expr_map[sym] = out
_ = transform(symbol, _cast_symbol)
return expr_map[symbol]
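# Hedged round-trip sketch (added for illustration):
# RelayExpr -> Symbol -> transformed Symbol -> RelayExpr.
if __name__ == "__main__":
    x = relay.var("x", shape=(1, 3, 8, 8), dtype="float32")
    w = relay.var("w", shape=(4, 3, 3, 3), dtype="float32")
    y = relay.nn.conv2d(x, w, kernel_size=(3, 3), channels=4)
    sym = expr2symbol(y)
    def _rename_conv(s: Symbol):
        # returning a new symbol mutates the graph; None keeps it
        if s.is_op("nn.conv2d"):
            return s.clone(name=s.name + "_conv")
        return None
    sym = transform(sym, _rename_conv)
    simple_raw_print(sym)
    out = symbol2expr(sym)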
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/topi.py | import typing
from functools import wraps
import numpy as np
import tvm
from tvm.ir import RelayExpr
from tvm.topi import testing
from .extool import *
TOPI_REGS = {}
DataType = typing.List[np.ndarray]
def register_topi(op_name):
def _wrapper(f):
TOPI_REGS[op_name] = f
return f
return _wrapper
@register_topi("nn.conv2d")
def run_conv2d(data: DataType, attrs: AttrsT):
dw_np = topi.testing.dilate_python()
return testing.conv2d_nchw_python(*data, **attrs)
@register_topi("nn.batchnorm")
def run_batchnorm(data: DataType, attrs: AttrsT):
return testing.batch_norm(*data, **attrs)
# def execute(op_name: str, attrs: AttrsT, data: DataType) -> np.ndarray:
# eval("relay." + op_name)(op_name, )
# return TOPI_REGS[op_name](data, attrs)
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/torch.py | from torchvision import models
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/trace.py | from __future__ import annotations
import typing
from dataclasses import dataclass, field
from functools import wraps
import numpy as np
import tvm
from tvm import relay, ir
from tvm.contrib import graph_executor as graph
from .symbol import *
from .types import *
from . import topi
from . import runtime
Visitor = typing.Callable[[Symbol, ParametersT], None]
Transformer = typing.Callable[[Symbol, ParametersT], typing.Optional[Symbol]]
@dataclass
class Trace:
""" Only use visitor mode in Trace. """
name: str
""" Trace Name """
symbol: Symbol
params: ParametersT
sym_inputs: typing.List[Symbol] = field(init=False)
sym_params: typing.List[Symbol] = field(init=False)
def __post_init__(self):
self.sym_inputs = []
self.sym_params = []
def _init(sym: Symbol):
if is_input(sym, self.params):
self.sym_inputs.append(sym)
elif is_param(sym, self.params):
sym_shape = list(sym.attrs["shape"])
param_shape = self.params[sym.name].shape
assert sym_shape == list(param_shape), (
"param:{} shape inconsistent: {} vs. {}"
).format(sym.name, sym_shape, param_shape)
# sym.attrs["shape"] = self.params[sym.name].shape
self.sym_params.append(sym)
visit(self.symbol, _init)
@property
def input_names(self) -> typing.List[str]:
return [i.name for i in self.sym_inputs]
@property
def input_shapes(self) -> typing.List[ShapeT]:
return [i.attrs["shape"] for i in self.sym_inputs]
def random_inputs(self) -> ParametersT:
data = {}
for sym in self.sym_inputs:
shape = sym.attrs["shape"]
dtype = sym.attrs["dtype"]
np_data = np.random.randn(*shape).astype(dtype)
data[sym.name] = tvm.nd.array(np_data)
return data
def calibrate(self,
data: typing.Optional[np.ndarray] = None,
data_dict: typing.Dict[str, np.ndarray] = {},
) -> typing.Dict[str, np.ndarray]:
calibrate_outputs: typing.Dict[str, np.ndarray] = {
k: v.numpy() for k, v in self.params.items()}
# set input data
for v in self.sym_inputs:
shape, dtype = v.attrs["shape"], v.attrs["dtype"]
val = data_dict.get(v.name, data)
if val is None:
print("input: {} use random data".format(v.name))
val = np.random.randn(*shape).astype(dtype)
calibrate_outputs[v.name] = val
def _execute(sym: Symbol, data: ParametersT) -> runtime.OutputDataType:
args = [ a.as_parameter() for a in sym.args]
sym = sym.clone(args=args)
expr = symbol2expr(sym)
result = runtime.infer(expr, data)
return result
def _tassert(expect: typing.Any, val: typing.Any):
if isinstance(expect, ( list, tuple )):
assert len(expect) == len(val), (
"{} vs. {}").format(expect, val)
for e, o in zip(expect, val):
_tassert(e, o)
elif isinstance(expect, ( int, str )):
assert expect == val
def _get_type(out, key):
if isinstance(out, tvm.runtime.NDArray):
return getattr(out, key)
return [ _get_type(o, key) for o in out ]
def _calibrate(sym: Symbol, params: ParametersT):
global TUPLE_GET_ITEM_NAME
if is_variable(sym, params):
return
if sym.op_name == TUPLE_GET_ITEM_NAME:
out = calibrate_outputs[sym.args[0].name][sym.attrs['index']]
else:
out = _execute(sym, calibrate_outputs)
_tassert(sym.attrs["shape"], _get_type(out, "shape"))
_tassert(sym.attrs["dtype"], _get_type(out, "dtype"))
calibrate_outputs[sym.name] = out
self.visit(_calibrate)
return calibrate_outputs
def run(self,
data: typing.Optional[tvm.nd.NDArray] = None,
data_dict: ParametersT = {},
device: tvm.runtime.Device = tvm.runtime.cpu(0),
) -> typing.List[np.ndarray]:
params = {k: v for k, v in self.params.items()}
for sym in self.sym_inputs:
val = data_dict.get(sym.name, data)
shape = sym.attrs["shape"]
dtype = sym.attrs["dtype"]
assert val is not None
assert shape == list(val.shape), (
"{}: {} vs. {}").format(
sym.name, shape, val.shape)
assert dtype == val.dtype
params[sym.name] = val
        return runtime.infer(self.to_expr(), params, device=device)
def random_run(self) -> typing.List[tvm.nd.NDArray]:
data = {}
for sym in self.sym_inputs:
shape = sym.attrs["shape"]
dtype = sym.attrs["dtype"]
np_data = np.random.randn(*shape).astype(dtype)
data[sym.name] = tvm.nd.array(np_data)
return self.run(data_dict=data)
    def set_input_shape(self,
            shape = None, shape_dict = {}) -> Trace:
        shape_dict = dict(shape_dict)  # avoid mutating the caller's (default) dict
        shape_dict["common_shape"] = shape
def _set_shape(sym: Symbol):
if is_input(sym, self.params):
shape = shape_dict.get(
sym.name, shape_dict["common_shape"])
if shape is not None:
sym.attrs["shape"] = shape
return sym
symbol = transform(self.symbol, _set_shape)
return Trace.from_expr(symbol2expr(symbol), self.params)
def print(self):
simple_raw_print(self.symbol, self.params)
def visit(self, callback: Visitor):
def _visitor(sym: Symbol):
callback(sym, self.params)
visit(self.symbol, _visitor)
def transform(self, callback: Transformer) -> Trace:
def _tfm(sym: Symbol):
return callback(sym, self.params)
return Trace(callback.__name__,
transform(self.symbol, _tfm), self.params)
def to_expr(self, expr_map={}) -> ir.RelayExpr:
return symbol2expr(self.symbol, expr_map)
@staticmethod
def from_expr(expr: RelayExpr, params: ParametersT) -> Trace:
return Trace("init", expr2symbol(expr), params)
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/transform.py | from __future__ import annotations
import typing
from dataclasses import dataclass, fields
import tvm
from tvm import relay, ir
from . import transformers
from .symbol import *
from .trace import *
@dataclass
class Transformer(Symbol):
""" Type TransformerT for Trace """
params: ParametersT = field(default_factory=dict)
def is_input(self) -> bool:
return is_input(self, self.params)
def is_param(self) -> bool:
return is_param(self, self.params)
def is_variable(self) -> bool:
return is_variable(self, self.params)
def is_operator(self) -> bool:
return is_operator(self, self.params)
@classmethod
def apply(cls, *args, **kw):
def _tfm(symbol: Symbol, params: ParametersT):
ins = symbol.clone(cls, params=params)
return ins(*args, **kw) or ins
return _tfm
def __call__(self, *args, **kw) -> Symbol:
return self
class Validator(Transformer):
pass
class Quantizer(Transformer):
def expect_max_precision(self, max_prec) -> Quantizer:
""" Requantization Method """
raise NotImplementedError("")
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/transformers/__init__.py |
from .validate import *
from .quantize import *
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/transformers/quantize.py |
from tvm import relay, ir
def quantize_conv2d(expr: relay.Call, attrs):
    """ Quantize by scale (stub, not yet implemented). """
    assert isinstance(expr, relay.Call)
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/transformers/validate.py |
from tvm import relay, ir
from ..extool import *
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/types.py | import typing
import tvm
import numpy as np
ParametersT = typing.Dict[str, tvm.nd.NDArray]
AttrsT = typing.Dict[str, typing.Any]
ShapeT = typing.Union[typing.List[int], typing.Tuple[int]]
""" shape type, tuple of int, such as (1, 3, 34, 34). """
DataLabelT = typing.Tuple[np.ndarray, typing.Any]
""" a (data, label) representation. """
| https://github.com/zk-ml/tachikoma |
python/tvm/mrt/utils.py | from __future__ import annotations
import typing
import threading
import os
from os import path
from tvm import relay, ir
from .types import *
ROOT = path.abspath(path.join(__file__, "../../../"))
PY_ROOT = path.join(ROOT, "python")
MRT_MODEL_ROOT = path.expanduser("~/mrt_model")
if not path.exists(MRT_MODEL_ROOT):
os.makedirs(MRT_MODEL_ROOT)
MRT_DATASET_ROOT = path.expanduser("~/.mxnet/datasets")
if not path.exists(MRT_DATASET_ROOT):
os.makedirs(MRT_DATASET_ROOT)
def product(shape: ShapeT):
total = 1
for s in shape:
total *= s
return total
class N:
def __init__(self, name=""):
self.counter = 0
self.scope_name = name
self.lock = threading.Lock()
self.last_scope = N.__GLOBAL_INSTANCE__
def __enter__(self):
self._set_name_scope(self)
return self
def __exit__(self, *args):
self._set_name_scope(self.last_scope)
def _alloc_name(self, prefix, suffix):
with self.lock:
index = self.counter
self.counter += 1
name = "{}{}{}".format(prefix, index, suffix)
if self.scope_name:
name = "{}.{}".format(self.scope_name, name)
return name
__GLOBAL_INSTANCE__ = None
@staticmethod
def _set_name_scope(ins):
N.__GLOBAL_INSTANCE__ = ins
@staticmethod
def n(prefix="%", suffix=""):
ins = N.__GLOBAL_INSTANCE__
if ins is None:
raise RuntimeError("Namescope not specified")
return ins._alloc_name(prefix, suffix)
@staticmethod
def register_global_scope(name=""):
N._set_name_scope(N(name))
def extend_fname(prefix, with_ext=False):
""" Get the precision of the data.
Parameters
__________
prefix : str
The model path prefix.
with_ext : bool
Whether to include ext_file path in return value.
Returns
_______
ret : tuple
The symbol path, params path; and with_ext is True, also return ext file path.
"""
ret = ["%s.json"%prefix, "%s.params"%prefix]
if with_ext:
ret.append("%s.ext"%prefix)
return tuple(ret)
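# Hedged usage sketch (added for illustration): allocate unique names in
# an explicit namescope and derive model file paths from a prefix.
if __name__ == "__main__":
    with N("demo"):
        print(N.n(), N.n(prefix="conv_"))  # demo.%0 demo.conv_1
    print(extend_fname("/tmp/resnet18_v1", with_ext=True))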
| https://github.com/zk-ml/tachikoma |
python/tvm/parser/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""The under development unified IR parsing infrastructure."""
from .. import _ffi, Object
from . import _ffi_api
@_ffi.register_object("SourceMap")
class SourceMap(Object):
def add(self, name, content):
return _ffi.get_global_func("SourceMapAdd")(self, name, content)
def parse(source, source_name="from_string", init_module=None, init_meta_table=None):
if init_meta_table is None:
init_meta_table = {}
return _ffi_api.ParseModuleInContext(source_name, source, init_module, init_meta_table)
def parse_expr(source):
return _ffi_api.ParseExpr("string", source)
def fromtext(source, source_name="from_string"):
return parse(source, source_name)
def SpanCheck():
"""A debugging utility for reporting missing span information."""
return _ffi_api.SpanCheck()
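# Hedged usage sketch (added for illustration): parse a module from the
# Relay text format.
if __name__ == "__main__":
    mod = fromtext(
        '#[version = "0.0.5"]\n'
        "def @main(%x: Tensor[(2, 2), float32]) { %x }"
    )
    print(mod)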
| https://github.com/zk-ml/tachikoma |
python/tvm/parser/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for tvm.ir"""
import tvm._ffi
tvm._ffi._init_api("parser", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, redefined-builtin, invalid-name
"""The Relay IR namespace containing the IR definition and compiler."""
import os
from sys import setrecursionlimit
from . import base
from . import ty
from . import expr
from . import function
from . import type_functor
from . import expr_functor
from . import adt
from . import prelude
from . import loops
from . import scope_builder
from . import transform
from . import analysis
from . import collage
from .build_module import build, create_executor, optimize
from .transform import build_config
from . import debug
from . import param_dict
from .backend import vm
# Root operators
from .op import nn
from .op import image
from .op import annotation
from .op import vision
from .op import contrib
from .op import dyn
from .op import random
from .op.reduce import *
from .op.tensor import *
from .op.transform import *
from .op.algorithm import *
from . import frontend
from . import backend
from . import quantize
from . import data_dep_optimization
# Dialects
from . import qnn
from .scope_builder import ScopeBuilder
# Load Memory Passes
from .transform import memory_plan
# Required to traverse large programs
setrecursionlimit(10000)
# Span
Span = base.Span
SourceName = base.SourceName
# Type
Type = ty.Type
TupleType = ty.TupleType
TensorType = ty.TensorType
TypeKind = ty.TypeKind
TypeVar = ty.TypeVar
ShapeVar = ty.ShapeVar
TypeConstraint = ty.TypeConstraint
FuncType = ty.FuncType
TypeRelation = ty.TypeRelation
IncompleteType = ty.IncompleteType
scalar_type = ty.scalar_type
RefType = ty.RefType
GlobalTypeVar = ty.GlobalTypeVar
TypeCall = ty.TypeCall
Any = ty.Any
# Expr
Expr = expr.RelayExpr
Constant = expr.Constant
Tuple = expr.Tuple
Var = expr.Var
GlobalVar = expr.GlobalVar
Function = function.Function
Call = expr.Call
Let = expr.Let
If = expr.If
TupleGetItem = expr.TupleGetItem
RefCreate = expr.RefCreate
RefRead = expr.RefRead
RefWrite = expr.RefWrite
# ADT
Pattern = adt.Pattern
PatternWildcard = adt.PatternWildcard
PatternVar = adt.PatternVar
PatternConstructor = adt.PatternConstructor
PatternTuple = adt.PatternTuple
Constructor = adt.Constructor
TypeData = adt.TypeData
Clause = adt.Clause
Match = adt.Match
# helper functions
var = expr.var
const = expr.const
bind = expr.bind
# TypeFunctor
TypeFunctor = type_functor.TypeFunctor
TypeVisitor = type_functor.TypeVisitor
TypeMutator = type_functor.TypeMutator
# ExprFunctor
ExprFunctor = expr_functor.ExprFunctor
ExprVisitor = expr_functor.ExprVisitor
ExprMutator = expr_functor.ExprMutator
# Prelude
Prelude = prelude.Prelude
# Scope Builder
ScopeBuilder = scope_builder.ScopeBuilder
# Param Serialization
save_param_dict = param_dict.save_param_dict
load_param_dict = param_dict.load_param_dict
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/_build_module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable
"""The interface for building Relay functions exposed from C++."""
import tvm._ffi
tvm._ffi._init_api("relay.build_module", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for Relay program IR."""
import tvm._ffi
tvm._ffi._init_api("relay.ir", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/_make.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The constructors for all Relay AST nodes exposed from C++.
This module includes MyPy type signatures for all of the
exposed modules.
"""
import tvm._ffi
tvm._ffi._init_api("relay._make", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/adt.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name, unused-import
"""Algebraic data types in Relay."""
from tvm.ir import Constructor, TypeData
from tvm.runtime import Object
import tvm._ffi
from .base import RelayNode
from . import _ffi_api
from .ty import Type
from .expr import ExprWithOp, RelayExpr, Call
class Pattern(RelayNode):
"""Base type for pattern matching constructs."""
@tvm._ffi.register_object("relay.PatternWildcard")
class PatternWildcard(Pattern):
"""Wildcard pattern in Relay: Matches any ADT and binds nothing."""
def __init__(self):
"""Constructs a wildcard pattern.
Parameters
----------
None
Returns
-------
wildcard: PatternWildcard
a wildcard pattern.
"""
self.__init_handle_by_constructor__(_ffi_api.PatternWildcard)
@tvm._ffi.register_object("relay.PatternVar")
class PatternVar(Pattern):
"""Variable pattern in Relay: Matches anything and binds it to the variable."""
def __init__(self, var):
"""Construct a variable pattern.
Parameters
----------
var: tvm.relay.Var
Returns
-------
pv: PatternVar
A variable pattern.
"""
self.__init_handle_by_constructor__(_ffi_api.PatternVar, var)
@tvm._ffi.register_object("relay.PatternConstructor")
class PatternConstructor(Pattern):
"""Constructor pattern in Relay: Matches an ADT of the given constructor, binds recursively."""
def __init__(self, constructor, patterns=None):
"""Construct a constructor pattern.
Parameters
----------
constructor: Constructor
The constructor.
patterns: Optional[List[Pattern]]
Optional subpatterns: for each field of the constructor,
match to the given subpattern (treated as a variable pattern by default).
Returns
-------
        pattern: PatternConstructor
            A constructor pattern.
"""
if patterns is None:
patterns = []
self.__init_handle_by_constructor__(_ffi_api.PatternConstructor, constructor, patterns)
@tvm._ffi.register_object("relay.PatternTuple")
class PatternTuple(Pattern):
"""Constructor pattern in Relay: Matches a tuple, binds recursively."""
def __init__(self, patterns=None):
"""Construct a tuple pattern.
Parameters
----------
patterns: Optional[List[Pattern]]
            Optional subpatterns: for each field of the tuple,
            match to the given subpattern (treated as a variable pattern by default).
        Returns
        -------
        pattern: PatternTuple
            A tuple pattern.
"""
if patterns is None:
patterns = []
self.__init_handle_by_constructor__(_ffi_api.PatternTuple, patterns)
@tvm._ffi.register_object("relay.Clause")
class Clause(Object):
"""Clause for pattern matching in Relay."""
def __init__(self, lhs, rhs):
"""Construct a clause.
Parameters
----------
lhs: tvm.relay.Pattern
Left-hand side of match clause.
rhs: tvm.relay.Expr
Right-hand side of match clause.
Returns
-------
clause: Clause
The Clause.
"""
self.__init_handle_by_constructor__(_ffi_api.Clause, lhs, rhs)
@tvm._ffi.register_object("relay.Match")
class Match(ExprWithOp):
"""Pattern matching expression in Relay."""
def __init__(self, data, clauses, complete=True):
"""Construct a Match.
Parameters
----------
data: tvm.relay.Expr
The value being deconstructed and matched.
clauses: List[tvm.relay.Clause]
The pattern match clauses.
complete: Optional[Bool]
Should the match be complete (cover all cases)?
If yes, the type checker will generate an error if there are any missing cases.
Returns
-------
match: tvm.relay.Expr
The match expression.
"""
self.__init_handle_by_constructor__(_ffi_api.Match, data, clauses, complete)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/analysis/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, redefined-builtin, invalid-name
"""The Relay IR namespace containing the analysis passes."""
# Analysis passes
from .analysis import *
# Annotations
from .annotated_regions import AnnotatedRegionSet
# Call graph
from . import call_graph
from .call_graph import CallGraph
# Feature
from . import feature
from . import sparse_dense
from . import sparse_conv2d
# Utilities
from .count_layers import count_layers
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/analysis/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for Relay program analysis."""
import tvm._ffi
tvm._ffi._init_api("relay.analysis", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/analysis/analysis.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return
# pylint: disable=unidiomatic-typecheck
"""
This file contains the set of passes for Relay, which exposes an interface for
configuring the passes and scripting them in Python.
"""
from ...ir import IRModule
from ...relay import transform, build_module
from ...runtime.ndarray import cpu
from . import _ffi_api
from .feature import Feature
def post_order_visit(expr, fvisit):
"""Recursively visit the ir in post DFS order node,
apply fvisit. Each node is guaranteed to be visited
only once.
Parameters
----------
expr : tvm.relay.Expr
The input expression.
fvisit : function
The visitor function to be applied.
"""
return _ffi_api.post_order_visit(expr, fvisit)
def well_formed(expr):
"""Check that each Var is only bound once (well formed).
Parameters
----------
expr : tvm.relay.Expr
The input expression
Returns
-------
well_form : bool
Whether the input expression is well formed
"""
return _ffi_api.well_formed(expr)
def check_kind(t, mod=None):
"""Check that the type is well kinded and return the kind.
    For example, this means a type cannot have a tensor of tensors, or be a
    tuple type of 2 shapes.
Parameters
----------
t : tvm.relay.Type
The type to check
mod : Optional[tvm.IRModule]
The global module.
Returns
-------
kind : Kind
the kind of t
Examples
--------
.. code:: python
assert check_kind(relay.TupleType([relay.TypeParam('tp1', relay.Kind.Shape)])) == Shape
assert check_kind(relay.TupleType([relay.TypeParam('tp1', relay.Kind.Type)])) == Type
"""
if mod is not None:
return _ffi_api.check_kind(t, mod)
else:
return _ffi_api.check_kind(t)
def check_constant(expr):
"""Check whether an expression is constant
Parameters
----------
expr : tvm.relay.Expr
The input expression
Returns
-------
result : bool
Whether the expression is constant.
"""
return _ffi_api.check_constant(expr)
def check_basic_block_normal_form(expr):
"""Check whether an expression is in the basic block form
Parameters
----------
expr : tvm.relay.Expr
The input expression
Returns
-------
result : bool
Whether the expression is in the basic block form.
"""
return _ffi_api.check_basic_block_normal_form(expr)
def free_vars(expr):
"""Get free Vars from expression expr in Post DFS order.
Parameters
----------
expr : tvm.relay.Expr
The input expression
Returns
-------
free : List[tvm.relay.Var]
The list of free variables in post DFS order.
Note
----
    The fact that Vars are post-DFS ordered is useful in
    neural networks: usually this means weights of previous
    layers are ordered first.
"""
return _ffi_api.free_vars(expr)
def bound_vars(expr):
"""Get bound vars from expression expr in post-DFS order.
Parameters
----------
expr : tvm.relay.Expr
The input expression
Returns
-------
free : List[tvm.relay.Var]
The list of bound variables in post-DFS order.
"""
return _ffi_api.bound_vars(expr)
def all_vars(expr):
"""Get all vars from expression expr in post-DFS order.
Parameters
----------
expr : tvm.relay.Expr
The input expression
Returns
-------
free : List[tvm.relay.Var]
The list of all variables in post-DFS order.
"""
return _ffi_api.all_vars(expr)
def free_type_vars(expr, mod=None):
"""Get free type variables from expression/type e
Parameters
----------
expr : Union[tvm.relay.Expr,tvm.relay.Type]
The input expression/type
mod : Optional[tvm.IRModule]
The global module
Returns
-------
free : List[tvm.relay.TypeVar]
The list of free type variables in post-DFS order
"""
use_mod = mod if mod is not None else IRModule()
return _ffi_api.free_type_vars(expr, use_mod)
def bound_type_vars(expr, mod=None):
"""Get bound type variables from expression/type e
Parameters
----------
expr : Union[tvm.relay.Expr,tvm.relay.Type]
The input expression/type
mod : Optional[tvm.IRModule]
The global module
Returns
-------
free : List[tvm.relay.TypeVar]
The list of bound type variables in post-DFS order
"""
use_mod = mod if mod is not None else IRModule()
return _ffi_api.bound_type_vars(expr, use_mod)
def all_type_vars(expr, mod=None):
"""Get all type variables from expression/type e
Parameters
----------
expr : Union[tvm.relay.Expr,tvm.relay.Type]
The input expression/type
mod : Optional[tvm.IRModule]
The global module
Returns
-------
free : List[tvm.relay.TypeVar]
The list of all type variables in post-DFS order
"""
use_mod = mod if mod is not None else IRModule()
return _ffi_api.all_type_vars(expr, use_mod)
def all_dtypes(expr):
"""Collect set of all data types used in `expr`.
Parameters
----------
expr : tvm.relay.Expr
The input expression
Returns
-------
ret : Set[String]
Set of data types used in the expression (e.g., `{'int8', 'int32'}`)
"""
return set(_ffi_api.all_dtypes(expr))
def get_total_mac_number(expr):
"""
Count the number of MACs (multiply-accumulate) of a model
Parameters
----------
expr : tvm.relay.Expr
The input expression.
Returns
-------
result : int64
The number of MACs (multiply-accumulate) of a model
"""
return _ffi_api.GetTotalMacNumber(expr)
def unmatched_cases(match, mod=None):
"""
Finds cases that the match expression does not catch, if any.
Parameters
----------
match : tvm.relay.Match
The match expression
mod : Optional[tvm.IRModule]
The module (defaults to an empty module)
Returns
-------
missing_patterns : [tvm.relay.Pattern]
Patterns that the match expression does not catch.
"""
return _ffi_api.unmatched_cases(match, mod)
def detect_feature(a, b=None):
"""
Detect the feature used in a relay program.
Parameters
----------
a : Union[tvm.relay.Expr, tvm.IRModule]
The input expression or module.
b : Optional[Union[tvm.relay.Expr, tvm.IRModule]]
The input expression or module.
The two arguments cannot both be expression or module.
Returns
-------
features : Set[Feature]
Features used in the program.
"""
if isinstance(a, IRModule):
a, b = b, a
return {Feature(int(x)) for x in _ffi_api.detect_feature(a, b)}
def extract_fused_functions(mod):
"""Pass to extract IRModule of only fused primitive functions.
The ExtractFusedFunctions pass invokes SimplifyInference, FuseOps(3),
and ExtractFusedFunctions in that order
Parameters
----------
mod : tvm.IRModule
Returns
-------
ret : Dict[int, tvm.relay.function.Function]
A module containing only fused primitive functions
"""
ret_mod = _ffi_api.ExtractFusedFunctions()(mod)
ret = {}
for hash_, func in ret_mod.functions.items():
ret[hash_] = func
return ret
def list_op_freqs(mod):
"""Pass to extract unique operator names and how frequently they appear
in an IRModule. Fused functions are traversed to count the operators
that compose them.
Parameters
----------
mod : tvm.IRModule
Returns
-------
ret : Dict[str, int]
Dict of unique operator names to frequency
"""
return _ffi_api.ExtractOperators(mod)
def list_fake_quantized_op_freqs(mod):
"""Pass to extract fake quantized op names and the frequency that they appear
in fake quantized regions of an IRModule.
Parameters
----------
mod : tvm.IRModule
Returns
-------
ret : Dict[str, int]
Dict of fake quantized operator names to frequency
"""
return _ffi_api.ExtractFakeQuantizedOps(mod)
def search_fc_transpose(expr):
"""Search fc weight name in the patten: y = nn.dense(x, transpose(w, [1, 0]))
This function is used in the data_dep_optimization.simplify_fc_transpose method
Parameters
----------
expr : tvm.relay.Expr
Returns
-------
ret : Array[String]
Array of weight variable name in pattern y = nn.dense(x, transpose(w, [1, 0]))
"""
ret = _ffi_api.search_fc_transpose(expr)
return ret
def get_calibration_data(mod, data):
"""Get the calibration data of a given relay graph
This pass uses the graph executor to get the calibration data of a module, which
includes the input and output values of each function. The returned data uses
the GlobalVar of each function as a key. Users can further access the inputs and
outputs by using `inputs` or `outputs` as the key.
The following are some limitations:
1. The input module (graph) cannot have control flows.
2. The input arguments of each function cannot be tuples (outputs can be tuples).
3. We only handle top-level functions (i.e., nested functions are not handled).
4. We only handle functions with the `Compiler` attribute set.
Parameters
----------
mod : tvm.IRModule
The input module for collecting the calibration data
data : Dict[str, NDArray]
The input data for running the module
Returns
-------
data : Dict[tvm.relay.GlobalVar, Dict[str, NDArray]]
The collected calibration data, keyed by the GlobalVar of each function
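Examples
--------
A minimal sketch (assumes `mod` is a partitioned module whose external
functions carry the `Compiler` attribute and whose main function takes
an input named "x"):
.. code-block:: python
import numpy as np
import tvm
from tvm import relay
data = {"x": np.random.rand(1, 3, 224, 224).astype("float32")}
calib = relay.analysis.get_calibration_data(mod, data)
for gvar, record in calib.items():
    inputs, outputs = record["inputs"], record["outputs"]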
"""
output_map = _ffi_api.get_calibrate_output_map(mod)
mod = _ffi_api.get_calibrate_module(mod)
mod = transform.Inline()(mod)
ref_res = build_module.create_executor("graph", mod=mod, device=cpu(0)).evaluate()(**data)
calib_data = {}
for gvar, indices in output_map.items():
offset = int(indices[0])
in_len = int(indices[1])
out_len = int(indices[2])
value = {
"inputs": ref_res[offset : offset + in_len],
"outputs": ref_res[offset + in_len : offset + in_len + out_len],
}
calib_data[gvar] = value
return calib_data
def extract_intermdeiate_expr(mod, expr_id):
"""Extract Relay Expr by its expression ID
This function is used for extracting Relay Expr
by its expression ID of the main function
that we can see in `print(mod["main"])`.
Parameters
----------
mod : tvm.IRModule
expr_id : int
The Expr ID that we want to extract
Returns
-------
ret : tvm.IRModule
The extracted IRModule
Examples
--------
.. code-block:: python
# Suppose our module is printed like this:
# def @main(%x: Tensor[(1, 1, 5, 1), float32], %w1, %w2) {
# %0 = nn.conv2d(%x, %w1, padding=[1, 1, 1, 1], channels=1, kernel_size=[3, 3]);
# %1 = nn.conv2d(%0, %w2, padding=[1, 1, 1, 1], channels=1, kernel_size=[3, 3]);
# %2 = add(%0, %1);
# %3 = split(%2, indices_or_sections=1);
# %4 = %3.0;
# add(%4, 1f)
# }
# if we want to extract `%1 = nn.conv2d`
from tvm import relay
relay.analysis.extract_intermdeiate_expr(mod, 1)
"""
return _ffi_api.ExtractIntermediateExpr(mod, expr_id)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/analysis/annotated_regions.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name, unused-import
"""Regions used in Relay."""
from ...runtime import Object
from . import _ffi_api
class AnnotatedRegionSet(Object):
"""Class to represent a relay expression split into regions."""
def __init__(self, expr, region_begin_op, region_end_op):
"""Construct regions from an expression.
Parameters
----------
expr : tvm.relay.Expr
The expression from which to construct the regions.
region_begin_op : tvm.ir.Op
The region begin annotation.
region_end_op : tvm.ir.Op
The region end annotation.
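Examples
--------
A minimal sketch using compiler annotations (the target name is illustrative):
.. code-block:: python
from tvm import relay
from tvm.relay.op.annotation import compiler_begin, compiler_end
x = relay.var("x", shape=(4,))
out = compiler_end(relay.abs(compiler_begin(x, "target")), "target")
regions = AnnotatedRegionSet(
    out,
    relay.op.get("annotation.compiler_begin"),
    relay.op.get("annotation.compiler_end"),
)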
"""
self.__init_handle_by_constructor__(
_ffi_api.AnnotatedRegionSet, expr, region_begin_op, region_end_op
)
def __len__(self):
return len(self.regions)
def get_region(self, expr):
"""Get the region an expression belongs to.
Parameters
----------
expr : tvm.relay.Expr
The expression.
Returns
-------
region
The region containing the expression.
None if not found.
"""
return _ffi_api.GetRegion(self, expr)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/analysis/call_graph.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name, unused-import
"""Call graph used in Relay."""
from ...ir import IRModule
from ...runtime import Object
from ..expr import GlobalVar
from . import _ffi_api
class CallGraph(Object):
"""Class to represent a call graph."""
def __init__(self, module):
"""Construct a call graph.
Parameters
----------
module : tvm.ir.IRModule
The IR module used to create a call graph
Returns
-------
call_graph: CallGraph
A constructed call graph.
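Examples
--------
A minimal sketch (the module below is illustrative):
.. code-block:: python
import tvm
from tvm import relay
x = relay.var("x", shape=(2, 2))
gvar = relay.GlobalVar("myfunc")
mod = tvm.IRModule({gvar: relay.Function([x], relay.add(x, x))})
call_graph = CallGraph(mod)
print(call_graph.ref_count("myfunc"))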
"""
self.__init_handle_by_constructor__(_ffi_api.CallGraph, module)
@property
def module(self):
"""Return the contained Relay IR module.
Parameters
----------
None
Returns
-------
ret : tvm.ir.IRModule
The contained IRModule
"""
return _ffi_api.GetModule(self)
def ref_count(self, var):
"""Return the number of references to the global var
Parameters
----------
var : Union[String, tvm.relay.GlobalVar]
Returns
-------
ret : int
The number of references to the global var
"""
var = self._get_global_var(var)
return _ffi_api.GetRefCountGlobalVar(self, var)
def global_call_count(self, var):
"""Return the number of global function calls from a given global var.
Parameters
----------
var : Union[String, tvm.relay.GlobalVar]
Returns
-------
ret : int
The number of global function calls from the given var.
"""
var = self._get_global_var(var)
return _ffi_api.GetGlobalVarCallCount(self, var)
def is_recursive(self, var):
"""Return if the function corresponding to a var is a recursive
function.
Parameters
----------
var : Union[String, tvm.relay.GlobalVar]
Returns
-------
ret : Boolean
Whether the function corresponding to var is recursive.
"""
var = self._get_global_var(var)
return _ffi_api.IsRecursive(self, var)
def _get_global_var(self, var):
"""Return the global var using a given name or GlobalVar.
Parameters
----------
var : Union[String, tvm.relay.GlobalVar]
Returns
-------
ret : tvm.relay.GlobalVar
The global var.
"""
if isinstance(var, str):
mod = self.module
var = mod.get_global_var(var)
if isinstance(var, GlobalVar):
return var
else:
raise TypeError("var should be either a string or GlobalVar")
def print_var(self, var):
"""Print a call graph of a global function by name or by variable.
Parameters
----------
var: Union[String, tvm.relay.GlobalVar]
The name or global variable.
Returns
-------
ret : String
The call graph represented in string.
"""
var = self._get_global_var(var)
return _ffi_api.PrintCallGraphGlobalVar(self, var)
def __str__(self):
"""Print the call graph in the topological order."""
return _ffi_api.PrintCallGraph(self)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/analysis/count_layers.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities that enable counting the number of layers in a graph."""
import tvm
from tvm import relay
from ..expr_functor import ExprVisitor
class LayerCounter(ExprVisitor):
"""A visitor pass that computes the deepest chain of specified ops in graph."""
def __init__(self, valid_ops):
self.depth_count = 0
self.deepest_count = 0
self.valid_ops = [relay.op.get(op) for op in valid_ops]
super().__init__()
def visit_call(self, call):
if call.op in self.valid_ops:
self.depth_count += 1
current_count = self.depth_count
self.deepest_count = max(self.deepest_count, current_count)
for arg in call.args:
self.visit(arg)
self.depth_count = current_count
def count(self):
return self.deepest_count
def count_layers(expr, valid_ops):
"""Determine the number of layers of specified ops in a graph.
This pass computes only the deepest chain of ops rather than the
total number of ops in a graph. Thus, if there are two parallel
convolutions (for example), they would be considered a single layer.
Parameters
----------
expr : tvm.relay.Expr, tvm.relay.Function, or tvm.ir.IRModule.
The input expression.
valid_ops: List[str]
A list of the operations that should be included in the count.
Returns
-------
layer_count : int
The number of layers of the specified operations found in the graph.
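Examples
--------
A minimal sketch counting `nn.conv2d` layers (the network is illustrative):
.. code-block:: python
from tvm import relay
from tvm.relay.analysis.count_layers import count_layers
data = relay.var("data", shape=(1, 3, 32, 32))
w1 = relay.var("w1", shape=(8, 3, 3, 3))
w2 = relay.var("w2", shape=(8, 8, 3, 3))
conv1 = relay.nn.conv2d(data, w1, kernel_size=(3, 3), channels=8, padding=(1, 1))
conv2 = relay.nn.conv2d(conv1, w2, kernel_size=(3, 3), channels=8, padding=(1, 1))
depth = count_layers(conv2, ["nn.conv2d"])
# expected: 2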
"""
if isinstance(expr, tvm.ir.IRModule):
expr = expr["main"]
count_pass = LayerCounter(valid_ops)
count_pass.visit(expr)
return count_pass.count()
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/analysis/feature.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name
"""The type nodes of the Relay language."""
from enum import IntEnum
class Feature(IntEnum):
"""The features a program might contain."""
fVar = 0
fGlobalVar = 1
fConstant = 2
fTuple = 3
fTupleGetItem = 4
fFunction = 5
fOp = 6
fCall = 7
fLet = 8
fIf = 9
fRefCreate = 10
fRefRead = 11
fRefWrite = 12
fConstructor = 13
fMatch = 14
""" Whether any non-atom fragment of the program is shared, making the program a graph. """
fGraph = 15
""" Whether there is local fixpoint in the program. """
fLetRec = 16
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/analysis/sparse_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return
# pylint: disable=unidiomatic-typecheck
"""
This file contains helper functions for converting a dense model
to a block sparse model
"""
from collections import namedtuple
import numpy as np
import scipy.sparse as sp
import tvm
from . import _ffi_api
SparseAnalysisResult = namedtuple(
"SparseAnalysisResult",
[
"weight_name",
"weight_shape",
],
)
def _search_conv2d_op_weight(expr):
"""Search name of weight in all ```nn.conv2d``` operator
This is a helpful function to determine which param need
to be converted to sparse
Parameters
----------
expr : relay.Expr
Expr will be searched
Returns
-------
ret : Array[String]
Names of the weights in all ``nn.conv2d`` operators
"""
return _ffi_api.search_conv2d_op_weight(expr)
def process_params(
expr, params, block_size, sparsity_threshold, layout, kernel_size, reg_task_input=True
):
"""Process parameters of conv2d from dense to sparse.
Parameters
----------
expr : Relay.Expr
Expr of the network
params : Dict[String, tvm.nd.array]
parameters of the network
block_size : Tuple(int, int)
Blocksize in BSR matrix
sparsity_threshold : float
Minimal sparsity requirement for converting to sparse operation
layout : str
layout of network
kernel_size : int
kernel size of the ``nn.conv2d`` weights to process (1 or 3)
reg_task_input : bool
whether to register the converted weights as auto_scheduler task input buffers
Returns
-------
ret : Namedtuple[weight_name: Array[String], weight_shape: Array[Array[IntImm]]]
return names of qualified conv2d weight and the shape in BSR format
"""
# pylint: disable=import-outside-toplevel
from tvm.auto_scheduler.search_task import (
register_task_input_buffer,
) # lazily import to avoid recursive dependency
memo = SparseAnalysisResult(weight_name=[], weight_shape=[])
weight_names = _search_conv2d_op_weight(expr)
for name in weight_names:
name = str(name)
w_np = params[name].numpy()
if layout == "NHWC": # HWIO
weight_kernel = (w_np.shape[0], w_np.shape[1])
elif layout == "NCHW": # OIHW
weight_kernel = (w_np.shape[2], w_np.shape[3])
if weight_kernel[0] != weight_kernel[1]:
continue
if weight_kernel[0] == kernel_size == 1:
sparsity = 1.0 - (np.count_nonzero(w_np) / w_np.size)
if sparsity < sparsity_threshold:
continue
if layout == "NHWC":
w_np = w_np.squeeze().T
elif layout == "NCHW":
w_np = w_np.squeeze()
sparse_weight = sp.bsr_matrix(w_np, blocksize=block_size)
# when bs_c=1, remove this dim
if block_size[1] == 1:
sparse_weight_data = sparse_weight.data.reshape(
sparse_weight.data.shape[0], block_size[0]
)
else:
sparse_weight_data = sparse_weight.data
elif weight_kernel[0] == kernel_size == 3:
if layout == "NHWC": # HWIO
w_np = w_np.reshape((-1, w_np.shape[-1])).T
elif layout == "NCHW": # OIHW
w_np = w_np.reshape((w_np.shape[0], -1))
sparse_weight = sp.bsr_matrix(w_np, blocksize=block_size)
if 1 - (sparse_weight.nnz / w_np.size) < sparsity_threshold:
continue
sparse_weight_data = sparse_weight.data
else:
continue
# remove dense weight
del params[name]
memo.weight_name.append(name)
memo.weight_shape.append(
list(sparse_weight_data.shape)
+ list(sparse_weight.indices.shape)
+ list(sparse_weight.indptr.shape)
)
params[name + ".data"] = tvm.nd.array(sparse_weight_data)
params[name + ".indices"] = tvm.nd.array(sparse_weight.indices)
params[name + ".indptr"] = tvm.nd.array(sparse_weight.indptr)
if reg_task_input:
prefix = "sparse_conv2d_bsr_%d_%d_%d_%d_%d_%d_" % (
w_np.shape[0],
w_np.shape[1],
block_size[0],
block_size[1],
sparse_weight.indices.shape[0],
sparse_weight.indptr.shape[0],
)
register_task_input_buffer(
"default",
prefix + "W_data",
tvm.runtime.ndarray.array(sparse_weight_data),
overwrite=True,
)
register_task_input_buffer(
"default",
prefix + "W_indices",
tvm.runtime.ndarray.array(sparse_weight.indices),
overwrite=True,
)
register_task_input_buffer(
"default",
prefix + "W_indptr",
tvm.runtime.ndarray.array(sparse_weight.indptr),
overwrite=True,
)
ret = SparseAnalysisResult(
weight_name=tvm.runtime.convert(memo.weight_name),
weight_shape=tvm.runtime.convert(memo.weight_shape),
)
return ret
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/analysis/sparse_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return
# pylint: disable=unidiomatic-typecheck
"""
This file contains helper functions for converting a dense model
to a block sparse model
"""
from collections import namedtuple
import numpy as np
import scipy.sparse as sp
import tvm
from . import _ffi_api
SparseAnalysisResult = namedtuple(
"SparseAnalysisResult",
[
"weight_name",
"weight_shape",
],
)
def _search_dense_op_weight(expr):
"""Search name of weight in all ```nn.dense``` operator
This is a helpful function to determine which param need
to be converted to sparse
Parameters
----------
expr : relay.Expr
Expr will be searched
Returns
-------
ret : Array[String]
Names of the weights in all ``nn.dense`` operators
"""
return _ffi_api.search_dense_op_weight(expr)
def process_params(expr, params, block_size, sparsity_threshold):
"""[summary]
Parameters
----------
expr : Relay.Expr
Expr of the network
params : Dict[String, tvm.nd.array]
parameters of the network
block_size : Tuple(int, int)
Blocksize in BSR matrix
sparsity_threshold : float
Minimal sparsity requirement for converting to sparse operation
Returns
-------
ret : Namedtuple[weight_name: Array[String], weight_shape: Array[Array[IntImm]]]
return names of qualified dense weight and the shape in BSR format
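Examples
--------
A minimal sketch (assumes `mod` and `params` come from an imported model
with `nn.dense` weights; qualifying weights in `params` are replaced
in-place by their BSR components):
.. code-block:: python
result = process_params(
    mod["main"], params, block_size=(1, 1), sparsity_threshold=0.75
)
print(result.weight_name, result.weight_shape)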
"""
# pylint: disable=import-outside-toplevel
from tvm.auto_scheduler.search_task import (
register_task_input_buffer,
) # lazily import to avoid recursive dependency
memo = SparseAnalysisResult(weight_name=[], weight_shape=[])
weight_names = _search_dense_op_weight(expr)
for name in weight_names:
name = str(name)
w_np = params[name].numpy()
sparsity = 1.0 - (np.count_nonzero(w_np) / w_np.size)
if sparsity >= sparsity_threshold:
sparse_weight = sp.bsr_matrix(w_np, blocksize=block_size)
# remove dense weight
del params[name]
memo.weight_name.append(name)
memo.weight_shape.append(
list(sparse_weight.data.shape)
+ list(sparse_weight.indices.shape)
+ list(sparse_weight.indptr.shape)
)
params[name + ".data"] = tvm.nd.array(sparse_weight.data)
params[name + ".indices"] = tvm.nd.array(sparse_weight.indices)
params[name + ".indptr"] = tvm.nd.array(sparse_weight.indptr)
prefix = "sparse_dense_bsr_%d_%d_%d_%d_%d_%d_" % (
w_np.shape[0],
w_np.shape[1],
block_size[0],
block_size[1],
sparse_weight.indices.shape[0],
sparse_weight.indptr.shape[0],
)
register_task_input_buffer(
"default",
prefix + "W_data",
tvm.runtime.ndarray.array(sparse_weight.data),
overwrite=True,
)
register_task_input_buffer(
"default",
prefix + "W_indices",
tvm.runtime.ndarray.array(sparse_weight.indices),
overwrite=True,
)
register_task_input_buffer(
"default",
prefix + "W_indptr",
tvm.runtime.ndarray.array(sparse_weight.indptr),
overwrite=True,
)
ret = SparseAnalysisResult(
weight_name=tvm.runtime.convert(memo.weight_name),
weight_shape=tvm.runtime.convert(memo.weight_shape),
)
return ret
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Backend codegen modules for relay."""
from . import te_compiler
from .executor import Executor
from .runtime import Runtime
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/_aot.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The AOT FFI namespace.
"""
import tvm._ffi
tvm._ffi._init_api("relay.backend.aot", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/_backend.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The interface of expr function exposed from C++."""
import tvm._ffi
import tvm.driver
@tvm._ffi.register_func("relay.backend.build")
def build(mod, target, target_host=None):
"""Backend build function.
Parameters
----------
mod : tvm.IRModule or Dict[str, tvm.IRModule]
Input module
target : tvm.Target
The target to run the code on.
target_host : tvm.Target
The host target.
Returns
-------
module : tvm.Module
The runtime module.
"""
target_host = None if target_host == "" else target_host
return tvm.driver.build(mod, target=target, target_host=target_host)
@tvm._ffi.register_func("relay._tensor_value_repr")
def _tensor_value_repr(tvalue):
return str(tvalue.data.numpy())
@tvm._ffi.register_func("relay._constant_repr")
def _tensor_constant_repr(tvalue):
dtype = tvm.runtime.DataType(tvalue.data.dtype)
if tvm.target.datatype.get_type_registered(dtype.type_code):
return "custom tensor of type " + dtype.type_code
return str(tvalue.data.numpy())
tvm._ffi._init_api("relay.backend", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/_vm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The Relay virtual machine FFI namespace.
"""
import tvm._ffi
tvm._ffi._init_api("relay._vm", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/aot.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""AOT passes"""
from typing import Dict
from tvm import IRModule
from tvm.relay.backend import Executor
from tvm.ir.transform import Pass
from .utils import CallType
from . import _aot
def AOTLowerMain(mod_name: str, config: object, call_type: CallType) -> Pass:
"""Lower a Relay main function into an AOT TIR main function.
Parameters
----------
mod_name: str
The name of the module.
config : CompilationConfig
The compilation configuration.
call_type : CallType
The calling convention to use.
Returns
-------
Pass
The AOTLowerMain pass.
"""
return _aot.AOTLowerMain(mod_name, config, call_type.value)
def CreateFunctionMetadata(
mod: IRModule, workspace_byte_alignment: int, constant_byte_alignment: int
) -> Dict[str, object]:
"""Create the function metadata (FunctionInfos) from an AOT module.
Parameters
----------
mod : IRModule
The IRModule.
workspace_byte_alignment : int
The alignment of the workspace buffer in bytes.
constant_byte_alignment : int
The alignment of the constant buffer in bytes.
Returns
-------
Dict[str, FunctionInfo]
A map between function names and FunctionInfos.
"""
return _aot.CreateFunctionMetadata(mod, workspace_byte_alignment, constant_byte_alignment)
def CreateExecutorMetadata(
mod: IRModule,
mod_name: str,
executor: Executor,
workspace_byte_alignment: int,
constant_byte_alignment: int,
) -> object:
"""Create the executor metadata from an AOT module.
Parameters
----------
mod : IRModule
The IRModule.
mod_name : str
The name of the module.
executor : Executor
The executor configuration.
workspace_byte_alignment : int
The alignment of the workspace buffer in bytes.
constant_byte_alignment : int
The alignment of the constant buffer in bytes.
Returns
-------
ExecutorCodegenMetadata
The executor metadata.
"""
return _aot.CreateExecutorMetadata(
mod, mod_name, executor, workspace_byte_alignment, constant_byte_alignment
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External backend codegen modules for Relay."""
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-U NPU codegen modules for Relay."""
from . import util
from . import legalize
from . import preprocess
from . import codegen
from . import vela_api
from . import tir_to_cs_translator
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for relay transformation passes."""
import tvm._ffi # type: ignore
tvm._ffi._init_api("relay.ext.ethos-u", __name__)
tvm._ffi._init_api("tir.contrib.ethos-u", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/codegen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Codegen for Arm(R) Ethos(TM)-U NPU"""
from collections import defaultdict
from typing import List, Callable
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu.tir.compiler import LowerToTIR
from tvm.relay.backend.contrib.ethosu.tir.scheduler import copy_constants
from tvm.contrib.ethosu.cascader import (
cascade,
EthosuDeviceConfig,
CascaderOptions,
MemoryRegion,
extract_memory_info,
)
from tvm.relay.backend.contrib.ethosu.legalize import LegalizeEthosU
from tvm.relay.backend.contrib.ethosu import tir_to_cs_translator, util
from tvm.relay.expr_functor import ExprMutator, ExprVisitor
# pylint: disable=unused-import
from tvm.relay.backend.contrib.ethosu.op import op_attrs
from tvm.relay.backend.contrib.ethosu import op
from . import _ffi_api
class OptimizeLUTs(ExprMutator):
"""A pass to merge an identity operator with a LUT based activation function with
a preceding operator provided that operator can do a table lookup for the activation
in the hardware"""
def __init__(self):
super().__init__()
self.lut_ops = {
"contrib.ethosu.conv2d": op.ethosu_conv2d,
"contrib.ethosu.depthwise_conv2d": op.ethosu_depthwise_conv2d,
"contrib.ethosu.pooling": op.ethosu_pooling,
}
def create_op_with_lut(self, call):
"""Extract the parameters and attributes from the NPU operator and create
a new operator with LUT.
Parameters
----------
call : tvm.relay.expr.Call
The current call node being visited.
Returns
-------
tvm.relay.expr.Call
The new operator with LUT.
"""
identity = call
ethosu_op = call.args[0]
lut = identity.args[1]
activation = identity.attrs.activation
new_attrs = dict(ethosu_op.attrs)
new_attrs["activation"] = activation
# Assume that LUT is always the last argument
new_args = ethosu_op.args[:-1] + [lut]
assert ethosu_op.op.name in self.lut_ops.keys()
return self.lut_ops[ethosu_op.op.name](*new_args, **new_attrs)
def visit_call(self, call: tvm.relay.expr.Call) -> tvm.relay.expr.Call:
"""Recursively visit call nodes in the input graph and if an ethosu.identity
operator with LUT is found and the preceding operator has a LUT attribute, create
a new NPU operator.
Parameters
----------
call : tvm.relay.expr.Call
The current call node being visited.
Returns
-------
tvm.relay.expr.Call
The input call node in the case the current call node does
not refer to an Op. Else, a new call node with a new operator.
"""
new_call = call
lut_activations = ["TANH", "LUT", "SIGMOID"]
if isinstance(call.op, tvm.ir.Op) and isinstance(call.args[0], tvm.relay.expr.Call):
producer_op = call.args[0]
# Check if the producer can do a LUT operation
if (
producer_op.op.name in self.lut_ops.keys()
and call.op.name == "contrib.ethosu.identity"
and call.attrs.activation in lut_activations
):
# Check the producer doesn't already have a LUT
has_lut = producer_op.attrs.activation in lut_activations
if not has_lut:
new_call = self.create_op_with_lut(call)
new_call = super().visit_call(new_call)
return new_call
@util.create_npu_function_pass(opt_level=1)
class LUTsOptimizer:
"""Register LUTsOptimizer as a relay pass."""
def transform_npu_function(self, _, func: relay.Function) -> relay.Function:
"""Visit relay nodes in the given NPU function.
Parameters
----------
func : tvm.relay.function.Function
The function to apply the optimization pass for multiple LUTs to.
Returns
-------
func : tvm.relay.function.Function
The function with optimized LUTs.
"""
return OptimizeLUTs().visit(func)
def __call__(self, *args, **kwargs):
pass
class AnalyzeConsumers(ExprVisitor):
"""Traverses the graph to determine consumers that are NPU operations. The
result is maintained in `npu_consumers`.
Attributes
----------
npu_consumers : Dict[tvm.relay.expr.Call, List[bool]]
Mapping from NPU operation to list of boolean values that represent
whether or not each consumer is an NPU operation.
optimize_ops : Dict[str, Callable]
A map from NPU operation name to function that creates NPU operation.
"""
def __init__(self, optimize_ops):
self.npu_consumers = defaultdict(list)
self.optimize_ops = optimize_ops
super().__init__()
def visit_call(self, call: relay.Call):
is_npu_consumer = call.op.name in self.optimize_ops
args = []
# Expand tuples
for arg in call.args:
if isinstance(arg, relay.Tuple):
args.extend(arg.fields)
else:
args.append(arg)
for arg in args:
if isinstance(arg, relay.Call) and arg.op.name in self.optimize_ops:
self.npu_consumers[arg].append(is_npu_consumer)
super().visit_call(call)
class LayoutOptimization(ExprMutator):
"""A pass to optimize the layout of NPU operations by converting to brick format (NHCWB16).
This pass traverses the graph and attempts to alter the input/output layouts when an NPU
operation is visited. Whether or not the input/output layout can be altered for a given NPU
operation depends on the following:
Check alter input layout: For each argument, if the producer is also an NPU operation and
its output is altered to brick format, then the input layout with respect to the current
argument is altered to brick format.
Check alter output layout: If all consumers (child nodes) are an NPU operation, then the
output layout is altered to brick format.
Note
----
In order for this pass to be run, the consumers of each NPU operation must first be analyzed
by the `AnalyzeConsumers` pass, since Relay doesn't keep a reference to child nodes.
Attributes
----------
npu_consumers : Dict[tvm.relay.expr.Call, List[bool]]
A map from the current call to a list of boolean values that state whether or not each consumer
is an NPU operation.
optimize_ops : Dict[str, Callable]
A map from NPU operation name to function that creates NPU operation.
"""
def __init__(self, npu_consumers, optimize_ops):
self.npu_consumers = npu_consumers
self.optimize_ops = optimize_ops
super().__init__()
def alter_ethosu_op_layout(self, call: tvm.relay.expr.Call) -> tvm.relay.expr.Call:
"""Alter the layouts of given NPU operation to brick format if possible.
Parameters
----------
call : tvm.relay.expr.Call
The call pointing to an NPU operation that will be checked if
the layout needs altering.
Returns
-------
new_call : tvm.relay.expr.Call
New call with altered layouts.
"""
assert isinstance(call.attrs, tvm.ir.Attrs), (
f"The attributes for operator '{call.op.name}' could not be "
"found. Did you register the relay.attrs.Ethosu<opname>Attrs "
"object in python api?"
)
new_attrs = dict(call.attrs)
# Check if we can rewrite the input layouts
input_count = 0
for arg in call.args:
input_count += 1
if arg not in self.npu_consumers:
continue
consumers = self.npu_consumers[arg]
parent_has_brick_output = consumers and all(consumers)
if parent_has_brick_output:
layout_string = "ifm_layout" if input_count <= 1 else f"ifm{input_count}_layout"
new_attrs[layout_string] = "NHCWB16"
# Check if we can rewrite the output layouts
consumers = self.npu_consumers[call]
if consumers and all(consumers):
new_attrs["ofm_layout"] = "NHCWB16"
name = call.op.name
return self.optimize_ops[name](*call.args, **new_attrs)
def visit_call(self, call: tvm.relay.expr.Call) -> tvm.relay.expr.Call:
"""Recursively visit call nodes in the input graph and alter the
layout of an op if needed.
Parameters
----------
call : tvm.relay.expr.Call
The current call node being visited.
Returns
-------
tvm.relay.expr.Call
The input call node in the case the current call node does
not refer to an Op. Else, a new call node with altered Op
attributes.
"""
if isinstance(call.op, tvm.ir.Op) and call.op.name in self.optimize_ops:
call = self.alter_ethosu_op_layout(call)
return super().visit_call(call)
@util.create_npu_function_pass(opt_level=1)
class LayoutOptimizer:
"""Register LayoutOptimizer as a Relay pass."""
def transform_npu_function(self, _, func: relay.Function) -> relay.Function:
"""A pass to optimize the layout of NPU operations. If both the
producer and consumer of a tensor are NPU operators, then the
layout is converted from NHWC to NHCWB16 as this is the layout NPU
uses internally."""
optimize_ops = {
"contrib.ethosu.conv2d": op.ethosu_conv2d,
"contrib.ethosu.depthwise_conv2d": op.ethosu_depthwise_conv2d,
"contrib.ethosu.pooling": op.ethosu_pooling,
"contrib.ethosu.binary_elementwise": op.ethosu_binary_elementwise,
"contrib.ethosu.unary_elementwise": op.ethosu_unary_elementwise,
}
analyze = AnalyzeConsumers(optimize_ops)
analyze.visit(func)
return LayoutOptimization(analyze.npu_consumers, optimize_ops).visit(func)
def __call__(self, *args, **kwargs):
pass
def IdentityOptimizer(): # pylint: disable=invalid-name
"""Pass that removes redundant identities
Returns
-------
Pass
The module pass.
"""
return _ffi_api.IdentityOptimizer()
def OutlineCompilerFunctions(compiler_name): # pylint: disable=invalid-name
"""Pass that outlines functions given a named Compiler attribute.
Parameters
----------
compiler_name : str
The name of the compiler to look for and outline.
Returns
-------
Pass
The module pass.
"""
return _ffi_api.OutlineCompilerFunctions(compiler_name)
@tvm._ffi.register_func("relay.ext.ethos-u.constant_updater")
def constant_updater(expr, symbol): # pylint: disable=unused-argument
"""
The constant updater process happens after lowering in the core compiler.
For the NPU, we don't want the build process to extract constants to be loaded in
the runtime, as we are embedding them inside the C runtime.Module.
"""
return dict()
def _create_cascader(
options: CascaderOptions,
io_region: MemoryRegion,
constant_region: MemoryRegion,
working_regions: List[MemoryRegion],
device_config: EthosuDeviceConfig,
) -> Callable:
def _cascader(te_graph, const_dict, sch):
cascade(
sch,
te_graph,
const_dict,
options,
io_region,
constant_region,
working_regions,
device_config,
)
return _cascader
def _ethos_u55_cascader(sram, enable_striping) -> Callable:
# TODO(ekalda): Extract the flash info from ConstantPools once it is implemented
flash = MemoryRegion(name="FLASH", size=10**7, read_bandwidth=4, write_bandwidth=4)
device_config = EthosuDeviceConfig(util.get_accelerator_config())
cascader_options = CascaderOptions(
cascade_region=sram,
max_proposals=64,
stripe_factors=5,
max_plan_size=10,
always_copy_size=1024,
max_open_plans=8,
max_closed_plans=32,
enable_striping=enable_striping,
)
return _create_cascader(
options=cascader_options,
io_region=sram,
constant_region=flash,
working_regions=[sram],
device_config=device_config,
)
def _calculate_memory_pressure(mod: tvm.ir.IRModule) -> int:
"""
Calculates a worst-case estimate of the memory consumed at the callsite of
each microNPU function. This value can be used as a hint to guide the cascader,
indicating how aggressively it will need to optimize the input module to fit
into the memory that remains in the memory workspace.
Parameters
----------
mod : tvm.ir.IRModule
The input module
Returns
-------
int
Memory pressure value for the module.
"""
memory_pressure = 0
@util.create_npu_function_pass(opt_level=1)
class CalculateMemoryPressure:
"""
Traverse the module and get total memory used by external NPU functions.
"""
def transform_npu_function(self, _, func: relay.Function) -> relay.Function:
nonlocal memory_pressure
max_val = max(func.attrs["used_memory"])
memory_pressure += max_val
return func
CalculateMemoryPressure()(mod) # pylint: disable=not-callable
io_used_memory = 0
if not tvm.tir.usmp.utils.use_workspace_io_is_enabled():
io_used_memory = int(mod["main"].attrs["io_used_memory"])
return memory_pressure - io_used_memory
@tvm._ffi.register_func("relay.ext.ethos-u.relay_to_tir")
def relay_to_tir(mod: tvm.ir.IRModule) -> tvm.ir.IRModule:
"""
This is the hook for python-based lowering of a Relay module which lowers NPU
external functions to TIR.
Parameters
----------
mod : tvm.ir.IRModule
This is the Relay module.
Returns
-------
mod : tvm.ir.IRModule
The Relay module with scheduled NPU external functions.
"""
mod = OutlineCompilerFunctions("ethos-u")(mod)
mod = LegalizeEthosU()(mod)
mod = LUTsOptimizer()(mod)
mod = relay.transform.InferType()(mod)
mod = IdentityOptimizer()(mod)
mod = LayoutOptimizer()(mod)
mod = relay.transform.InferType()(mod)
device_contexts = {
gv: "ethos-u" for gv, _ in filter(lambda x: util.is_npu_func(x[1]), mod.functions.items())
}
mod = mod.with_attr("device_contexts", device_contexts)
# Use the cascader if it is enabled for the U55 accelerator, otherwise use copy_constants
# scheduler
if util.is_cascader_enabled():
if util.get_accelerator_config() == "ethos-u65-256":
raise ValueError("Cascading is not supported for the U65 accelerator")
workspace_memory_pools = mod.attrs["workspace_memory_pools"]
if not workspace_memory_pools:
raise ValueError("Workspace memory pool needs to be provided for the U55 cascader")
if len(workspace_memory_pools.pools) != 1:
raise ValueError("Exactly one workspace pool needs to be provided for the U55 cascader")
memory_pressure = _calculate_memory_pressure(mod)
sram = extract_memory_info(workspace_memory_pools.pools[0], memory_pressure)
tir_mod = LowerToTIR(_ethos_u55_cascader(sram, util.is_striping_enabled()))(mod)
else:
tir_mod = LowerToTIR(copy_constants())(mod)
return tir_mod
@tvm._ffi.register_func("relay.ext.ethos-u.primfunc_to_artifact")
def primfunc_to_artifact(primfunc: tvm.tir.PrimFunc) -> util.CompilationArtifact:
"""
This is the hook for python-based lowering of a TIR PrimFunc
that has undergone unified optimization into a compilation
artifact destined for the microNPU.
Parameters
----------
primfunc : tir.PrimFunc
TIR PrimFunc that has undergone unified optimizations
Returns
-------
CompilationArtifact
This is a structure that holds the binary artifacts
for the microNPU
"""
symbol = str(primfunc.attrs["global_symbol"])
const_dict = primfunc.attrs["ethos-u.constants"]
tir_mod = tvm.IRModule()
tir_mod[symbol] = primfunc
const_dict_np = dict()
for buffer_var in const_dict.keys():
const_dict_np[buffer_var] = const_dict[buffer_var].numpy()
cmms, encoded_constants, base_addresses = tir_to_cs_translator.translate(tir_mod, const_dict_np)
return util.CompilationArtifact(symbol, cmms, encoded_constants, base_addresses)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/legalize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel, no-value-for-parameter
"""A set of passes to legalize some of operations for the NPU"""
from typing import List, Type, Callable
import math
import numpy as np # type: ignore
from ethosu.vela import scaling, fp_math
import tvm # type: ignore
from tvm import relay
from tvm.relay.dataflow_pattern import DFPatternCallback # type: ignore
from tvm.relay.dataflow_pattern import wildcard
from tvm.relay.dataflow_pattern import is_op
from tvm.relay.dataflow_pattern import rewrite
from tvm.relay.dataflow_pattern import CallPattern
from tvm.relay.backend.contrib.ethosu import op as ethosu_ops # type: ignore
from tvm.relay.backend.contrib.ethosu import vela_api
from tvm.relay.backend.contrib.ethosu import util
from tvm.relay.op.contrib import ethosu as ethosu_patterns # type: ignore
class SplitRewriter(DFPatternCallback):
"""This rewriting converts split operations into a sequence of
strided_slice operations, because codegen is going to be based
on strided_slices that will define the slice of the tensor that
will be fed to the consumer.
"""
def __init__(self):
super().__init__(require_type=True)
self.split_in = wildcard()
self.pattern = is_op("split")(self.split_in)
@staticmethod
def get_section_begin_coords(split: tvm.relay.Expr) -> List[int]:
"""Currently, the split operator takes an array of indices or an integer
indicating the number of splits. However, its an array of indices could
represent both cases, therefore this function just make it an array of
indices where each index represent the co-ordinate of beginning of each
section -- defines as section begins.
Parameters
----------
split : tvm.relay.Expr
The Relay Call expression for a split operator
Returns
-------
section_begins : List[int]
A list containing integers corresponding to section
begins
"""
indices_or_sections = split.attrs.indices_or_sections
input_shape = split.args[0].checked_type.shape
split_axis = split.attrs.axis
if isinstance(indices_or_sections, tvm.ir.container.Array):
# 0 is the beginning of the first section.
return [0] + list(indices_or_sections)
split_axis_len = input_shape[split_axis].value
section_length = split_axis_len // indices_or_sections.value
return list(range(0, split_axis_len, section_length))
def callback(
self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
) -> tvm.relay.Expr:
split_input = post.args[0]
split_begins = list()
split_ends = list()
section_begins_in_split_axis = self.get_section_begin_coords(post)
for split_cord in section_begins_in_split_axis:
# first begin is [0, 0, ... , 0]
begin_shape = [0 for i in range(len(split_input.checked_type.shape))]
begin_shape[post.attrs.axis] = split_cord
split_begins.append(begin_shape)
end_shape = list(split_input.checked_type.shape)
# Only the split axis coordinate changes
end_shape[post.attrs.axis] = split_cord
split_ends.append(end_shape)
# Coordinates need to be shifted left because the beginning
# of the next section is the end of the previous
split_ends = split_ends[1:]
# Last section end is the shape of the tensor itself.
split_ends.append(list(split_input.checked_type.shape))
strided_slices = list()
for sb, se in zip(split_begins, split_ends):
strided_slices.append(relay.strided_slice(split_input, sb, se))
return relay.Tuple(strided_slices)
class PartitionedSplitRewriter(DFPatternCallback):
"""This pass brings the split out of the partitioned function"""
def __init__(self):
super().__init__(require_type=True, rewrite_once=True)
self.pattern = (
wildcard().has_attr({"Composite": ethosu_patterns.SplitParams.composite_name})
)(wildcard())
def callback(
self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
) -> tvm.relay.Expr:
split_input = post.args[0]
split_params = ethosu_patterns.SplitParams(post.op.body)
indices_or_sections = split_params.indices_or_sections
axis = split_params.axis
return relay.op.split(split_input, indices_or_sections, axis=axis).astuple()
def get_lut_from_func(
ifm_scale: float,
ifm_zp: int,
ofm_scale: float,
ofm_zp: int,
func: Callable[[float], float],
) -> List[int]:
"""Calculates the values of the lookup table based on the calculation function"""
lut_values = list()
# Only int8 is currently supported
dtype = np.int8
qmin, qmax = np.iinfo(dtype).min, np.iinfo(dtype).max
for x in range(qmin, qmax + 1):
x_real = ifm_scale * (x - ifm_zp)
out_real = func(x_real)
lut_result = int(util.round_away_zero(ofm_zp + out_real / ofm_scale))
lut_result = min(qmax, max(qmin, lut_result))
lut_values.append(lut_result)
return lut_values
class LutActivationRewriter(DFPatternCallback):
"""A class to create an identity operator with the LUT"""
def __init__(
self,
params_class: Type,
activation_type: str,
calc_func: Callable[[float], float],
):
super().__init__(require_type=True, rewrite_once=True)
self.params_class = params_class
self.pattern = (wildcard().has_attr({"Composite": params_class.composite_name}))(wildcard())
self.activation_type = activation_type
self.calc_func = calc_func
def callback(self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map):
params = self.params_class(post.op.body)
params.ifm.tensor = post.args[0]
input_scale = float(params.ifm.q_params.scale_f32)
input_zp = int(params.ifm.q_params.zero_point)
output_scale = float(params.ofm.q_params.scale_f32)
output_zp = int(params.ofm.q_params.zero_point)
lut_values = get_lut_from_func(
input_scale,
input_zp,
output_scale,
output_zp,
self.calc_func,
)
lut = relay.const(lut_values, dtype=params.ifm.dtype)
# We baked the requantization into the LUT, so we don't requantize the identity operator
identity = ethosu_ops.ethosu_identity(
ifm=params.ifm.tensor,
lut=lut,
ifm_scale=input_scale,
ifm_zero_point=input_zp,
ofm_scale=input_scale,
ofm_zero_point=input_zp,
activation=self.activation_type,
)
return identity
class TanhRewriter(LutActivationRewriter):
"""This pass adds tanh as a LUT to the identity operator"""
def __init__(self):
super().__init__(
params_class=ethosu_patterns.TanhParams, activation_type="TANH", calc_func=math.tanh
)
def sigmoid_calc_func(x: float) -> float:
"""Function to calculate the values for sigmoid"""
# These limits are inherited from TFLite
upper_limit = 8.0
lower_limit = -8.0
if x <= lower_limit:
y = 0.0
elif x >= upper_limit:
y = 1.0
else:
y = 1 / (1 + math.exp(-x))
return y
class SigmoidRewriter(LutActivationRewriter):
"""This pass adds sigmoid as a LUT for identity op"""
def __init__(self):
super().__init__(
params_class=ethosu_patterns.SigmoidParams,
activation_type="SIGMOID",
calc_func=sigmoid_calc_func,
)
def leaky_relu_calc_func(x: float, alpha: float) -> float:
"""Function to calculate the values for leaky relu."""
return x if x >= 0 else x * alpha
class LeakyReLURewriter(DFPatternCallback):
"""This pass adds leaky relu as a LUT for identity op."""
def __init__(self):
super().__init__(require_type=True, rewrite_once=True)
self.params_class = ethosu_patterns.LeakyReLUParams
self.pattern = wildcard().has_attr({"Composite": self.params_class.composite_name})(
wildcard()
)
def callback(self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map):
params = self.params_class(post.op.body)
params.ifm.tensor = post.args[0]
input_scale = np.double(float(params.ifm.q_params.scale_f32))
input_zp = int(params.ifm.q_params.zero_point)
output_scale = np.double(float(params.ofm.q_params.scale_f32))
output_zp = int(params.ofm.q_params.zero_point)
alpha = params.alpha
# The calculation of the LUT values is similar to that in Vela
# convert_lrelu_to_lut(op, arch)
# (https://review.mlplatform.org/plugins/gitiles/ml/ethos-u/ethos-u-vela/+/refs/tags/3.2.0/ethosu/vela/tflite_graph_optimiser.py#864) # pylint: disable=line-too-long
alpha_scalar = 1
alpha_scale, alpha_shift = scaling.elementwise_mul_scale(input_scale, alpha, output_scale)
identity_scale, identity_shift = scaling.elementwise_mul_scale(input_scale, 1, output_scale)
dtype = params.ifm.dtype
qmin, qmax = np.iinfo(dtype).min, np.iinfo(dtype).max
def calculate_lut_value(i):
zp_shift = (
fp_math.multiply_by_quantized_multiplier(
alpha_scalar * (i - input_zp), alpha_scale, alpha_shift
)
if i < input_zp
else fp_math.multiply_by_quantized_multiplier(
i - input_zp, identity_scale, identity_shift
)
)
return min(qmax, max(qmin, output_zp + zp_shift))
values = list(map(calculate_lut_value, range(qmin, qmax + 1)))
lut = relay.const(values, dtype=dtype)
# We baked the requantization into the LUT, so we don't requantize the identity operator
identity = ethosu_ops.ethosu_identity(
ifm=params.ifm.tensor,
lut=lut,
ifm_scale=input_scale,
ifm_zero_point=input_zp,
ofm_scale=input_scale,
ofm_zero_point=input_zp,
activation="LUT",
)
return identity
class HardSwishRewriter(DFPatternCallback):
"""Convert ethosu.hard_swish composite function to add operation with LUT."""
def __init__(self):
super().__init__(require_type=True, rewrite_once=True)
self.params_class = ethosu_patterns.HardSwishParams
self.pattern = wildcard().has_attr({"Composite": self.params_class.composite_name})(
wildcard()
)
def callback(self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map):
params = self.params_class(post.op.body)
params.ifm.tensor = post.args[0]
# The calculation of the LUT values is similar to that in Vela
# convert_hardswish_to_lut(op, arch, nng)
# (https://review.mlplatform.org/plugins/gitiles/ml/ethos-u/ethos-u-vela/+/refs/tags/3.2.0/ethosu/vela/tflite_graph_optimiser.py#719) # pylint: disable=line-too-long
input_scale = np.double(params.ifm.q_params.scale_f32)
input_zp = int(params.ifm.q_params.zero_point)
hires_input_scale = (1 / 128) * input_scale
output_scale = np.double(params.ofm.q_params.scale_f32)
output_zp = int(params.ofm.q_params.zero_point)
output_scale, output_shift = scaling.quantise_scale(hires_input_scale / output_scale)
output_scale_16 = fp_math.downscale_multiplier_int32_to_int16(output_scale)
output_shift = 31 - output_shift
output_shift = -output_shift if output_shift < 0 else 0
dtype = params.ifm.dtype
qmin, qmax = np.iinfo(dtype).min, np.iinfo(dtype).max
def calculate_relu_multiplier(inp, input_scale):
rmultiplier = np.double(3 / 32768)
rscale, rshift = scaling.quantise_scale(input_scale / rmultiplier)
rscale_16 = fp_math.downscale_multiplier_int32_to_int16(rscale)
rvalue = np.int16(inp)
if rshift < 31:
rvalue = fp_math.shift_left16(rvalue, 30 - rshift)
rvalue = fp_math.saturating_rounding_mul16(rvalue, rscale_16)
rvalue = fp_math.shift_left16(rvalue, 1)
elif rshift > 31:
rvalue = fp_math.saturating_rounding_mul16(rvalue, rscale_16)
rvalue = fp_math.rounding_divide_by_pot(rvalue, rshift - 31)
else:
rvalue = fp_math.saturating_rounding_mul16(rvalue, rscale_16)
rvalue = (rvalue + (1 << 15)) >> 1
return rvalue
def calculate_lut_values(i):
hires_input_value = (i - input_zp) * 128
preshift_input_value = fp_math.saturating_rounding_mul16(
hires_input_value, output_scale_16
)
relu_value = calculate_relu_multiplier(hires_input_value, hires_input_scale)
lut_result = fp_math.saturating_mul16(relu_value, preshift_input_value)
lut_result = fp_math.rounding_divide_by_pot(lut_result, output_shift) + output_zp
return min(qmax, max(qmin, lut_result))
values = list(map(calculate_lut_values, range(-128, 128)))
lut = relay.const(values, dtype=dtype)
# We baked the requantization into the LUT, so we don't requantize the identity operator
identity = ethosu_ops.ethosu_identity(
ifm=params.ifm.tensor,
lut=lut,
ifm_scale=input_scale,
ifm_zero_point=input_zp,
ofm_scale=input_scale,
ofm_zero_point=input_zp,
activation="LUT",
)
return identity
class Conv2DRewriter(DFPatternCallback):
"""Convert conv2d related composite functions into ethosu_conv2d operators"""
def __init__(self):
super().__init__(require_type=True)
self.pattern = (wildcard().has_attr({"Composite": "ethos-u.qnn_conv2d"}))(wildcard())
def callback(
self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
) -> tvm.relay.Expr:
params = ethosu_patterns.QnnConv2DParams(post.op.body)
params.ifm.tensor = post.args[0]
channels_map = {
"NHWC": 3,
}
kernel_size_map = {
"HWIO": params.weights.shape[0:2],
"OHWI": params.weights.shape[1:3],
"HWOI": params.weights.shape[0:2],
}
activation_map = {"clip": "CLIP"}
weight_to_ohwi_transform_map = {"HWIO": [3, 0, 1, 2]}
weights_values = params.weights.values
weights_values_ohwi = np.transpose(
weights_values, weight_to_ohwi_transform_map[str(params.weights.layout)]
)
if params.activation:
activation = activation_map[params.activation.op.name]
clip_min = int(params.activation.attrs.a_min)
clip_max = int(params.activation.attrs.a_max)
else:
activation = "NONE"
clip_min = 0
clip_max = 0
scale_bias = vela_api.pack_biases(
biases=params.biases.tensor.data.asnumpy(),
ifm_scale=params.ifm.q_params.scale_f32,
ifm_dtype=np.dtype(params.ifm.dtype),
weight_scales=params.weights.q_params.scale_f32,
ofm_scale=params.ofm.q_params.scale_f32,
is_activation_tanh_or_sigmoid=activation in ["TANH", "SIGMOID"],
)
ethosu_conv2d = ethosu_ops.ethosu_conv2d(
ifm=post.args[0],
weight=relay.const(weights_values_ohwi, params.weights.values.dtype),
scale_bias=relay.const(scale_bias, "uint8"),
lut=relay.const([], dtype="int8"),
ifm_scale=float(params.ifm.q_params.scale_f32),
ifm_zero_point=int(params.ifm.q_params.zero_point),
weight_zero_point=int(params.weights.q_params.zero_point),
ofm_scale=float(params.ofm.q_params.scale_f32),
ofm_zero_point=int(params.ofm.q_params.zero_point),
kernel_shape=kernel_size_map[str(params.weights.layout)],
ofm_channels=params.ofm.shape[channels_map[str(params.ofm.layout)]],
strides=params.strides,
padding=params.padding,
dilation=params.dilation,
activation=activation,
clip_min=clip_min,
clip_max=clip_max,
upscale="NONE",
ifm_layout=str(params.ifm.layout),
ofm_layout=str(params.ofm.layout),
)
return ethosu_conv2d
class Conv2DTransposeRewriter(DFPatternCallback):
"""Convert conv2d_transpose related composite functions into
ethosu_conv2d_transpose operators."""
def __init__(self):
super().__init__(require_type=True)
self.pattern = (wildcard().has_attr({"Composite": "ethos-u.qnn_conv2d_transpose"}))(
wildcard()
)
def callback(
self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
) -> tvm.relay.Expr:
params = ethosu_patterns.QnnConv2DTransposeParams(post.op.body)
params.ifm.tensor = post.args[0]
ofm_shape = params.ofm.shape
legalize_padding = params.legalize_padding
weight_to_ohwi_transform_map = {"IOHW": [1, 2, 3, 0]}
weights_values = params.weights.values
weights_values_ohwi = np.transpose(
weights_values, weight_to_ohwi_transform_map[str(params.weights.layout)]
)
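        # The transpose convolution is computed as an ordinary convolution over
        # a zero-upscaled IFM (upscale="ZEROS" below), which requires the kernel
        # to be rotated by 180 degrees, i.e. flipped over H and W.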
weights_values_ohwi = np.flip(weights_values_ohwi, (1, 2))
weights = relay.const(weights_values_ohwi, dtype=params.weights.values.dtype)
bias_values = (
params.biases.tensor.data.asnumpy()
if params.biases
else np.zeros((params.ifm.shape[-1]))
)
scale_bias = vela_api.pack_biases(
biases=bias_values,
ifm_scale=params.ifm.q_params.scale_f32,
ifm_dtype=np.dtype(params.ifm.dtype),
weight_scales=params.weights.q_params.scale_f32,
ofm_scale=params.ofm.q_params.scale_f32,
is_activation_tanh_or_sigmoid=False,
)
reduced_op = ethosu_ops.ethosu_conv2d(
ifm=post.args[0],
weight=weights,
scale_bias=relay.const(scale_bias, "uint8"),
lut=relay.const([], dtype="int8"),
ifm_scale=float(params.ifm.q_params.scale_f32),
ifm_zero_point=int(params.ifm.q_params.zero_point),
weight_zero_point=int(params.weights.q_params.zero_point),
ofm_scale=float(params.ofm.q_params.scale_f32),
ofm_zero_point=int(params.ofm.q_params.zero_point),
kernel_shape=params.kernel_shape,
ofm_channels=int(ofm_shape[-1]),
strides=(1, 1),
padding=legalize_padding,
dilation=params.dilation,
ifm_layout=str(params.ifm.layout),
ofm_layout=str(params.ofm.layout),
upscale="ZEROS",
)
# Remove additional padding by 'cropping' back to expected size
return relay.strided_slice(reduced_op, (0, 0, 0, 0), ofm_shape)
class DepthwiseConv2DRewriter(DFPatternCallback):
"""Convert ethosu.qnn_depthwise_conv2d composite functions to ethosu_depthwise_conv2d
operators"""
def __init__(self):
super().__init__(require_type=True)
self.pattern = (
wildcard().has_attr(
{"Composite": ethosu_patterns.QnnDepthwiseConv2DParams.composite_name}
)
)(wildcard())
def callback(
self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
) -> tvm.relay.Expr:
params = ethosu_patterns.QnnDepthwiseConv2DParams(post.op.body)
params.ifm.tensor = post.args[0]
channels_map = {
"NHWC": 3,
}
kernel_shape_map = {
"HWOI": params.weights.shape[0:2],
}
weights_values = params.weights.values
weights_values_ohwi = np.moveaxis(weights_values, [0, 1, 2, 3], [1, 2, 0, 3])
activation = "NONE"
        # Activations requiring a LUT are not yet supported, so it is set to an empty list
lut = relay.const([], "int8")
clip_min = 0
clip_max = 0
if params.activation:
activation = ethosu_patterns.QnnDepthwiseConv2DParams.activation_map[
params.activation.op.name
]
if activation == "CLIP":
clip_min = int(params.activation.attrs.a_min)
clip_max = int(params.activation.attrs.a_max)
scale_bias = vela_api.pack_biases(
biases=params.biases.tensor.data.asnumpy(),
ifm_scale=params.ifm.q_params.scale_f32,
ifm_dtype=np.dtype(params.ifm.dtype),
weight_scales=params.weights.q_params.scale_f32,
ofm_scale=params.ofm.q_params.scale_f32,
is_activation_tanh_or_sigmoid=activation in ["TANH", "SIGMOID"],
)
ethosu_depthwise_conv2d = ethosu_ops.ethosu_depthwise_conv2d(
post.args[0], # IFM
relay.const(weights_values_ohwi, params.weights.values.dtype),
relay.const(scale_bias, "uint8"),
lut,
float(params.ifm.q_params.scale_f32),
int(params.ifm.q_params.zero_point),
int(params.weights.q_params.zero_point),
float(params.ofm.q_params.scale_f32),
int(params.ofm.q_params.zero_point),
kernel_shape_map[str(params.weights.layout)],
params.ofm.shape[channels_map[str(params.ofm.layout)]],
strides=params.strides,
padding=params.padding,
dilation=params.dilation,
activation=activation,
clip_min=clip_min,
clip_max=clip_max,
upscale="NONE",
ifm_layout=str(params.ifm.layout),
ofm_layout=str(params.ofm.layout),
ofm_dtype=str(params.ofm.dtype),
)
return ethosu_depthwise_conv2d
class PoolingRewriter(DFPatternCallback):
"""Convert ethosu.avgpool2d and ethosu.maxpool2d composite functions to
ethosu_pooling operators"""
def __init__(
self,
params_class: Type,
pattern: CallPattern,
):
super().__init__(require_type=True)
self.params_class = params_class
self.pattern = pattern
def callback(
self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
) -> tvm.relay.Expr:
params = self.params_class(post.op.body)
params.ifm.tensor = post.args[0]
channels_map = {
"NHWC": 3,
}
activation_map = {"clip": "CLIP"}
if params.activation:
activation = activation_map[params.activation.op.name]
clip_min = int(params.activation.attrs.a_min)
clip_max = int(params.activation.attrs.a_max)
else:
activation = "NONE"
clip_min = 0
clip_max = 0
        # Activations requiring a LUT are not yet supported, so it is set to an empty list
lut = relay.const([], dtype="int8")
return ethosu_ops.ethosu_pooling(
ifm=post.args[0],
lut=lut,
pooling_type=params.pooling_type,
ifm_scale=params.ifm.q_params.scale_f32,
ifm_zero_point=params.ifm.q_params.zero_point,
ofm_scale=params.ofm.q_params.scale_f32,
ofm_zero_point=params.ofm.q_params.zero_point,
pool_shape=params.pool_shape,
ofm_channels=params.ofm.shape[channels_map[str(params.ofm.layout)]],
strides=params.strides,
padding=params.padding,
activation=activation,
clip_min=clip_min,
clip_max=clip_max,
upscale="NONE",
ifm_layout=str(params.ifm.layout),
ofm_layout=str(params.ofm.layout),
)
class MaxPoolingRewriter(PoolingRewriter):
def __init__(self):
super().__init__(
params_class=ethosu_patterns.MaxPool2DParams,
pattern=(
wildcard().has_attr({"Composite": ethosu_patterns.MaxPool2DParams.composite_name})
)(wildcard()),
)
class AvgPoolingRewriter(PoolingRewriter):
def __init__(self):
super().__init__(
params_class=ethosu_patterns.AvgPool2DParams,
pattern=(
wildcard().has_attr({"Composite": ethosu_patterns.AvgPool2DParams.composite_name})
)(wildcard()),
)
class BinaryElementwiseRewriter(DFPatternCallback):
"""Convert ethosu binary elementwise composite functions to
ethosu_binary_elementwise operators"""
def __init__(
self,
params_class: Type,
pattern: CallPattern,
):
super().__init__(require_type=True)
self.params_class = params_class
self.pattern = pattern
@staticmethod
def reshape_input(
inputs: List["TensorParams"],
) -> List[tvm.relay.Expr]:
"""Reshape the inputs so that the following binary elementwise
operator receives 4-dimensional inputs.
Parameters
----------
inputs: List[TensorParams]
The inputs to reshape.
Returns
-------
reshaped_inputs: List[tvm.relay.Expr]
The new reshaped inputs.
"""
reshaped_inputs = []
for i in inputs:
in_shape = i.shape
if len(in_shape) < 4:
pad_size = 4 - len(in_shape)
new_shape = ([1] * pad_size) + in_shape
new_call = relay.reshape(i.tensor, new_shape)
reshaped_inputs.append(new_call)
else:
reshaped_inputs.append(i.tensor)
return reshaped_inputs
@staticmethod
def reshape_output(output: tvm.relay.Expr, ifm_input_shape: List[int]) -> tvm.relay.Expr:
"""Reshape the output back to the original dimensionality.
        Since the NPU must have the broadcastable tensor as the
        second operand, the original shape of the first ifm must
        be the output shape.
Parameters
----------
output: tvm.relay.Expr
The output to reshape.
ifm_input_shape: List[int]
The shape of the non-reshaped ifm tensor.
Returns
-------
reshaped_output: tvm.relay.Expr
The reshaped output expression.
"""
if len(ifm_input_shape) == 4:
return output
reshaped_output = relay.reshape(output, ifm_input_shape)
return reshaped_output
def callback(
self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
) -> tvm.relay.Expr:
params = self.params_class(post.op.body)
params.ifm.tensor = post.args[1] if params.reversed_operands else post.args[0]
params.ifm2.tensor = post.args[0] if params.reversed_operands else post.args[1]
activation_map = {"clip": "CLIP"}
if params.activation:
activation = activation_map[params.activation.op.name]
clip_min = int(params.activation.attrs.a_min)
clip_max = int(params.activation.attrs.a_max)
else:
activation = "NONE"
clip_min = 0
clip_max = 0
# We don't yet support activation functions that need to get legalized to LUTs.
lut = relay.const([], dtype="int8")
inputs = [params.ifm, params.ifm2]
inputs = self.reshape_input(inputs)
ethosu_binary_elementwise = ethosu_ops.ethosu_binary_elementwise(
ifm=inputs[0],
ifm2=inputs[1],
lut=lut,
operator_type=params.operator_type,
ifm_scale=float(params.ifm.q_params.scale_f32),
ifm_zero_point=int(params.ifm.q_params.zero_point),
ifm2_scale=float(params.ifm2.q_params.scale_f32),
ifm2_zero_point=int(params.ifm2.q_params.zero_point),
ofm_scale=float(params.ofm.q_params.scale_f32),
ofm_zero_point=int(params.ofm.q_params.zero_point),
ifm_channels=params.ifm.shape[-1] if params.ifm.shape else 1,
ifm2_channels=params.ifm2.shape[-1] if params.ifm2.shape else 1,
reversed_operands=params.reversed_operands,
ofm_dtype=params.ofm.dtype,
activation=activation,
clip_min=clip_min,
clip_max=clip_max,
ifm_layout=str(params.ifm.layout),
ifm2_layout=str(params.ifm2.layout),
ofm_layout=str(params.ofm.layout),
)
output = self.reshape_output(ethosu_binary_elementwise, params.ifm.shape)
return output
class AddRewriter(BinaryElementwiseRewriter):
def __init__(self):
super().__init__(
params_class=ethosu_patterns.AddParams,
pattern=(wildcard().has_attr({"Composite": ethosu_patterns.AddParams.composite_name}))(
wildcard(), wildcard()
),
)
class SubRewriter(BinaryElementwiseRewriter):
def __init__(self):
super().__init__(
params_class=ethosu_patterns.SubParams,
pattern=(wildcard().has_attr({"Composite": ethosu_patterns.SubParams.composite_name}))(
wildcard(), wildcard()
),
)
class MulRewriter(BinaryElementwiseRewriter):
def __init__(self):
super().__init__(
params_class=ethosu_patterns.MulParams,
pattern=(wildcard().has_attr({"Composite": ethosu_patterns.MulParams.composite_name}))(
wildcard(), wildcard()
),
)
class MinRewriter(BinaryElementwiseRewriter):
def __init__(self):
super().__init__(
params_class=ethosu_patterns.MinParams,
pattern=(wildcard().has_attr({"Composite": ethosu_patterns.MinParams.composite_name}))(
wildcard(), wildcard()
),
)
class MaxRewriter(BinaryElementwiseRewriter):
def __init__(self):
super().__init__(
params_class=ethosu_patterns.MaxParams,
pattern=(wildcard().has_attr({"Composite": ethosu_patterns.MaxParams.composite_name}))(
wildcard(), wildcard()
),
)
class ShlRewriter(BinaryElementwiseRewriter):
def __init__(self):
super().__init__(
params_class=ethosu_patterns.ShlParams,
pattern=(wildcard().has_attr({"Composite": ethosu_patterns.ShlParams.composite_name}))(
wildcard(), wildcard()
),
)
class StridedSliceRewriter(DFPatternCallback):
"""This pass brings the strided slice out of the partitioned function"""
def __init__(self):
super().__init__(require_type=True, rewrite_once=True)
self.pattern = (
wildcard().has_attr({"Composite": ethosu_patterns.StridedSliceParams.composite_name})
)(wildcard())
def callback(
self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
) -> tvm.relay.Expr:
slice_input = post.args[0]
# TODO(lhutton1) For an unknown reason compilation will fail for strides of 4
# dimensions, so we cannot use params.strides as this will sometimes give
# strides as [1, 1, 1, 1]. Since we only support strides of 1, hardcoding this
# value for now.
strides = [1]
params = ethosu_patterns.StridedSliceParams(post.op.body)
strided_slice = relay.op.strided_slice(
slice_input,
params.begin,
params.end,
strides=strides,
axes=params.axes,
slice_mode=params.slice_mode,
)
return strided_slice
class ReshapeRewriter(DFPatternCallback):
"""This pass brings the reshape out of the partitioned function"""
def __init__(self):
super().__init__(require_type=True, rewrite_once=True)
self.pattern = (
wildcard().has_attr({"Composite": ethosu_patterns.ReshapeParams.composite_name})
)(wildcard())
def callback(
self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
) -> tvm.relay.Expr:
reshape_input = post.args[0]
reshape_params = ethosu_patterns.ReshapeParams(post.op.body)
new_shape = reshape_params.new_shape
return relay.op.reshape(reshape_input, newshape=new_shape)
class NoOpRewriter(DFPatternCallback):
"""This pass adds an idenity operator to reshape and strided slice to avoid a no op
without a consumer"""
def __init__(self):
super().__init__(require_type=True, rewrite_once=True)
self.reshape = is_op("reshape")(wildcard())
self.strided_slice = is_op("strided_slice")(wildcard())
self.pattern = self.reshape | self.strided_slice
def callback(
self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
) -> tvm.relay.Expr:
if pre.checked_type.dtype == "int32":
return post
return ethosu_ops.ethosu_identity(ifm=post, lut=relay.const([], dtype="int8"))
class UnaryElementwiseRewriter(DFPatternCallback):
"""
Convert ethosu unary elementwise composite function to
ethosu_unary_elementwise operators
"""
def __init__(self, params_class: Type, pattern: CallPattern):
super().__init__(require_type=True)
self.params_class = params_class
self.pattern = pattern
def callback(
self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
) -> tvm.relay.Expr:
params = self.params_class(post.op.body)
params.ifm.tensor = post.args[0]
activation_map = {"clip": "CLIP"}
if params.activation:
activation = activation_map[params.activation.op.name]
clip_min = int(params.activation.attrs.a_min)
clip_max = int(params.activation.attrs.a_max)
else:
activation = "NONE"
clip_min = 0
clip_max = 0
# We don't yet support activation functions that use LUT.
lut = relay.const([], dtype="int8")
unary_input_shape = params.ifm.shape
# If the input tensor is not 4D, enter reshapes before and after the unary operator
if len(params.ifm.shape) == 4:
unary_input = params.ifm.tensor
else:
pad_size = 4 - len(unary_input_shape)
unary_input_shape = ([1] * pad_size) + unary_input_shape
unary_input = relay.op.reshape(params.ifm.tensor, newshape=unary_input_shape)
ethosu_unary_elementwise = ethosu_ops.ethosu_unary_elementwise(
ifm=unary_input,
lut=lut,
operator_type=params.operator_type,
ifm_scale=float(params.ifm.q_params.scale_f32),
ifm_zero_point=int(params.ifm.q_params.zero_point),
ofm_scale=float(params.ofm.q_params.scale_f32),
ofm_zero_point=int(params.ofm.q_params.zero_point),
ofm_channels=unary_input_shape[3],
activation=activation,
clip_min=clip_min,
clip_max=clip_max,
ifm_layout=str(params.ifm.layout),
ofm_layout=str(params.ofm.layout),
)
if len(params.ifm.shape) == 4:
op = ethosu_unary_elementwise
else:
op = relay.op.reshape(ethosu_unary_elementwise, newshape=params.ifm.shape)
return op
class AbsRewriter(UnaryElementwiseRewriter):
def __init__(self):
super().__init__(
params_class=ethosu_patterns.AbsParams,
pattern=(wildcard().has_attr({"Composite": ethosu_patterns.AbsParams.composite_name}))(
wildcard()
),
)
class MeanRewriter(DFPatternCallback):
"""Convert ethosu.mean composite functions to an equivalent legalization:
    - Case 1 (axis == [1, 2] and keepdims == True):
ethosu_depthwise_conv2d + ethosu_binary_elementwise
- Case 2 (ifm qparams == ofm qparams): ethosu_pooling
- Case 3 (else): ethosu_depthwise_conv2d
"""
def __init__(self):
super().__init__(require_type=True)
self.pattern = (
wildcard().has_attr({"Composite": ethosu_patterns.MeanParams.composite_name})
)(wildcard())
def callback(
self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
) -> tvm.relay.Expr:
params = ethosu_patterns.MeanParams(post.op.body)
params.ifm.tensor = post.args[0]
ifm_shape = params.ifm.shape
ofm_shape = params.ofm.shape
lut = relay.const([], "int8")
axis = params.axis
reduced_op = params.ifm.tensor
# Enforce 4d input
if len(ifm_shape) < 4:
axis = [x + 1 for x in axis]
if len(ifm_shape) == 3:
ifm_shape = [1, params.height, params.width, ifm_shape[2]]
else:
ifm_shape = [1, params.height, params.width, 1]
reduced_op = relay.reshape(reduced_op, ifm_shape)
filter_height = ifm_shape[1] if 1 in axis else 1
filter_width = ifm_shape[2] if 2 in axis else 1
in_channels = out_channels = ifm_shape[-1]
        # If the height is greater than the maximum kernel height (64), reshape
        # the input from [filter_height, filter_width] to
        # [1, (filter_height*filter_width)], but only when the axis is [1, 2].
if axis == [1, 2] and filter_height > 64:
ifm_shape = (ifm_shape[0], 1, filter_height * filter_width, in_channels)
filter_width = filter_height * filter_width
filter_height = 1
reduced_op = relay.reshape(reduced_op, ifm_shape)
if axis == [1, 2] and params.keepdims:
weight_scale = 1
weight_values = np.ones([out_channels, filter_height, filter_width, 1])
scale_bias = vela_api.pack_biases(
biases=np.zeros(ifm_shape[-1]),
ifm_scale=params.ifm.q_params.scale_f32,
ifm_dtype=np.dtype(params.ifm.dtype),
                weight_scales=np.array([weight_scale], dtype=np.float64),
ofm_scale=params.ofm.q_params.scale_f32,
is_activation_tanh_or_sigmoid=False,
)
reduced_op = ethosu_ops.ethosu_depthwise_conv2d(
ifm=reduced_op,
weight=relay.const(weight_values, params.ifm.dtype),
scale_bias=relay.const(scale_bias, "uint8"),
lut=lut,
ifm_scale=float(params.ifm.q_params.scale_f32),
ifm_zero_point=int(params.ifm.q_params.zero_point),
weight_zero_point=0,
ofm_scale=float(params.ofm.q_params.scale_f32),
ofm_zero_point=int(params.ofm.q_params.zero_point),
kernel_shape=(filter_height, filter_width),
ofm_channels=out_channels,
ofm_dtype="int16",
)
n = int(filter_height * filter_width)
eps = 1 / (256 * (n + 1)) if n % 2 == 0 else 0
scalar_tensor = relay.const(np.ones([1, 1, 1, 1], dtype="int16"), dtype="int16")
reduced_op = ethosu_ops.ethosu_binary_elementwise(
ifm=reduced_op,
ifm2=scalar_tensor,
lut=lut,
operator_type="MUL",
ifm_scale=float(params.ofm.q_params.scale_f32),
ifm_zero_point=int(params.ofm.q_params.zero_point),
ifm2_scale=1 / (n - eps),
ifm2_zero_point=0,
ofm_scale=float(params.ofm.q_params.scale_f32),
ofm_zero_point=int(params.ofm.q_params.zero_point),
ifm_channels=out_channels,
ifm2_channels=out_channels,
reversed_operands=False,
ofm_dtype="int8",
rounding_mode="NATURAL",
)
elif (
params.ifm.q_params.scale_f32 == params.ofm.q_params.scale_f32
and params.ifm.q_params.zero_point == params.ofm.q_params.zero_point
):
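            # When the IFM and OFM share quantization parameters, the mean
            # legalizes to a plain average pool.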
reduced_op = ethosu_ops.ethosu_pooling(
ifm=reduced_op,
lut=lut,
pooling_type="AVG",
ifm_scale=float(params.ifm.q_params.scale_f32),
ifm_zero_point=0,
ofm_scale=float(params.ofm.q_params.scale_f32),
ofm_zero_point=0,
pool_shape=(filter_height, filter_width),
ofm_channels=out_channels,
rounding_mode="TRUNCATE",
)
else:
weight_scale = 1 / (filter_height * filter_width)
weight_values = np.ones([out_channels, filter_height, filter_width, 1])
bias = -1 * int(params.ifm.q_params.zero_point) * filter_height * filter_width
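            # The bias cancels the input zero point summed over the pooling
            # window, so the convolution accumulates sum(x - zp); the
            # 1 / (filter_height * filter_width) weight scale then turns the
            # sum into a mean.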
scale_bias = vela_api.pack_biases(
biases=np.ones([ifm_shape[-1]]) * bias,
ifm_scale=params.ifm.q_params.scale_f32,
ifm_dtype=np.dtype(params.ifm.dtype),
                weight_scales=np.array([weight_scale], dtype=np.float64),
ofm_scale=params.ofm.q_params.scale_f32,
is_activation_tanh_or_sigmoid=False,
)
reduced_op = ethosu_ops.ethosu_depthwise_conv2d(
ifm=reduced_op,
weight=relay.const(weight_values, params.ifm.dtype),
scale_bias=relay.const(scale_bias, "uint8"),
lut=lut,
ifm_scale=float(params.ifm.q_params.scale_f32),
ifm_zero_point=0,
weight_zero_point=0,
ofm_scale=float(params.ofm.q_params.scale_f32),
ofm_zero_point=int(params.ofm.q_params.zero_point),
kernel_shape=(filter_height, filter_width),
ofm_channels=out_channels,
rounding_mode="NATURAL",
)
# Reshape to original ofm shape
if len(ofm_shape) < 4:
reduced_op = relay.reshape(reduced_op, ofm_shape)
return reduced_op
class ConcatRewriter(DFPatternCallback):
"""The newer versions of TFLite converters return a concatenate operator that concatenates
tensors with same QNN params (if the QNN params of tensors were initially different,
the converter adds a requantize node), so this rewriter replaces the QNN concatenate with
"normal" concatenate"""
def __init__(self):
super().__init__(require_type=True, rewrite_once=True)
self.pattern = (
wildcard().has_attr({"Composite": ethosu_patterns.ConcatParams.composite_name})
)(None)
def callback(
self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
) -> tvm.relay.Expr:
# Find the tensors that are inputs to the concat and the scales and zero points
concat_args = list()
for arg in post.args:
if isinstance(arg, tvm.relay.expr.Call):
concat_args.append(arg)
axis = post.op.body.attrs.axis
concat = relay.op.concatenate(relay.Tuple(concat_args), axis=axis)
return concat
class RequantizeRewriter(DFPatternCallback):
"""Convert ethos-u.requantize composite function to an identity operation."""
def __init__(self):
super().__init__(require_type=True)
self.pattern = (
wildcard().has_attr({"Composite": ethosu_patterns.RequantizeParams.composite_name})
)(wildcard())
def callback(
self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
) -> tvm.relay.Expr:
params = ethosu_patterns.RequantizeParams(post.op.body)
params.ifm.tensor = post.args[0]
lut = relay.const([], "int8")
return ethosu_ops.ethosu_identity(
ifm=params.ifm.tensor,
lut=lut,
ifm_scale=float(params.ifm.q_params.scale_f32),
ifm_zero_point=int(params.ifm.q_params.zero_point),
ofm_scale=float(params.ofm.q_params.scale_f32),
ofm_zero_point=int(params.ofm.q_params.zero_point),
)
class Resize2dRewriter(DFPatternCallback):
"""
Convert ethos-u.resize2d composite function to an equivalent operation that
performs the relevant upsampling operation.
    Case 1: No upsampling (upscale factor of 1):
        Identity.
    Case 2: Nearest neighbor upsampling:
        1x1 pooling with 2x2 nearest neighbor upsampling.
    Case 3: Bilinear upsampling:
        2x2 average pool with 2x2 nearest neighbor upsampling.
"""
def __init__(self):
super().__init__(require_type=True)
self.pattern = (
wildcard().has_attr({"Composite": ethosu_patterns.Resize2dParams.composite_name})
)(wildcard())
def callback(
self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
) -> tvm.relay.Expr:
params = ethosu_patterns.Resize2dParams(post.op.body)
params.ifm.tensor = post.args[0]
lut = relay.const([], "int8")
ifm_shape = params.ifm.shape
in_channels = ifm_shape[-1]
reduced_op = params.ifm.tensor
current_size = np.array(ifm_shape[1:3])
output_size = np.array(params.size)
if (current_size == output_size).all():
return ethosu_ops.ethosu_identity(
reduced_op,
lut,
ifm_scale=float(params.ifm.q_params.scale_f32),
ifm_zero_point=int(params.ifm.q_params.zero_point),
ofm_scale=float(params.ofm.q_params.scale_f32),
ofm_zero_point=int(params.ofm.q_params.zero_point),
)
padding = [0, 0, 0, 0]
rounding_mode = "TFL"
pool_shape = [1, 1]
if params.method == "linear":
pool_shape = [2, 2]
rounding_mode = "NATURAL"
if params.coordinate_transformation_mode == "asymmetric":
# Use SAME padding.
ypad = Resize2dRewriter.get_required_padding(ifm_shape[1])
xpad = Resize2dRewriter.get_required_padding(ifm_shape[2])
padding = [ypad // 2, xpad // 2, (ypad + 1) // 2, (xpad + 1) // 2]
return ethosu_ops.ethosu_pooling(
ifm=reduced_op,
lut=lut,
pooling_type="AVG",
ifm_scale=float(params.ifm.q_params.scale_f32),
ifm_zero_point=int(params.ifm.q_params.zero_point),
ofm_scale=float(params.ofm.q_params.scale_f32),
ofm_zero_point=int(params.ofm.q_params.zero_point),
pool_shape=pool_shape,
ofm_channels=in_channels,
strides=[1, 1],
padding=padding,
upscale="NEAREST",
rounding_mode=rounding_mode,
)
@staticmethod
def get_required_padding(input_size: int, pool_size: int = 2) -> int:
"""Gets the amount of padding required needed to achieve
'SAME' padding for a given axis."""
needed_input = (input_size - 1) + pool_size
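        # e.g. input_size=4, pool_size=2 gives needed_input=5, so one cell of
        # padding is required (split between the two sides by the caller).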
total_padding = max(0, needed_input - input_size)
return total_padding
class ExpandDimsRewriter(DFPatternCallback):
"""Legalize expand dims to a reshape operator."""
def __init__(self):
super().__init__(require_type=True, rewrite_once=True)
self.pattern = (
wildcard().has_attr({"Composite": ethosu_patterns.ExpandDimsParams.composite_name})
)(None)
def callback(
self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
) -> tvm.relay.Expr:
params = ethosu_patterns.ExpandDimsParams(post.op.body)
return relay.op.reshape(post.args[0], newshape=params.output.shape)
class SqueezeRewriter(DFPatternCallback):
"""Legalize squeeze to a reshape operator."""
def __init__(self):
super().__init__(require_type=True, rewrite_once=True)
self.pattern = (
wildcard().has_attr({"Composite": ethosu_patterns.SqueezeParams.composite_name})
)(None)
def callback(
self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
) -> tvm.relay.Expr:
params = ethosu_patterns.SqueezeParams(post.op.body)
return relay.op.reshape(post.args[0], newshape=params.output.shape)
class FullyConnectedRewriter(DFPatternCallback):
"""Legalize Fully Connected (with bias and clip) to an NPU operator"""
def __init__(self):
super().__init__(require_type=True)
self.pattern = (
wildcard().has_attr({"Composite": ethosu_patterns.FullyConnectedParams.composite_name})
)(wildcard())
def callback(self, pre, post, node_map):
params = ethosu_patterns.FullyConnectedParams(post.op.body)
params.ifm.tensor = post.args[0]
# IFM reshapes
ifm = post.args[0]
if len(params.ifm.shape) != 4 or not params.ifm.shape[1] == params.ifm.shape[2] == 1:
ifm = relay.reshape(ifm, (1, 1, 1, params.ifm.shape[-1]))
# Weight transformations
weights_values = params.weights.values
weights_values_ohwi = np.expand_dims(weights_values, axis=(1, 2))
if params.activation:
activation = "CLIP"
clip_min = int(params.activation.attrs.a_min)
clip_max = int(params.activation.attrs.a_max)
else:
activation = "NONE"
clip_min = 0
clip_max = 0
bias_values = (
params.biases.tensor.data.asnumpy()
if params.biases
else np.zeros((params.ofm.shape[-1]))
)
scale_bias = vela_api.pack_biases(
biases=bias_values,
ifm_scale=params.ifm.q_params.scale_f32,
ifm_dtype=np.dtype(params.ifm.dtype),
weight_scales=params.weights.q_params.scale_f32,
ofm_scale=params.ofm.q_params.scale_f32,
is_activation_tanh_or_sigmoid=False,
)
ethosu_fc = ethosu_ops.ethosu_conv2d(
ifm=ifm,
weight=relay.const(weights_values_ohwi, params.weights.values.dtype),
scale_bias=relay.const(scale_bias, "uint8"),
lut=relay.const([], dtype="int8"),
ifm_scale=float(params.ifm.q_params.scale_f32),
ifm_zero_point=int(params.ifm.q_params.zero_point),
weight_zero_point=int(params.weights.q_params.zero_point),
ofm_scale=float(params.ofm.q_params.scale_f32),
ofm_zero_point=int(params.ofm.q_params.zero_point),
kernel_shape=[1, 1],
ofm_channels=params.weights.shape[0],
strides=(1, 1),
padding=(0, 0, 0, 0),
dilation=(1, 1),
activation=activation,
clip_min=clip_min,
clip_max=clip_max,
upscale="NONE",
ifm_layout="NHWC",
ofm_layout="NHWC",
)
if len(params.ofm.shape) != 4 or not params.ofm.shape[1] == params.ofm.shape[2] == 1:
ethosu_fc = relay.reshape(ethosu_fc, params.ofm.shape)
return ethosu_fc
@util.create_npu_function_pass(opt_level=1)
class LegalizeEthosU:
"""This is the pass to call graph-rewrites to perform graph transformation
in a way such that the operations are replaced with hardware/codegen supported
operations.
"""
def transform_npu_function(self, _, func: relay.Function) -> relay.Function:
"""This is the method that replaces the operations with hardware/codegen supported
operations.
"""
rewriters = [
PartitionedSplitRewriter(),
SplitRewriter(),
Conv2DRewriter(),
Conv2DTransposeRewriter(),
DepthwiseConv2DRewriter(),
FullyConnectedRewriter(),
MaxPoolingRewriter(),
AvgPoolingRewriter(),
AddRewriter(),
SubRewriter(),
MulRewriter(),
MinRewriter(),
MaxRewriter(),
ShlRewriter(),
AbsRewriter(),
TanhRewriter(),
HardSwishRewriter(),
LeakyReLURewriter(),
MeanRewriter(),
ConcatRewriter(),
SigmoidRewriter(),
RequantizeRewriter(),
Resize2dRewriter(),
ExpandDimsRewriter(),
SqueezeRewriter(),
ReshapeRewriter(),
StridedSliceRewriter(),
NoOpRewriter(),
]
for rewriter in rewriters:
func = rewrite(rewriter, func)
return func
def __call__(self, *args, **kwargs):
# pylint is unable figure out the decorated
# class is callable, thus adding this to
# suppress the warning.
pass
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/op/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"Relay operators for the Arm(R) Ethos(TM)-U NPU"
from .convolution import ethosu_conv2d
from .depthwise import ethosu_depthwise_conv2d
from .pooling import ethosu_pooling
from .binary_elementwise import ethosu_binary_elementwise
from .identity import ethosu_identity
from .unary_elementwise import ethosu_unary_elementwise
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/op/binary_elementwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Relay operators for binary elementwise operators for Arm(R) Ethos(TM)-U NPU"""
from typing import Optional
import tvm
from tvm.relay.op import _make
from tvm.topi.generic import schedule_injective
from tvm.relay.op.op import OpStrategy
from tvm.relay.op import strategy as _strategy
from ..te import binary_elementwise_compute
def _extract_ethosu_binary_elementwise_params(attrs, args):
"""Get the parameters necessary to construct a ethosu_binary_elementwise compute TE
from a ethosu_binary_elementwise Relay call."""
ifm = args[0]
ifm2 = args[1]
lut = args[2]
operator_type = attrs.operator_type
ifm_scale = attrs.ifm_scale
ifm_zero_point = attrs.ifm_zero_point
ifm2_scale = attrs.ifm2_scale
ifm2_zero_point = attrs.ifm2_zero_point
ofm_scale = attrs.ofm_scale
ofm_zero_point = attrs.ofm_zero_point
ifm_channels = attrs.ifm_channels
ifm2_channels = attrs.ifm2_channels
reversed_operands = attrs.reversed_operands
activation = attrs.activation
clip_min = attrs.clip_min
clip_max = attrs.clip_max
rounding_mode = attrs.rounding_mode
ifm_layout = attrs.ifm_layout
ifm2_layout = attrs.ifm2_layout
ofm_layout = attrs.ofm_layout
ofm_dtype = attrs.ofm_dtype
return (
ifm,
ifm2,
lut,
operator_type,
ifm_scale,
ifm_zero_point,
ifm2_scale,
ifm2_zero_point,
ofm_scale,
ofm_zero_point,
ifm_channels,
ifm2_channels,
reversed_operands,
activation,
clip_min,
clip_max,
rounding_mode,
ifm_layout,
ifm2_layout,
ofm_layout,
ofm_dtype,
)
@tvm.ir.register_op_attr("contrib.ethosu.binary_elementwise", "FTVMCompute")
def create_ethosu_binary_elementwise_compute(attrs, args, out_type):
"""Create an ethosu_binary_elementwise compute op."""
params = _extract_ethosu_binary_elementwise_params(attrs, args)
op = binary_elementwise_compute(*params)
return [op]
@tvm.ir.register_op_attr("contrib.ethosu.binary_elementwise", "FTVMStrategy")
def binary_elementwise_strategy_ethosu(attrs, inputs, out_type, target):
strategy = OpStrategy()
strategy.add_implementation(
create_ethosu_binary_elementwise_compute,
_strategy.wrap_topi_schedule(schedule_injective),
name="ethosu_binary_elementwise",
)
return strategy
def ethosu_binary_elementwise(
ifm: tvm.relay.Expr,
ifm2: tvm.relay.Expr,
lut: tvm.relay.Expr,
operator_type: str,
ifm_scale: float,
ifm_zero_point: int,
ifm2_scale: float,
ifm2_zero_point: int,
ofm_scale: float,
ofm_zero_point: int,
ifm_channels: int,
ifm2_channels: int,
reversed_operands: bool,
ofm_dtype: str,
activation: Optional[str] = "NONE",
clip_min: Optional[int] = 0,
clip_max: Optional[int] = 0,
rounding_mode: Optional[str] = "TFL",
ifm_layout: Optional[str] = "NHWC",
ifm2_layout: Optional[str] = "NHWC",
ofm_layout: Optional[str] = "NHWC",
) -> tvm.relay.Call:
"""This is a quantized binary elementwise operation as supported by
the NPU. It accepts either NHWC or NHCWB16 format
for the input data.
Parameters
----------
ifm : tvm.relay.Expr
The Input Feature Map tensor (IFM).
ifm2 : tvm.relay.Expr
The Input Feature Map tensor 2 (IFM2).
lut : tvm.relay.Expr
The look-up table of values to use if activation = "LUT".
operator_type: str
The type of the binary elementwise operator.
"ADD"
"SUB"
"MUL"
"MIN"
"MAX"
"SHR"
"SHL"
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
ifm2_scale : float
The quantization scale for the Input Feature Map tensor 2.
ifm2_zero_point : int
The quantization zero point for the Input Feature Map tensor 2.
ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
ifm_channels : int
The number of the Input Feature Map channels.
ifm2_channels : int
The number of the Input Feature Map 2 channels.
reversed_operands : bool
True if IFM2 is the first operand and IFM is the second operand.
ofm_dtype: str
The Output Feature Map tensor type.
        MUL, ADD, SUB {IFM}->{OFM}:
            {uint8, int8, int32} -> {uint8, int8, int32}, any pairing
        MAX, MIN:
            IFM and OFM must be of the same type, one of:
            {int8, uint8}
        SHR {IFM}->{OFM}:
            {int32}->{int8, uint8, int32}, any pairing
        SHL:
            {int32}->{int32} only
activation : str, optional
The activation function to use.
"NONE" - no activation function.
"CLIP" - clip the output between clip_min and clip_max.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
Available activations for activation type:
{int8, uint8}: "NONE", "CLIP", "TANH", "SIGMOID", "LUT"
{int32}: "NONE"
clip_min : int, optional
The minimum clipping value if activation = "CLIP".
clip_max : int, optional
The maximum clipping value if activation = "CLIP".
rounding_mode : str, optional
The rounding mode to apply to the Output Feature Map tensor.
"TFL" - Tensorflow Lite rounding scheme.
"TRUNCATE" - Truncate towards zero.
"NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
ifm_layout : str, optional
The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
ifm2_layout : str, optional
The layout of the Input Feature Map tensor 2. Can be "NHWC" or "NHCWB16".
ofm_layout : str, optional
The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
Returns
-------
out : tvm.relay.Call
A call to the ethosu_binary_elementwise op.
"""
return _make.ethosu_binary_elementwise(
ifm,
ifm2,
lut,
operator_type,
ifm_scale,
ifm_zero_point,
ifm2_scale,
ifm2_zero_point,
ofm_scale,
ofm_zero_point,
ifm_channels,
ifm2_channels,
reversed_operands,
activation,
clip_min,
clip_max,
rounding_mode,
ifm_layout,
ifm2_layout,
ofm_layout,
ofm_dtype,
)
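

# A minimal usage sketch (hypothetical shapes and quantization parameters, for
# illustration only): constructing an int8 NHWC "ADD" with no activation.
#
#   ifm = tvm.relay.var("ifm", shape=(1, 4, 4, 8), dtype="int8")
#   ifm2 = tvm.relay.var("ifm2", shape=(1, 4, 4, 8), dtype="int8")
#   lut = tvm.relay.const([], dtype="int8")
#   add = ethosu_binary_elementwise(
#       ifm, ifm2, lut, operator_type="ADD",
#       ifm_scale=1.0, ifm_zero_point=0,
#       ifm2_scale=1.0, ifm2_zero_point=0,
#       ofm_scale=1.0, ofm_zero_point=0,
#       ifm_channels=8, ifm2_channels=8,
#       reversed_operands=False, ofm_dtype="int8",
#   )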
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/op/convolution.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Relay operators for convolutions for Arm(R) Ethos(TM)-U NPU"""
from typing import Tuple
import tvm # type: ignore
from tvm.relay.op import _make # type: ignore
from tvm.topi.generic import schedule_injective # type: ignore
from tvm.relay.op.op import OpStrategy # type: ignore
from tvm.relay.op import strategy as _strategy
from ..te import conv2d_compute
def _extract_ethosu_conv2d_params(attrs, args):
"""Get the parameters necessary to construct a compute TE
from a ethosu_conv2d Relay call."""
ifm = args[0]
weight = args[1]
scale_bias = args[2]
lut = args[3]
ifm_scale = attrs.ifm_scale
ifm_zero_point = attrs.ifm_zero_point
weight_zero_point = attrs.weight_zero_point
ofm_scale = attrs.ofm_scale
ofm_zero_point = attrs.ofm_zero_point
strides = attrs.strides
padding = attrs.padding
dilation = attrs.dilation
activation = attrs.activation
clip_min = attrs.clip_min
clip_max = attrs.clip_max
rounding_mode = attrs.rounding_mode
upscale = attrs.upscale
ifm_layout = attrs.ifm_layout
ofm_layout = attrs.ofm_layout
return (
ifm,
weight,
scale_bias,
lut,
ifm_scale,
ifm_zero_point,
weight_zero_point,
ofm_scale,
ofm_zero_point,
strides,
padding,
dilation,
activation,
clip_min,
clip_max,
rounding_mode,
upscale,
ifm_layout,
ofm_layout,
)
@tvm.ir.register_op_attr("contrib.ethosu.conv2d", "FTVMCompute")
def create_ethosu_conv2d_compute(attrs, args, out_type):
"""Create an ethosu_conv2d compute op."""
params = _extract_ethosu_conv2d_params(attrs, args)
op = conv2d_compute(*params)
return [op]
@tvm.ir.register_op_attr("contrib.ethosu.conv2d", "FTVMStrategy")
def conv2d_strategy_ethosu(attrs, inputs, out_type, target):
strategy = OpStrategy()
strategy.add_implementation(
create_ethosu_conv2d_compute,
_strategy.wrap_topi_schedule(schedule_injective),
name="ethosu_conv2d",
)
return strategy
def ethosu_conv2d(
ifm: tvm.relay.Expr,
weight: tvm.relay.Expr,
scale_bias: tvm.relay.Expr,
lut: tvm.relay.Expr,
ifm_scale: float,
ifm_zero_point: int,
weight_zero_point: int,
ofm_scale: float,
ofm_zero_point: int,
kernel_shape: Tuple[int, int],
ofm_channels: int,
strides: Tuple[int, int] = (1, 1),
padding: Tuple[int, int, int, int] = (0, 0, 0, 0),
dilation: Tuple[int, int] = (1, 1),
activation: str = "NONE",
clip_min: int = 0,
clip_max: int = 0,
rounding_mode: str = "TFL",
upscale: str = "NONE",
ifm_layout: str = "NHWC",
ofm_layout: str = "NHWC",
) -> tvm.relay.Call:
"""This is a quantized 2D convolution operation as supported by
the NPU. It accepts either NHWC or NHCWB16 format
for the input data and OHWI format for the kernel weights.
Reference: https://developer.arm.com/documentation/102420/0200/
Note that the per-channel weight scale and bias tensor must be
packed together into a combined tensor of uint80s. This is represented
in TVM by a (channels, 10) tensor of type uint8. For more detail,
refer to the Technical Reference Manual linked above.
Parameters
----------
ifm : tvm.relay.Expr
The Input Feature Map tensor (IFM).
weight : tvm.relay.Expr
The weight tensor.
scale_bias : tvm.relay.Expr
The packed per-channel weight scale and bias tensor.
lut : tvm.relay.Expr
The look-up table of values to use if activation = "LUT".
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
weight_zero_point : int
The quantization zero point for the weight tensor.
    ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
kernel_shape : tuple of int
The 2 dimensional kernel shape as (kernel_height, kernel_width).
ofm_channels : int
The number of the Output Feature Map channels.
strides : tuple of int, optional
The 2 dimensional strides as (stride_height, stride_width).
padding : tuple of int, optional
The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).
dilation : tuple of int, optional
The 2 dimensional dilation as (dilation_height, dilation_width).
activation : str, optional
The activation function to use.
"NONE" - no activation function.
"CLIP" - clip the output between clip_min and clip_max.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
clip_min : int, optional
The minimum clipping value if activation = "CLIP"
    clip_max : int, optional
The maximum clipping value if activation = "CLIP"
rounding_mode : str, optional
The rounding mode to apply to the Output Feature Map tensor.
"TFL" - Tensorflow Lite rounding scheme.
"TRUNCATE" - Truncate towards zero.
"NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
upscale : str, optional
The 2x2 upscaling mode to apply to the Input Feature Map tensor.
"NONE" - no upscaling.
"NEAREST" - upscale using nearest neighbour.
"ZEROS" - upscale using zeros.
ifm_layout : str, optional
The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
ofm_layout : str, optional
The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
Returns
-------
tvm.relay.Call
A call to the ethosu_conv2d op.
"""
return _make.ethosu_conv2d(
ifm,
weight,
scale_bias,
lut,
ifm_scale,
ifm_zero_point,
weight_zero_point,
ofm_scale,
ofm_zero_point,
kernel_shape,
ofm_channels,
strides,
padding,
dilation,
activation,
clip_min,
clip_max,
rounding_mode,
upscale,
ifm_layout,
ofm_layout,
)
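

# A minimal usage sketch (hypothetical shapes and quantization parameters, for
# illustration only). Weights are OHWI and scale_bias is the packed
# (ofm_channels, 10) uint8 tensor described above:
#
#   import numpy as np
#   ifm = tvm.relay.var("ifm", shape=(1, 8, 8, 3), dtype="int8")
#   weight = tvm.relay.const(np.zeros((16, 3, 3, 3), dtype="int8"))
#   scale_bias = tvm.relay.const(np.zeros((16, 10), dtype="uint8"))
#   lut = tvm.relay.const([], dtype="int8")
#   conv = ethosu_conv2d(
#       ifm, weight, scale_bias, lut,
#       ifm_scale=1.0, ifm_zero_point=0, weight_zero_point=0,
#       ofm_scale=1.0, ofm_zero_point=0,
#       kernel_shape=(3, 3), ofm_channels=16,
#   )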
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/op/depthwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Relay operator for depthwise convolution for Arm(R) Ethos(TM)-U NPU"""
from typing import Tuple
import tvm
from tvm.relay.op import _make
from tvm.topi.generic import schedule_injective
from tvm.relay.op.op import OpStrategy
from tvm.relay.op import strategy as _strategy
from ..te import depthwise_conv2d_compute
def _extract_ethosu_depthwise_conv2d_params(attrs, args):
"""Get the parameters necessary to construct a ethosu_depthwise_conv2d compute TE
from a ethosu_depthwise_conv2d Relay call."""
ifm = args[0]
weight = args[1]
scale_bias = args[2]
lut = args[3]
ifm_scale = attrs.ifm_scale
ifm_zero_point = attrs.ifm_zero_point
weight_zero_point = attrs.weight_zero_point
ofm_scale = attrs.ofm_scale
ofm_zero_point = attrs.ofm_zero_point
strides = attrs.strides
padding = attrs.padding
dilation = attrs.dilation
activation = attrs.activation
clip_min = attrs.clip_min
clip_max = attrs.clip_max
rounding_mode = attrs.rounding_mode
upscale = attrs.upscale
ifm_layout = attrs.ifm_layout
ofm_layout = attrs.ofm_layout
ofm_dtype = attrs.ofm_dtype
return (
ifm,
weight,
scale_bias,
lut,
ifm_scale,
ifm_zero_point,
weight_zero_point,
ofm_scale,
ofm_zero_point,
strides,
padding,
dilation,
activation,
clip_min,
clip_max,
rounding_mode,
upscale,
ifm_layout,
ofm_layout,
ofm_dtype,
)
@tvm.ir.register_op_attr("contrib.ethosu.depthwise_conv2d", "FTVMCompute")
def create_ethosu_depthwise_conv2d_compute(attrs, args, out_type):
"""Create an ethosu_depthwise_conv2d compute op."""
params = _extract_ethosu_depthwise_conv2d_params(attrs, args)
op = depthwise_conv2d_compute(*params)
return [op]
@tvm.ir.register_op_attr("contrib.ethosu.depthwise_conv2d", "FTVMStrategy")
def depthwise_conv2d_strategy_ethosu(attrs, inputs, out_type, target):
strategy = OpStrategy()
strategy.add_implementation(
create_ethosu_depthwise_conv2d_compute,
_strategy.wrap_topi_schedule(schedule_injective),
name="ethosu_depthwise_conv2d",
)
return strategy
def ethosu_depthwise_conv2d(
ifm: tvm.relay.Expr,
weight: tvm.relay.Expr,
scale_bias: tvm.relay.Expr,
lut: tvm.relay.Expr,
ifm_scale: float,
ifm_zero_point: int,
weight_zero_point: int,
ofm_scale: float,
ofm_zero_point: int,
kernel_shape: Tuple[int, int],
ofm_channels: int,
strides: Tuple[int, int] = (1, 1),
padding: Tuple[int, int, int, int] = (0, 0, 0, 0),
dilation: Tuple[int, int] = (1, 1),
activation: str = "NONE",
clip_min: int = 0,
clip_max: int = 0,
rounding_mode: str = "TFL",
upscale: str = "NONE",
ifm_layout: str = "NHWC",
ofm_layout: str = "NHWC",
ofm_dtype: str = "int8",
) -> tvm.relay.Call:
"""This is a quantized 2D depthwise convolution operation as supported by
the NPU. It accepts either NHWC or NHCWB16 format
for the input data and OHWI format for the kernel weights.
Reference: https://developer.arm.com/documentation/102420/0200/
Note that the per-channel weight scale and bias tensor must be
packed together into a combined tensor of uint80s. This is represented
in TVM by a (channels, 10) tensor of type uint8. For more detail,
refer to the Technical Reference Manual linked above.
Parameters
----------
ifm : tvm.relay.Expr
The Input Feature Map tensor (IFM).
weight : tvm.relay.Expr
The weight tensor.
scale_bias : tvm.relay.Expr
The packed per-channel weight scale and bias tensor.
lut : tvm.relay.Expr
The look-up table of values to use if activation = "LUT"
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
weight_zero_point : int
The quantization zero point for the weight tensor.
ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
kernel_shape : tuple of int
The 2 dimensional kernel shape as (kernel_height, kernel_width).
ofm_channels : int
The number of the Output Feature Map channels.
strides : tuple of int, optional
The 2 dimensional strides as (stride_height, stride_width).
padding : tuple of int, optional
The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).
dilation : tuple of int, optional
The 2 dimensional dilation as (dilation_height, dilation_width).
activation : str, optional
The activation function to use.
"NONE" - no activation function.
"CLIP" - clip the output between clip_min and clip_max.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform
the activation function.
clip_min : int, optional
The minimum clipping value if activation = "CLIP"
    clip_max : int, optional
The maximum clipping value if activation = "CLIP"
rounding_mode : str, optional
The rounding mode to apply to the Output Feature Map tensor.
"TFL" - Tensorflow Lite rounding scheme.
"TRUNCATE" - Truncate towards zero.
"NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
upscale : str, optional
The 2x2 upscaling mode to apply to the Input Feature Map tensor.
"NONE" - no upscaling.
"NEAREST" - upscale using nearest neighbour.
"ZEROS" - upscale using zeros.
ifm_layout : str, optional
The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
ofm_layout : str, optional
The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
ofm_dtype : str, optional
The Output Feature Map tensor data type. Can be 'int8', 'uint8' or 'int16'.
Returns
-------
out : tvm.relay.Call
A call to the ethosu_depthwise_conv2d op.
"""
return _make.ethosu_depthwise_conv2d(
ifm,
weight,
scale_bias,
lut,
ifm_scale,
ifm_zero_point,
weight_zero_point,
ofm_scale,
ofm_zero_point,
kernel_shape,
ofm_channels,
strides,
padding,
dilation,
activation,
clip_min,
clip_max,
rounding_mode,
upscale,
ifm_layout,
ofm_layout,
ofm_dtype,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/op/identity.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Relay identity operator for Arm(R) Ethos(TM)-U NPU"""
import tvm
from tvm.relay.op import _make
from tvm.topi.generic import schedule_injective
from tvm.relay.op.op import OpStrategy
from tvm.relay.op import strategy as _strategy
from ..te import identity_compute
@tvm.ir.register_op_attr("contrib.ethosu.identity", "FTVMCompute")
def create_ethosu_identity_compute(attrs, args, out_type):
"""Create an ethosu_identity compute op."""
ifm = args[0]
lut = args[1]
ifm_scale = attrs.ifm_scale
ifm_zero_point = attrs.ifm_zero_point
ofm_scale = attrs.ofm_scale
ofm_zero_point = attrs.ofm_zero_point
activation = attrs.activation
op = identity_compute(
ifm, lut, ifm_scale, ifm_zero_point, ofm_scale, ofm_zero_point, activation
)
return [op]
@tvm.ir.register_op_attr("contrib.ethosu.identity", "FTVMStrategy")
def identity_strategy_ethosu(attrs, inputs, out_type, target):
strategy = OpStrategy()
strategy.add_implementation(
create_ethosu_identity_compute,
_strategy.wrap_topi_schedule(schedule_injective),
name="ethosu_identity",
)
return strategy
def ethosu_identity(
ifm: tvm.relay.Expr,
lut: tvm.relay.Expr,
ifm_scale: float = 1,
ifm_zero_point: int = 0,
ofm_scale: float = 1,
ofm_zero_point: int = 0,
activation: str = "NONE",
) -> tvm.relay.Call:
"""The Identity operator that runs on the NPU.
This operator takes in a tensor of any shape and returns the same tensor,
with the data optionally requantized.
Parameters
----------
ifm : tvm.relay.Expr
The Input Feature Map tensor (IFM).
lut : tvm.relay.Expr
The look-up table values to use if activation = "LUT", "TANH" or "SIGMOID".
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
activation : str, optional
The activation function to use.
"NONE" - no activation function.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
Returns
-------
out : tvm.relay.Call
A call to the ethosu_identity op.
"""
return _make.ethosu_identity(
ifm, lut, ifm_scale, ifm_zero_point, ofm_scale, ofm_zero_point, activation
)
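

# A minimal usage sketch (hypothetical quantization parameters, for illustration
# only): requantizing a tensor by folding the scale change into an identity op.
#
#   ifm = tvm.relay.var("ifm", shape=(1, 16), dtype="int8")
#   lut = tvm.relay.const([], dtype="int8")
#   ident = ethosu_identity(ifm, lut, ifm_scale=0.5, ifm_zero_point=10,
#                           ofm_scale=0.25, ofm_zero_point=0)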
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/op/op_attrs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The attributes node used for Arm(R) Ethos(TM)-U NPU Relay operators."""
from tvm.ir import Attrs
import tvm._ffi
@tvm._ffi.register_object("relay.attrs.EthosuConv2DAttrs")
class EthosuConv2DAttrs(Attrs):
"""Attributes for contrib.ethosu.conv2d."""
@tvm._ffi.register_object("relay.attrs.EthosuIdentityAttrs")
class EthosuIdentityAttrs(Attrs):
"""Attributes for contrib.ethosu.identity."""
@tvm._ffi.register_object("relay.attrs.EthosuDepthwiseConv2DAttrs")
class EthosuDepthwiseConv2DAttrs(Attrs):
"""Attributes for contrib.ethosu.depthwise_conv2d."""
@tvm._ffi.register_object("relay.attrs.EthosuPoolingAttrs")
class EthosuPooling2DAttrs(Attrs):
"""Attributes for contrib.ethosu.pooling."""
@tvm._ffi.register_object("relay.attrs.EthosuBinaryElementwiseAttrs")
class EthosuBinaryElementwiseAttrs(Attrs):
"""Attributes for contrib.ethosu.binary_elementwise"""
@tvm._ffi.register_object("relay.attrs.EthosuUnaryElementwiseAttrs")
class EthosuUnaryElementwiseAttrs(Attrs):
"""Attributes for contrib.ethosu.unary_elementwise"""
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/op/pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Relay operators for pooling for Arm(R) Ethos(TM)-U NPU"""
from typing import Tuple
import tvm
from tvm.relay.op import _make
from tvm.topi.generic import schedule_injective
from tvm.relay.op.op import OpStrategy
from tvm.relay.op import strategy as _strategy
from ..te import pooling_compute
def _extract_ethosu_pooling_params(attrs, args):
"""Get the parameters necessary to construct a ethosu_pooling compute TE
from a ethosu_pooling Relay call."""
ifm = args[0]
lut = args[1]
pooling_type = attrs.pooling_type
ifm_scale = attrs.ifm_scale
ifm_zero_point = attrs.ifm_zero_point
ofm_scale = attrs.ofm_scale
ofm_zero_point = attrs.ofm_zero_point
pool_shape = attrs.pool_shape
ofm_channels = attrs.ofm_channels
strides = attrs.strides
padding = attrs.padding
activation = attrs.activation
clip_min = attrs.clip_min
clip_max = attrs.clip_max
rounding_mode = attrs.rounding_mode
upscale = attrs.upscale
ifm_layout = attrs.ifm_layout
ofm_layout = attrs.ofm_layout
return (
ifm,
lut,
pooling_type,
ifm_scale,
ifm_zero_point,
ofm_scale,
ofm_zero_point,
pool_shape,
ofm_channels,
strides,
padding,
activation,
clip_min,
clip_max,
rounding_mode,
upscale,
ifm_layout,
ofm_layout,
)
@tvm.ir.register_op_attr("contrib.ethosu.pooling", "FTVMCompute")
def create_ethosu_pooling_compute(attrs, args, out_type):
"""Create an ethosu_pooling compute op."""
params = _extract_ethosu_pooling_params(attrs, args)
op = pooling_compute(*params)
return [op]
@tvm.ir.register_op_attr("contrib.ethosu.pooling", "FTVMStrategy")
def pooling_strategy_ethosu(attrs, inputs, out_type, target):
strategy = OpStrategy()
strategy.add_implementation(
create_ethosu_pooling_compute,
_strategy.wrap_topi_schedule(schedule_injective),
name="ethosu_pooling",
)
return strategy
def ethosu_pooling(
ifm: tvm.relay.Expr,
lut: tvm.relay.Expr,
pooling_type: str,
ifm_scale: float,
ifm_zero_point: int,
ofm_scale: float,
ofm_zero_point: int,
pool_shape: Tuple[int, int],
ofm_channels: int,
strides: Tuple[int, int] = (1, 1),
padding: Tuple[int, int, int, int] = (0, 0, 0, 0),
activation: str = "NONE",
clip_min: int = 0,
clip_max: int = 0,
rounding_mode: str = "TFL",
upscale: str = "NONE",
ifm_layout: str = "NHWC",
ofm_layout: str = "NHWC",
) -> tvm.relay.Call:
"""This is a quantized 2D pooling operation as supported by
the NPU. It accepts either NHWC or NHCWB16 format
for the input data.
Parameters
----------
ifm : tvm.relay.Expr
The Input Feature Map tensor (IFM).
lut : tvm.relay.Expr
The look-up table of values to use if activation = "LUT".
pooling_type: str
The type of the pooling. "AVG" - average pool, "MAX" - max pool.
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
pool_shape : tuple of int
The 2 dimensional pool shape as (pool_shape_height, pool_shape_width).
ofm_channels : int
        The number of the Output Feature Map channels.
strides : tuple of int, optional
The 2 dimensional strides as (stride_height, stride_width).
padding : tuple of int, optional
The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).
activation : str, optional
The activation function to use.
"NONE" - no activation function.
"CLIP" - clip the output between clip_min and clip_max.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
clip_min : int, optional
The minimum clipping value if activation = "CLIP".
clip_max : int, optional
The maximum clipping value if activation = "CLIP".
rounding_mode : str, optional
The rounding mode to apply to the Output Feature Map tensor.
"TFL" - Tensorflow Lite rounding scheme.
"TRUNCATE" - Truncate towards zero.
"NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
upscale: str, optional
The 2x2 upscaling mode to apply to the Input Feature Map tensor.
"NONE" - no upscaling.
"NEAREST" - upscale using nearest neighbour.
"ZEROS" - upscale using zeros.
ifm_layout : str, optional
The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
ofm_layout : str, optional
The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
Returns
-------
out : tvm.relay.Call
A call to the ethosu_pooling op.
"""
return _make.ethosu_pooling(
ifm,
lut,
pooling_type,
ifm_scale,
ifm_zero_point,
ofm_scale,
ofm_zero_point,
pool_shape,
ofm_channels,
strides,
padding,
activation,
clip_min,
clip_max,
rounding_mode,
upscale,
ifm_layout,
ofm_layout,
)
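def _example_max_pool():
    """Hedged usage sketch (editor's addition): a 2x2 max pool with stride 2
    over an NHWC feature map. The shape and quantization values below are
    illustrative assumptions."""
    from tvm import relay
    ifm = relay.var("ifm", shape=(1, 16, 16, 8), dtype="int8")
    lut = relay.const([], dtype="int8")
    return ethosu_pooling(
        ifm,
        lut,
        pooling_type="MAX",
        ifm_scale=1.0,
        ifm_zero_point=0,
        ofm_scale=1.0,
        ofm_zero_point=0,
        pool_shape=(2, 2),
        ofm_channels=8,
        strides=(2, 2),
    )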
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/op/unary_elementwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Relay operator for unary elementwise operations for Arm(R) Ethos(TM)-U NPU"""
from typing import Optional
import tvm
from tvm.relay.op import _make
from tvm.topi.generic import schedule_injective
from tvm.relay.op.op import OpStrategy
from tvm.relay.op import strategy as _strategy
from ..te import unary_elementwise_compute
def _extract_ethosu_unary_elementwise_params(attrs, args):
"""Get the parameters necessary to construct a ethosu_unary_elementwise compute TE
from a ethosu_unary_elementwise Relay call."""
ifm = args[0]
lut = args[1]
operator_type = attrs.operator_type
ifm_scale = attrs.ifm_scale
ifm_zero_point = attrs.ifm_zero_point
ofm_scale = attrs.ofm_scale
ofm_zero_point = attrs.ofm_zero_point
ofm_channels = attrs.ofm_channels
activation = attrs.activation
clip_min = attrs.clip_min
clip_max = attrs.clip_max
rounding_mode = attrs.rounding_mode
ifm_layout = attrs.ifm_layout
ofm_layout = attrs.ofm_layout
return (
ifm,
lut,
operator_type,
ifm_scale,
ifm_zero_point,
ofm_scale,
ofm_zero_point,
ofm_channels,
activation,
clip_min,
clip_max,
rounding_mode,
ifm_layout,
ofm_layout,
)
@tvm.ir.register_op_attr("contrib.ethosu.unary_elementwise", "FTVMCompute")
def create_ethosu_unary_elementwise_compute(attrs, args, out_type):
"""Create an ethosu_unary_elementwise compute op."""
params = _extract_ethosu_unary_elementwise_params(attrs, args)
op = unary_elementwise_compute(*params)
return [op]
@tvm.ir.register_op_attr("contrib.ethosu.unary_elementwise", "FTVMStrategy")
def unary_elementwise_strategy_ethosu(attrs, inputs, out_type, target):
strategy = OpStrategy()
strategy.add_implementation(
create_ethosu_unary_elementwise_compute,
_strategy.wrap_topi_schedule(schedule_injective),
name="ethosu_unary_elementwise",
)
return strategy
def ethosu_unary_elementwise(
ifm: tvm.relay.Expr,
lut: tvm.relay.Expr,
operator_type: str,
ifm_scale: float,
ifm_zero_point: int,
ofm_scale: float,
ofm_zero_point: int,
ofm_channels: int,
activation: Optional[str] = "NONE",
clip_min: Optional[int] = 0,
clip_max: Optional[int] = 0,
rounding_mode: Optional[str] = "TFL",
ifm_layout: Optional[str] = "NHWC",
ofm_layout: Optional[str] = "NHWC",
) -> tvm.relay.Call:
"""This is a quantized unary elementwise operation as supported by the
NPU. It accepts either NHWC or NHCWB16 format for the input data.
Parameters
----------
ifm : tvm.relay.Expr
The Input Feature Map tensor (IFM).
lut : tvm.relay.Expr
The look-up table values to use if activation = "LUT".
operator_type: str
The type of the unary elementwise operator.
"ABS"
"CLZ"
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
ofm_channels : int
The number of OFM channels.
activation : str, optional
The activation function to use.
"NONE" - no activation function.
"CLIP" - clip the output between clip_min and clip_max.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
clip_min : int, optional
The minimum clipping value if activation = "CLIP".
clip_max : int, optional
The maximum clipping value if activation = "CLIP".
rounding_mode : str, optional
The rounding mode to apply to the Output Feature Map tensor.
"TFL" - Tensorflow Lite rounding scheme.
"TRUNCATE" - Truncate towards zero.
"NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
ifm_layout : str, optional
The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
ofm_layout : str, optional
The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
Returns
-------
out : tvm.relay.Call
A call to the ethosu_unary_elementwise op.
"""
return _make.ethosu_unary_elementwise(
ifm,
lut,
operator_type,
ifm_scale,
ifm_zero_point,
ofm_scale,
ofm_zero_point,
ofm_channels,
activation,
clip_min,
clip_max,
rounding_mode,
ifm_layout,
ofm_layout,
)
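def _example_abs():
    """Hedged usage sketch (editor's addition): elementwise absolute value on
    the NPU. The shape and quantization values below are illustrative
    assumptions."""
    from tvm import relay
    ifm = relay.var("ifm", shape=(1, 8, 8, 4), dtype="int8")
    lut = relay.const([], dtype="int8")
    return ethosu_unary_elementwise(
        ifm,
        lut,
        operator_type="ABS",
        ifm_scale=1.0,
        ifm_zero_point=0,
        ofm_scale=1.0,
        ofm_zero_point=0,
        ofm_channels=4,
    )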
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/preprocess.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel
"""Set of passes to pre-process the IRModule to support Arm(R)-Ethos(TM)-U
NPU code generation. This set of passes will mutate both the main and the
external functions.
"""
import tvm # type: ignore
from . import _ffi_api # type: ignore
def preprocess_ext_io() -> tvm.transform.Pass:
"""This pass mutates the number of inputs going to / outputs coming out to/from
external functions to one. This is achieved via concatenation
of inputs and splitting of outputs in around the call to the external function.
Returns
-------
ret : tvm.transform.Pass
The registered pass to mutate the IO of the external functions and their calls.
"""
return _ffi_api.PreprocessExternalFuncIO() # type: ignore # pylint: disable=no-member
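def _example_apply_pass(mod: tvm.IRModule) -> tvm.IRModule:
    """Hedged usage sketch (editor's addition): the returned object is an
    ordinary tvm.transform.Pass, so it is applied by calling it on an IRModule,
    typically after the Ethos(TM)-U functions have been partitioned out of the
    main graph."""
    return preprocess_ext_io()(mod)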
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/te/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tensor Expressions for the NPU"""
from .convolution import *
from .depthwise import *
from .pooling import *
from .binary_elementwise import *
from .identity import *
from .unary_elementwise import *
from .inline import *
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/te/binary_elementwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Tensor Expressions for binary_elementwise"""
import operator
import numpy as np
from tvm import te
from tvm.contrib.ethosu.cascader import TESubgraph, EthosuPart, Propagator, register_matcher
from .dma import dma_ofm_compute, dma_ifm_compute
from .common import get_layout_transform_matrices
def binary_elementwise_compute(
ifm: te.Tensor,
ifm2: te.Tensor,
lut: te.Tensor,
operator_type: str,
ifm_scale: float,
ifm_zero_point: int,
ifm2_scale: float,
ifm2_zero_point: int,
ofm_scale: float,
ofm_zero_point: int,
ifm_channels: int,
ifm2_channels: int,
reversed_operands: bool,
activation: str,
clip_min: int,
clip_max: int,
rounding_mode: str,
ifm_layout: str,
ifm2_layout: str,
ofm_layout: str,
ofm_dtype: str,
) -> te.Tensor:
"""A compute operator representing the capabilities of binary_elementwise for the NPU.
Parameters
----------
ifm : te.Tensor
The Input Feature Map tensor (IFM).
ifm2 : te.Tensor
The Input Feature Map tensor 2 (IFM2).
lut : te.Tensor
The look-up table values to use if activation = "LUT".
operator_type: str
The type of the binary elementwise operator.
"ADD"
"SUB"
"MUL"
"MIN"
"MAX"
"SHR"
"SHL"
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
ifm2_scale : float
The quantization scale for the Input Feature Map tensor 2.
ifm2_zero_point : int
        The quantization zero point for the Input Feature Map tensor 2.
ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
ifm_channels : int
The number of the Input Feature Map channels.
ifm2_channels : int
The number of the Input Feature Map 2 channels.
reversed_operands : bool
True if IFM2 is the first operand and IFM is the second operand.
activation : str
The activation function to use.
"NONE" - no activation function.
"CLIP" - clip the output between clip_min and clip_max.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
        Available activations by operand data type:
{int8, uint8}: "NONE", "CLIP", "TANH", "SIGMOID", "LUT"
{int32}: "NONE"
clip_min : int
The minimum clipping value if activation = "CLIP".
clip_max : int
The maximum clipping value if activation = "CLIP".
rounding_mode : str
The rounding mode to apply to the Output Feature Map tensor.
"TFL" - Tensorflow Lite rounding scheme.
"TRUNCATE" - Truncate towards zero.
"NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
ifm_layout : str, optional
The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
ifm2_layout : str, optional
The layout of the Input Feature Map tensor 2. Can be "NHWC" or "NHCWB16".
ofm_layout : str, optional
The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
ofm_dtype: str
The Output Feature Map tensor type.
MUL, ADD, SUB {IFM}->{OFM}:
            {uint8, int8, int32} -> {uint8, int8, int32}, any pairing
MAX, MIN:
IFM and OFM must be of the same type, one of:
{int8, uint8}
SHR {IFM}->{OFM}:
            {int32} -> {int8, uint8, int32}, any pairing
SHL:
{int32}->{int32} only
Returns
-------
te.Tensor
The Output Feature Map tensor.
"""
assert ifm.shape[0] == 1
assert ifm2.shape[0] == 1
assert ifm_layout in {"NHWC", "NHCWB16"}
assert ifm2_layout in {"NHWC", "NHCWB16"}
assert ofm_layout in {"NHWC", "NHCWB16"}
# Compute operation for the IFM DMA pipeline
dmaed_ifm = dma_ifm_compute(
ifm, ifm_layout, ifm_zero_point, ifm_scale, ifm_channels, (0, 0, 0, 0)
)
dmaed_ifm2 = dma_ifm_compute(
ifm2, ifm2_layout, ifm2_zero_point, ifm2_scale, ifm2_channels, (0, 0, 0, 0)
)
# Binary elementwise compute operation
ofm_height = dmaed_ifm.shape[1]
ofm_width = dmaed_ifm.shape[2]
binary_elementwise_attrs = {
"op": "ethosu_binary_elementwise",
"operator_type": operator_type,
"reversed_operands": reversed_operands,
"activation": activation,
"clip_min": clip_min,
"clip_max": clip_max,
"rounding_mode": rounding_mode,
}
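    # Editor's note: the Python-level operators below only build a placeholder
    # TE compute with the correct shape and dtype; the actual arithmetic is
    # performed by the NPU after lowering. This is presumably why "SHR" and
    # "SHL" map to operator.add rather than to real shift operations.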
operators = {
"ADD": operator.add,
"SUB": operator.sub,
"MUL": operator.mul,
"MIN": te.min,
"MAX": te.max,
"SHR": operator.add,
"SHL": operator.add,
}
broadcast = [value == 1 for value in dmaed_ifm2.shape]
if reversed_operands:
binary_elementwise = te.compute(
(1, ofm_height, ofm_width, ifm_channels),
lambda nn, hh, ww, cc: operators[operator_type](
dmaed_ifm2(
0 if broadcast[0] else nn,
0 if broadcast[1] else hh,
0 if broadcast[2] else ww,
0 if broadcast[3] else cc,
).astype(ifm.dtype),
dmaed_ifm(nn, hh, ww, cc).astype(ifm.dtype),
).astype(ofm_dtype),
name="ethosu_binary_elementwise",
attrs=binary_elementwise_attrs,
)
else:
binary_elementwise = te.compute(
(1, ofm_height, ofm_width, ifm_channels),
lambda nn, hh, ww, cc: operators[operator_type](
dmaed_ifm(nn, hh, ww, cc).astype(ifm.dtype),
dmaed_ifm2(
0 if broadcast[0] else nn,
0 if broadcast[1] else hh,
0 if broadcast[2] else ww,
0 if broadcast[3] else cc,
).astype(ifm.dtype),
).astype(ofm_dtype),
name="ethosu_binary_elementwise",
attrs=binary_elementwise_attrs,
)
nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(int(ifm_channels))
ifm_matrix = [
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
ifm2_matrix = [
[1, 0, 0, 0, 0],
[0, (1 - int(broadcast[1])), 0, 0, int(broadcast[1])],
[0, 0, (1 - int(broadcast[2])), 0, int(broadcast[2])],
[0, 0, 0, (1 - int(broadcast[3])), int(broadcast[3])],
[0, 0, 0, 0, 1],
]
if ofm_layout == "NHCWB16":
ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
ifm2_matrix = np.matmul(ifm2_matrix, nhcwb16_to_nhwc).tolist()
if ifm_layout == "NHCWB16":
ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
if ifm2_layout == "NHCWB16":
ifm2_matrix = np.matmul(nhwc_to_nhcwb16, ifm2_matrix).tolist()
ifm_propagator = Propagator(
ifm_matrix,
[0, 0, 0, 0] if ifm_layout == "NHWC" else [0, 0, 0, 0, 0],
)
ifm2_propagator = Propagator(
ifm2_matrix,
[0, 0, 0, 0] if ifm2_layout == "NHWC" else [0, 0, 0, 0, 0],
)
propagator_attrs = {
"ifm_propagator": ifm_propagator,
"ifm2_propagator": ifm2_propagator,
}
# Compute operation for the OFM DMA pipeline
return dma_ofm_compute(
binary_elementwise,
ofm_layout,
ofm_zero_point,
ofm_scale,
ifm_channels,
attrs=propagator_attrs,
)
@register_matcher
def match_ethosu_binary_elementwise(output_tensor, device_config):
"""Match a Tensor Expression corresponding to an NPU Binary Elementwise.
If the Tensor Expression matches, an EthosuPart will be created that models the
matched Tensor Expression. Otherwise, None will be returned.
Parameters
----------
output_tensor : tvm.te.Tensor
The tensor to attempt to match with.
device_config : EthosuDeviceConfig
Target device configuration
Returns
-------
Union[None, EthosuPart]
The created EthosuPart if there was a match, otherwise None.
"""
write = output_tensor
if write.op.name != "ethosu_write":
return None
convert_to_nhcwb16 = write.op.input_tensors[0]
if convert_to_nhcwb16.op.name != "ethosu_convert_to_nhcwb16":
return None
binary_elementwise = convert_to_nhcwb16.op.input_tensors[0]
if binary_elementwise.op.name != "ethosu_binary_elementwise":
return None
pad = binary_elementwise.op.input_tensors[0]
if pad.op.name != "ethosu_pad":
return None
upscale = pad.op.input_tensors[0]
if upscale.op.name != "ethosu_upscale":
return None
convert_to_nhwc = upscale.op.input_tensors[0]
if convert_to_nhwc.op.name != "ethosu_convert_to_nhwc":
return None
read = convert_to_nhwc.op.input_tensors[0]
if read.op.name != "ethosu_read":
return None
pad2 = binary_elementwise.op.input_tensors[1]
if pad2.op.name != "ethosu_pad":
return None
upscale2 = pad2.op.input_tensors[0]
if upscale2.op.name != "ethosu_upscale":
return None
convert_to_nhwc2 = upscale2.op.input_tensors[0]
if convert_to_nhwc2.op.name != "ethosu_convert_to_nhwc":
return None
read2 = convert_to_nhwc2.op.input_tensors[0]
if read2.op.name != "ethosu_read":
return None
input_tensors = [
read.op.input_tensors[0],
read2.op.input_tensors[0],
]
subgraph = TESubgraph(input_tensors, output_tensor)
propagators = [
write.op.attrs["ifm_propagator"],
write.op.attrs["ifm2_propagator"],
]
ifm_dtype = input_tensors[0].dtype
ofm_dtype = output_tensor.dtype
output_layout = convert_to_nhcwb16.op.attrs["layout"]
input_layout = convert_to_nhwc.op.attrs["layout"]
input2_layout = convert_to_nhwc2.op.attrs["layout"]
output_quantum = device_config.get_output_quantum(output_layout)
block_config = device_config.get_elementwise_block_config(
propagators[0],
propagators[1],
binary_elementwise.op.attrs,
output_tensor.shape,
output_layout,
input_layout,
input2_layout,
ifm_dtype,
ofm_dtype,
)
return EthosuPart(
subgraph,
propagators,
output_quantum,
1,
block_config,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/te/common.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common methods for the NPU tensor expressions"""
from typing import Tuple, List
def get_layout_transform_matrices(ofm_channels: int) -> Tuple[List[List[float]], List[List[float]]]:
"""Get the NHWC->NHCWB16 and NHCWB16->NHWC layout transform matrices.
For information about the supported layouts see https://developer.arm.com/documentation/102420/
0200/Functional-description/Control-and-data-flow/Supported-memory-formats-for-feature-maps
Parameters
----------
ofm_channels : int
The number of output channels in a NHWC layout
Returns
-------
nhwc_to_nhcwb16, nhcwb16_to_nhwc : Tuple[List[List[float]], List[List[float]]]
The layout transformation matrices
"""
# The value of the last dimension (B16) is always 16.
nhwc_to_nhcwb16 = [
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 1 / 16, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 16],
[0, 0, 0, 0, 1],
]
# When we convert from NHWC to NHCWB16, the new C value is given by
# (ofm_channels - 1) // 16 + 1, which is a lossy operation, so we need to use
# the actual value of channels in the transform matrix to accurately recover
# the C in NHWC when we convert from NHCWB16 to NHWC.
nhcwb16_to_nhwc = [
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
        # We need to offset only if the number of ofm_channels is not divisible by 16.
        # Moreover, we can't use just "ofm_channels" as the last element because
        # the propagation matrices are used to propagate block configs as well.
[0, 0, 16, 0, 0, -(int(ofm_channels % 16 != 0)) * (16 - ofm_channels % 16)],
[0, 0, 0, 0, 0, 1],
]
return nhwc_to_nhcwb16, nhcwb16_to_nhwc
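def _example_layout_round_trip():
    """Hedged worked example (editor's addition): for a channel count that is a
    multiple of 16 the two matrices are exact inverses on shape vectors. The
    NHWC shape (1, 8, 8, 32), written as the affine vector [1, 8, 8, 32, 1],
    maps to the NHCWB16 vector [1, 8, 2, 8, 16, 1] and back."""
    import numpy as np
    nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(32)
    nhwc = [1, 8, 8, 32, 1]
    nhcwb16 = np.matmul(nhwc_to_nhcwb16, nhwc)  # -> [1., 8., 2., 8., 16., 1.]
    assert np.matmul(nhcwb16_to_nhwc, nhcwb16).tolist() == nhwc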
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/te/convolution.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Tensor Expressions for convolutions for the NPU"""
from typing import Tuple, Union, List
import numpy as np # type: ignore
from tvm import te # type: ignore
from tvm.contrib.ethosu.cascader import TESubgraph, EthosuPart, Propagator, register_matcher
from .dma import dma_ofm_compute, dma_ifm_compute
from .common import get_layout_transform_matrices
def conv2d_compute(
ifm: te.Tensor,
weight: te.Tensor,
scale_bias: te.Tensor,
lut: te.Tensor,
ifm_scale: float,
ifm_zero_point: int,
weight_zero_point: int,
ofm_scale: float,
ofm_zero_point: int,
strides: Tuple[int, int],
padding: Tuple[int, int, int, int],
dilation: Union[Tuple[int, int], List[int]],
activation: str,
clip_min: int,
clip_max: int,
rounding_mode: str,
upscale: str,
ifm_layout: str,
ofm_layout: str,
) -> te.Tensor:
"""A compute operator representing the capabilities of a 2D convolution for the NPU.
Parameters
----------
ifm : te.Tensor
The Input Feature Map tensor (IFM).
weight : te.Tensor
The weight tensor.
scale_bias : te.Tensor
The packed per-channel weight scale and bias tensor.
lut : te.Tensor
The look-up table of values to use if activation = "LUT".
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
weight_zero_point : int
The quantization zero point for the weight tensor.
ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
strides : tuple
The 2 dimensional strides as (stride_height, stride_width).
padding : tuple
The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).
dilation : Union[Tuple[int, int], List[int]]
The 2 dimensional dilation as (dilation_height, dilation_width).
activation : str
The activation function to use.
"NONE" - no activation function.
"CLIP" - clip the output between clip_min and clip_max.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
clip_min : int
The minimum clipping value if activation = "CLIP".
clip_max : int
The maximum clipping value if activation = "CLIP".
rounding_mode : str
The rounding mode to apply to the Output Feature Map tensor.
"TFL" - Tensorflow Lite rounding scheme.
"TRUNCATE" - Truncate towards zero.
"NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
upscale : str
The 2x2 upscaling mode to apply to the Input Feature Map tensor.
"NONE" - no upscaling.
"NEAREST" - upscale using nearest neighbour.
"ZEROS" - upscale using zeros.
"NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
ifm_layout : str
The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
ofm_layout : str
The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
Returns
-------
te.Tensor
The OFM tensor.
"""
assert ifm.shape[0] == 1
assert ifm_layout in {"NHWC", "NHCWB16"}
assert ofm_layout in {"NHWC", "NHCWB16"}
padding = [int(v) for v in padding]
stride_h, stride_w = [int(v) for v in strides]
dilation_h, dilation_w = [int(v) for v in dilation]
ofm_channels, kernel_h, kernel_w, ifm_channels = [int(v) for v in weight.shape]
upscale_factor = 2 if upscale != "NONE" else 1
# Compute operation for the IFM DMA pipeline
dmaed_ifm = dma_ifm_compute(
ifm,
ifm_layout,
ifm_zero_point,
ifm_scale,
weight.shape[3],
padding,
upscale_factor,
)
# 2D Convolution compute operation
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
ofm_height = (dmaed_ifm.shape[1] - dilated_kernel_h) // stride_h + 1
ofm_width = (dmaed_ifm.shape[2] - dilated_kernel_w) // stride_w + 1
rc = te.reduce_axis((0, ifm_channels), name="rc")
rh = te.reduce_axis((0, kernel_h), name="ry")
rw = te.reduce_axis((0, kernel_w), name="rx")
conv2d_attrs = {
"op": "ethosu_conv2d",
"weight_zero_point": weight_zero_point,
"activation": activation,
"upscale": upscale,
"clip_min": clip_min,
"clip_max": clip_max,
"rounding_mode": rounding_mode,
"stride_h": stride_h,
"stride_w": stride_w,
"dilation_h": dilation_h,
"dilation_w": dilation_w,
}
has_lut = activation in ("TANH", "LUT", "SIGMOID")
# This is a trick to insert the LUT tensor into the TE graph if LUT is present
lut_expr = (lut[0] + lut[255]).astype(ifm.dtype) if has_lut else 0
# Add the LUT tensor to the attributes to be able to later tell which tensor is the LUT
if has_lut:
conv2d_attrs["lut"] = lut
conv = te.compute(
(1, ofm_height, ofm_width, ofm_channels),
lambda nn, hh, ww, cc: te.sum(
dmaed_ifm(
nn, hh * stride_h + rh * dilation_h, ww * stride_w + rw * dilation_w, rc
).astype(ifm.dtype)
* weight[cc, rh, rw, rc].astype(ifm.dtype)
# This is a trick to load 10 elements of the scale_bias at once, not accurate maths
+ (scale_bias[cc, 0] * scale_bias[cc, 9] + lut_expr).astype(ifm.dtype),
axis=[rh, rw, rc],
),
name="ethosu_conv2d",
attrs=conv2d_attrs,
)
nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(ofm_channels)
ifm_matrix = [
[1, 0, 0, 0, 0],
[0, stride_h, 0, 0, (dilated_kernel_h - stride_h)],
[0, 0, stride_w, 0, (dilated_kernel_w - stride_w)],
[0, 0, 0, 0, ifm_channels],
[0, 0, 0, 0, 1],
]
weights_matrix = [
[0, 0, 0, 1, 0],
[0, 0, 0, 0, kernel_h],
[0, 0, 0, 0, kernel_w],
[0, 0, 0, 0, ifm_channels],
[0, 0, 0, 0, 1],
]
bias_matrix = [
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 10],
[0, 0, 0, 0, 1],
]
if ofm_layout == "NHCWB16":
ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
weights_matrix = np.matmul(weights_matrix, nhcwb16_to_nhwc).tolist()
bias_matrix = np.matmul(bias_matrix, nhcwb16_to_nhwc).tolist()
if ifm_layout == "NHCWB16":
ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
ifm_propagator = Propagator(
ifm_matrix,
[0, -padding[0], -padding[1], 0]
if ifm_layout == "NHWC"
else [0, -padding[0], 0, -padding[1], 0],
)
weights_propagator = Propagator(
weights_matrix,
[0, 0, 0, 0],
)
bias_propagator = Propagator(
bias_matrix,
[0, 0],
)
propagator_attrs = {
"ifm_propagator": ifm_propagator,
"weights_propagator": weights_propagator,
"bias_propagator": bias_propagator,
}
# Compute operation for the OFM DMA pipeline
dma_ofm = dma_ofm_compute(
conv, ofm_layout, ofm_zero_point, ofm_scale, ofm_channels, attrs=propagator_attrs
)
return dma_ofm
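def _example_ifm_propagation():
    """Hedged worked example (editor's addition): the ifm_matrix above encodes
    how large an input region an output block needs. With stride 1 and a
    dilated 3x3 kernel, an output block of (1, 4, 4, 8) needs an input block
    of (1, 4 + 2, 4 + 2, ifm_channels). The values below are illustrative."""
    stride, dilated_kernel, in_channels = 1, 3, 16
    matrix = [
        [1, 0, 0, 0, 0],
        [0, stride, 0, 0, dilated_kernel - stride],
        [0, 0, stride, 0, dilated_kernel - stride],
        [0, 0, 0, 0, in_channels],
        [0, 0, 0, 0, 1],
    ]
    assert np.matmul(matrix, [1, 4, 4, 8, 1]).tolist() == [1, 6, 6, 16, 1]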
@register_matcher
def match_ethosu_conv2d(output_tensor, device_config):
"""Match a Tensor Expression corresponding to an NPU Conv2D.
If the Tensor Expression matches, an EthosuPart will be created that models the
matched Tensor Expression. Otherwise, None will be returned.
Parameters
----------
output_tensor : tvm.te.Tensor
The tensor to attempt to match with.
device_config : EthosuDeviceConfig
Target device configuration
Returns
-------
Union[None, EthosuPart]
The created EthosuPart if there was a match, otherwise None.
"""
write = output_tensor
if write.op.name != "ethosu_write":
return None
convert_to_nhcwb16 = write.op.input_tensors[0]
if convert_to_nhcwb16.op.name != "ethosu_convert_to_nhcwb16":
return None
conv2d = convert_to_nhcwb16.op.input_tensors[0]
if conv2d.op.name != "ethosu_conv2d":
return None
pad = conv2d.op.input_tensors[0]
if pad.op.name != "ethosu_pad":
return None
upscale = pad.op.input_tensors[0]
if upscale.op.name != "ethosu_upscale":
return None
convert_to_nhwc = upscale.op.input_tensors[0]
if convert_to_nhwc.op.name != "ethosu_convert_to_nhwc":
return None
read = convert_to_nhwc.op.input_tensors[0]
if read.op.name != "ethosu_read":
return None
input_tensors = [
read.op.input_tensors[0],
conv2d.op.input_tensors[1],
conv2d.op.input_tensors[2],
]
subgraph = TESubgraph(input_tensors, output_tensor)
propagators = [
write.op.attrs["ifm_propagator"],
write.op.attrs["weights_propagator"],
write.op.attrs["bias_propagator"],
]
ifm_dtype = input_tensors[0].dtype
ofm_dtype = output_tensor.dtype
    # Use channels from the weights tensor since its shape doesn't change during
    # layout conversion
ifm_channels = int(input_tensors[1].shape[3])
ofm_channels, kernel_height, kernel_width = (int(axis) for axis in input_tensors[1].shape[0:3])
kernel_elements = kernel_height * kernel_width
is_part_kernel = device_config.is_partkernel(
conv2d.op.name, ifm_channels, ifm_dtype, kernel_elements
)
subkernels = len(
device_config.get_kernel_steps(
conv2d.op.name, kernel_height, kernel_width, ifm_dtype, is_part_kernel
)
)
output_layout = convert_to_nhcwb16.op.attrs["layout"]
input_layout = convert_to_nhwc.op.attrs["layout"]
output_quantum = device_config.get_output_quantum(output_layout)
valid_block_configs = device_config.get_valid_block_configs(
propagators[0],
conv2d.op.attrs,
output_tensor.shape,
ofm_channels,
ifm_channels,
output_layout,
input_layout,
ifm_dtype,
ofm_dtype,
kernel_height,
kernel_width,
)
return EthosuPart(
subgraph,
propagators,
output_quantum,
subkernels,
valid_block_configs,
1,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/te/depthwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Tensor Expressions for depthwise convolutions"""
from typing import Tuple, Union, List
import numpy as np
from tvm import te
from tvm.contrib.ethosu.cascader import TESubgraph, EthosuPart, Propagator, register_matcher
from .dma import dma_ofm_compute, dma_ifm_compute
from .common import get_layout_transform_matrices
def depthwise_conv2d_compute(
ifm: te.Tensor,
weight: te.Tensor,
scale_bias: te.Tensor,
lut: te.Tensor,
ifm_scale: float,
ifm_zero_point: int,
weight_zero_point: int,
ofm_scale: float,
ofm_zero_point: int,
strides: Tuple[int, int],
padding: Tuple[int, int, int, int],
dilation: Union[Tuple[int, int], List[int]],
activation: str,
clip_min: int,
clip_max: int,
rounding_mode: str,
upscale: str,
ifm_layout: str,
ofm_layout: str,
ofm_dtype: str,
) -> te.Tensor:
"""A compute operator representing the capabilities of 2D convolution for the NPU.
Parameters
----------
ifm : te.Tensor
The Input Feature Map tensor (IFM).
weight : te.Tensor
The weight tensor.
scale_bias : te.Tensor
The packed per-channel weight scale and bias tensor.
lut : te.Tensor
The look-up table of values to use if activation = "LUT".
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
weight_zero_point : int
The quantization zero point for the weight tensor.
ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
strides : tuple
The 2 dimensional strides as (stride_height, stride_width).
padding : tuple
The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).
    dilation : Union[Tuple[int, int], List[int]]
The 2 dimensional dilation as (dilation_height, dilation_width).
activation : str
The activation function to use.
"NONE" - no activation function.
"CLIP" - clip the output between clip_min and clip_max.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
clip_min : int
The minimum clipping value if activation = "CLIP".
clip_max : int
The maximum clipping value if activation = "CLIP".
rounding_mode : str
The rounding mode to apply to the Output Feature Map tensor.
"TFL" - Tensorflow Lite rounding scheme.
"TRUNCATE" - Truncate towards zero.
"NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
upscale : str
The 2x2 upscaling mode to apply to the Input Feature Map tensor.
"NONE" - no upscaling.
"NEAREST" - upscale using nearest neighbour.
"ZEROS" - upscale using zeros.
ifm_layout : str
The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
ofm_layout : str
The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
    ofm_dtype : str
        The Output Feature Map tensor data type. Can be 'int8', 'uint8' or 'int16'.
Returns
-------
te.Tensor
The OFM tensor.
"""
assert ifm.shape[0] == 1, f"Only batch size 1 is supported"
assert ifm_layout in {"NHWC", "NHCWB16"}
assert ofm_layout in {"NHWC", "NHCWB16"}
padding = [int(v) for v in padding]
stride_h, stride_w = [int(v) for v in strides]
dilation_h, dilation_w = [int(v) for v in dilation]
channels, kernel_h, kernel_w, _ = [int(v) for v in weight.shape]
# Compute operation for the IFM DMA pipeline
dmaed_ifm = dma_ifm_compute(ifm, ifm_layout, ifm_zero_point, ifm_scale, channels, padding)
# 2D Depthwise Convolution compute operation
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
ofm_height = (dmaed_ifm.shape[1] - dilated_kernel_h) // stride_h + 1
ofm_width = (dmaed_ifm.shape[2] - dilated_kernel_w) // stride_w + 1
rh = te.reduce_axis((0, kernel_h), name="ry")
rw = te.reduce_axis((0, kernel_w), name="rx")
depthwise_conv2d_attrs = {
"op": "ethosu_depthwise_conv2d",
"weight_zero_point": weight_zero_point,
"activation": activation,
"clip_min": clip_min,
"clip_max": clip_max,
"rounding_mode": rounding_mode,
"upscale": upscale,
"stride_h": stride_h,
"stride_w": stride_w,
"dilation_h": dilation_h,
"dilation_w": dilation_w,
}
has_lut = activation in ("TANH", "LUT", "SIGMOID")
# This is a trick to insert the LUT tensor into the TE graph if LUT is present
lut_expr = (lut[0] + lut[255]).astype(ifm.dtype) if has_lut else 0
# Add the LUT tensor to the attributes to be able to later tell which tensor is the LUT
if has_lut:
depthwise_conv2d_attrs["lut"] = lut
depthwise = te.compute(
(1, ofm_height, ofm_width, channels),
lambda nn, hh, ww, cc: te.sum(
(
dmaed_ifm(
nn, hh * stride_h + rh * dilation_h, ww * stride_w + rw * dilation_w, cc
).astype(ifm.dtype)
* weight[cc, rh, rw, 0].astype(ifm.dtype)
# This is a trick to load 10 elements of the scale_bias at once, not accurate maths
+ (scale_bias[cc, 0] * scale_bias[cc, 9] + lut_expr).astype(ifm.dtype)
).astype(ofm_dtype),
axis=[rh, rw],
),
name="ethosu_depthwise_conv2d",
attrs=depthwise_conv2d_attrs,
)
nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(channels)
ifm_matrix = [
[1, 0, 0, 0, 0],
[0, stride_h, 0, 0, (dilated_kernel_h - stride_h)],
[0, 0, stride_w, 0, (dilated_kernel_w - stride_w)],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
weights_matrix = [
[0, 0, 0, 1, 0],
[0, 0, 0, 0, kernel_h],
[0, 0, 0, 0, kernel_w],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
]
bias_matrix = [
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 10],
[0, 0, 0, 0, 1],
]
if ofm_layout == "NHCWB16":
ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
weights_matrix = np.matmul(weights_matrix, nhcwb16_to_nhwc).tolist()
bias_matrix = np.matmul(bias_matrix, nhcwb16_to_nhwc).tolist()
if ifm_layout == "NHCWB16":
ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
ifm_propagator = Propagator(
ifm_matrix,
[0, -padding[0], -padding[1], 0]
if ifm_layout == "NHWC"
else [0, -padding[0], 0, -padding[1], 0],
)
weights_propagator = Propagator(
weights_matrix,
[0, 0, 0, 0],
)
bias_propagator = Propagator(
bias_matrix,
[0, 0],
)
propagator_attrs = {
"ifm_propagator": ifm_propagator,
"weights_propagator": weights_propagator,
"bias_propagator": bias_propagator,
}
# Compute operation for the OFM DMA pipeline
return dma_ofm_compute(
depthwise, ofm_layout, ofm_zero_point, ofm_scale, channels, attrs=propagator_attrs
)
@register_matcher
def match_ethosu_depthwise_conv2d(output_tensor, device_config):
"""Match a Tensor Expression corresponding to an NPU Depthwise Conv2D.
If the Tensor Expression matches, an EthosuPart will be created that models the
matched Tensor Expression. Otherwise, None will be returned.
Parameters
----------
output_tensor : tvm.te.Tensor
The tensor to attempt to match with.
device_config : EthosuDeviceConfig
Target device configuration.
Returns
-------
Union[None, EthosuPart]
The created EthosuPart if there was a match, otherwise None.
"""
write = output_tensor
if write.op.name != "ethosu_write":
return None
convert_to_nhcwb16 = write.op.input_tensors[0]
if convert_to_nhcwb16.op.name != "ethosu_convert_to_nhcwb16":
return None
depthwise2d = convert_to_nhcwb16.op.input_tensors[0]
if depthwise2d.op.name != "ethosu_depthwise_conv2d":
return None
pad = depthwise2d.op.input_tensors[0]
if pad.op.name != "ethosu_pad":
return None
upscale = pad.op.input_tensors[0]
if upscale.op.name != "ethosu_upscale":
return None
convert_to_nhwc = upscale.op.input_tensors[0]
if convert_to_nhwc.op.name != "ethosu_convert_to_nhwc":
return None
read = convert_to_nhwc.op.input_tensors[0]
if read.op.name != "ethosu_read":
return None
input_tensors = [
read.op.input_tensors[0],
depthwise2d.op.input_tensors[1],
depthwise2d.op.input_tensors[2],
]
subgraph = TESubgraph(input_tensors, output_tensor)
propagators = [
write.op.attrs["ifm_propagator"],
write.op.attrs["weights_propagator"],
write.op.attrs["bias_propagator"],
]
ifm_dtype = input_tensors[0].dtype
ofm_dtype = output_tensor.dtype
channels, kernel_height, kernel_width = (int(axis) for axis in input_tensors[1].shape[0:3])
subkernels = len(
device_config.get_kernel_steps(depthwise2d.op.name, kernel_height, kernel_width, ifm_dtype)
)
output_layout = convert_to_nhcwb16.op.attrs["layout"]
input_layout = convert_to_nhwc.op.attrs["layout"]
output_quantum = device_config.get_output_quantum(output_layout)
valid_block_configs = device_config.get_valid_block_configs(
propagators[0],
depthwise2d.op.attrs,
output_tensor.shape,
channels,
channels,
output_layout,
input_layout,
ifm_dtype,
ofm_dtype,
kernel_height,
kernel_width,
)
return EthosuPart(
subgraph,
propagators,
output_quantum,
subkernels,
valid_block_configs,
1,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/te/dma.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unnecessary-lambda
"""Tensor Expressions for operations supported by the NPU DMA engine"""
from typing import Callable, Tuple, Optional, List
import tvm # type: ignore
from tvm import te
from tvm.topi.utils import equal_const_int # type: ignore
def _pad_tensor(
tensor: te.Tensor, pad_before: List[int], pad_after: Optional[List[int]] = None
) -> Callable:
"""Generate a padded tensor.
Parameters
----------
tensor : te.Tensor
The tensor to pad.
pad_before : tuple of int
The 'before' padding on each axis.
pad_after : tuple of int
The 'after' padding on each axis.
Returns
-------
    _pad : Callable
        A function that indexes into the padded tensor, returning zero for
        elements inside the padded region.
"""
pad_after = pad_after or pad_before
dims = len(tensor.shape)
assert len(pad_before) == dims
assert len(pad_after) == dims
def _pad(*indices):
not_zero = [] # A list of padding conditions that aren't trivial (zero padding)
index_tuple = [] # The indices with which to access the padded tensor
for i in range(dims):
if equal_const_int(pad_before[i], 0) and equal_const_int(pad_after[i], 0):
index_tuple.append(indices[i])
else:
index_tuple.append(indices[i] - pad_before[i])
not_zero.append(indices[i] >= pad_before[i])
not_zero.append(indices[i] < tensor.shape[i] + pad_before[i])
if not_zero:
not_zero = tvm.tir.all(*not_zero)
return tvm.tir.if_then_else(
not_zero, tensor(*index_tuple), tvm.tir.const(0, tensor.dtype)
)
return tensor(*index_tuple)
return _pad
def read_compute(
tensor: te.Tensor, zero_point: int, scale: float, layout: Optional[str] = None
) -> te.Tensor:
"""A tensor expression which represents a read.
Parameters
----------
tensor : te.Tensor
The tensor to read.
zero_point : int
The zero point of the tensor.
scale : float
The scale of the tensor.
layout : Optional[str]
The layout of the tensor, either NHWC or NHCWB16.
Returns
-------
te.Tensor
The tensor having been read.
"""
read_attrs = {
"op": "ethosu_read",
"zero_point": zero_point,
"scale": scale,
}
if layout:
assert layout in {"NHWC", "NHCWB16"}
read_attrs["layout"] = layout
return te.compute(tensor.shape, lambda *i: tensor(*i), name="ethosu_read", attrs=read_attrs)
def write_compute(
tensor: te.Tensor,
zero_point: int,
scale: float,
layout: Optional[str] = None,
attrs: dict = None,
) -> te.Tensor:
"""A tensor expression which represents a write.
Parameters
----------
tensor : te.Tensor
The tensor to write.
zero_point : int
The zero point of the tensor.
scale : float
The scale of the tensor.
layout : Optional[str]
The layout of the tensor, either NHWC or NHCWB16.
attrs : dict, optional
Additional attributes to add to the compute op.
Returns
-------
te.Tensor
The tensor having been written.
"""
if not attrs:
attrs = {}
write_attrs = {
"op": "ethosu_write",
"zero_point": zero_point,
"scale": scale,
}
if layout:
assert layout in {"NHWC", "NHCWB16"}
write_attrs["layout"] = layout
write_attrs = {**write_attrs, **attrs}
return te.compute(
tensor.shape,
lambda *i: tensor(*i),
name="ethosu_write",
attrs=write_attrs,
)
def convert_to_nhwc_compute(tensor: te.Tensor, layout: str, channels: int) -> te.Tensor:
"""Converts a tensor into NHWC layout if it's in NHWCB16 layout.
When the current layout is NHCWB16, a reduce sum operation is inserted
to ensure that the whole of the input tensor has a data dependency on
the copy operation. Without this, TVM removes compute that is deemed to
be unnecessary, which causes strides for the NPU to be calculated
incorrectly.
Parameters
----------
tensor : te.Tensor
The tensor to convert.
layout : str
The layout of the tensor, either NHWC or NHCWB16.
channels : int
The number of valid channels for the tensor.
Returns
-------
te.Tensor
The converted tensor in NHWC layout.
"""
assert layout in {"NHWC", "NHCWB16"}
convert_to_nhwc_attrs = {
"op": "ethosu_convert_to_nhwc",
"layout": layout,
}
if layout == "NHCWB16":
rc = te.reduce_axis((0, 16), name="rc")
return te.compute(
(tensor.shape[0], tensor.shape[1], tensor.shape[3], channels),
lambda nn, hh, ww, cc: te.sum(
tensor(nn, hh, te.indexdiv(cc, 16), ww, te.indexmod(rc, 16)), axis=rc
),
name="ethosu_convert_to_nhwc",
attrs=convert_to_nhwc_attrs,
)
return te.compute(
tensor.shape,
lambda *i: tensor(*i),
name="ethosu_convert_to_nhwc",
attrs=convert_to_nhwc_attrs,
)
def convert_to_nhcwb16_compute(tensor: te.Tensor, layout: str, channels: int) -> te.Tensor:
"""Converts a tensor into NHCWB16 layout if it's in NHWC layout.
Parameters
----------
tensor : te.Tensor
The tensor to convert.
layout : str
The layout of the tensor, either NHWC or NHCWB16.
channels : int
The number of valid channels for the tensor.
Returns
-------
te.Tensor
The converted tensor in NHCWB16 layout.
"""
assert layout in {"NHWC", "NHCWB16"}
convert_to_nhcwb16_attrs = {
"op": "ethosu_convert_to_nhcwb16",
"layout": layout,
}
if layout == "NHCWB16":
out_channel_bricks = te.indexdiv(channels - 1, 16) + 1
output_shape = (1, tensor.shape[1], out_channel_bricks, tensor.shape[2], 16)
return te.compute(
output_shape,
lambda nn, hh, cc, ww, cb: tvm.tir.if_then_else(
cc * 16 + cb < channels,
tensor(nn, hh, ww, cc * 16 + cb),
tvm.tir.IntImm(tensor.dtype, 0),
),
name="ethosu_convert_to_nhcwb16",
attrs=convert_to_nhcwb16_attrs,
)
return te.compute(
tensor.shape,
lambda *i: tensor(*i),
name="ethosu_convert_to_nhcwb16",
attrs=convert_to_nhcwb16_attrs,
)
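def _example_pack_20_channels():
    # Hedged sketch (editor's addition): 20 channels pack into
    # indexdiv(20 - 1, 16) + 1 = 2 bricks of 16, so an NHWC placeholder of
    # shape (1, 8, 8, 20) becomes NHCWB16 (1, 8, 2, 8, 16), with the unused 12
    # slots of the second brick zero-filled by the if_then_else above.
    t = te.placeholder((1, 8, 8, 20), dtype="int8", name="t")
    return convert_to_nhcwb16_compute(t, "NHCWB16", channels=20)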
def pad_compute(tensor: te.Tensor, padding: tuple) -> te.Tensor:
"""Pad an NHWC tensor in the height and width axes.
Parameters
----------
tensor : te.Tensor
The tensor to pad.
padding : tuple
The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).
Returns
-------
te.Tensor
The padded tensor.
"""
pad_top, pad_left, pad_down, pad_right = padding
pad_before = [0, int(pad_top), int(pad_left), 0]
pad_after = [0, int(pad_down), int(pad_right), 0]
pad_attrs = {
"op": "ethosu_pad",
}
shape = tensor.shape
return te.compute(
(shape[0], shape[1] + pad_top + pad_down, shape[2] + pad_left + pad_right, shape[3]),
lambda nn, hh, ww, cc: _pad_tensor(tensor, pad_before, pad_after)(nn, hh, ww, cc),
name="ethosu_pad",
attrs=pad_attrs,
)
def upscale_compute(tensor: te.Tensor, upscale_factor: int) -> te.Tensor:
"""Apply upscaling to an NHWC tensor.
Parameters
----------
tensor : te.Tensor
The tensor to pad.
upscale_factor : int
The factor by which to apply upscaling.
Returns
-------
te.Tensor
The upscaled tensor.
"""
shape = tensor.shape
reason = f"The compiler only supports 2x2 upscaling, but factor was {upscale_factor}."
assert upscale_factor in (1, 2), reason
new_shape = (shape[0], shape[1] * upscale_factor, shape[2] * upscale_factor, shape[3])
upscale_attrs = {"op": "ethosu_upscale"}
return te.compute(
new_shape,
lambda nn, hh, ww, cc: tensor(nn, hh // upscale_factor, ww // upscale_factor, cc),
name="ethosu_upscale",
attrs=upscale_attrs,
)
def dma_ifm_compute(
ifm: te.Tensor,
layout: str,
zero_point: int,
scale: float,
channels: int,
padding: Tuple[int, int, int, int],
upscale_factor: Optional[int] = 1,
) -> te.Tensor:
"""A sequence of compute operators representing the DMA capabilities for an IFM.
Parameters
----------
ifm : te.Tensor
The Input Feature Map (IFM) tensor.
layout : str
The layout of the data, either NHWC or NHCWB16.
zero_point : int
The zero point of the data.
scale : float
The scale of the data.
channels : int
The number of valid channels for the data.
padding : tuple
The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).
upscale_factor : Optional[int]
The factor by which to apply upscaling. By default there will be no upscaling.
Returns
-------
te.Tensor
The dma-ed IFM tensor.
"""
read_ifm = read_compute(ifm, zero_point, scale, layout=layout)
convert_to_nhwc_ifm = convert_to_nhwc_compute(read_ifm, layout, channels)
upscale_ifm = upscale_compute(convert_to_nhwc_ifm, upscale_factor)
return pad_compute(upscale_ifm, padding)
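def _example_dma_ifm_pipeline():
    # Hedged sketch (editor's addition): composing the read -> convert ->
    # upscale -> pad pipeline on an NHWC placeholder; with padding of 1 on
    # every edge the spatial dims grow from 8x8 to 10x10. All values are
    # illustrative assumptions.
    ifm = te.placeholder((1, 8, 8, 4), dtype="int8", name="ifm")
    return dma_ifm_compute(ifm, "NHWC", zero_point=0, scale=1.0, channels=4, padding=(1, 1, 1, 1))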
def dma_ofm_compute(
ofm: te.Tensor, layout: str, zero_point: int, scale: float, channels: int, attrs: dict = None
) -> te.Tensor:
"""A sequence of compute operators representing the DMA capabilities for an OFM.
Parameters
----------
ofm : te.Tensor
The Output Feature Map (OFM) tensor.
layout : str
The layout of the data, either NHWC or NHCWB16.
zero_point : int
The zero point of the data.
scale : float
The scale of the data.
channels : int
The number of valid channels for the data.
attrs : dict, optional
Additional attributes to add to the write compute op.
Returns
-------
te.Tensor
The dma-ed OFM tensor.
"""
if not attrs:
attrs = {}
convert_to_nhcwb16_ofm = convert_to_nhcwb16_compute(ofm, layout, channels)
return write_compute(convert_to_nhcwb16_ofm, zero_point, scale, layout=layout, attrs=attrs)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/te/identity.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Tensor Expression for identity"""
import numpy as np
from tvm import te
from tvm.contrib.ethosu.cascader import TESubgraph, EthosuPart, Propagator, register_matcher
from .dma import read_compute, write_compute
def identity_compute(
ifm: te.Tensor,
lut: te.Tensor,
ifm_scale: float,
ifm_zero_point: int,
ofm_scale: float,
ofm_zero_point: int,
activation: str,
) -> te.Tensor:
"""A compute operator for the NPU identity operator.
Parameters
----------
ifm : te.Tensor
The Input Feature Map tensor (IFM).
lut : te.Tensor
The look-up table values to use if activation is "LUT", "TANH" or "SIGMOID".
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
activation : str
The activation function to use.
"NONE" - no activation function.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
Returns
-------
te.Tensor
The Output Feature Map tensor.
"""
dmaed_ifm = read_compute(ifm, ifm_zero_point, ifm_scale)
id_attrs = {"op": "ethosu_identity", "activation": activation}
has_lut = activation in ("TANH", "LUT", "SIGMOID")
# This is a trick to insert the LUT tensor into the TE graph if LUT is present
lut_expr = (lut[0] + lut[255]).astype(ifm.dtype) if has_lut else 0
# Add the LUT tensor to the attributes to be able to later tell which tensor is the LUT
if has_lut:
id_attrs["lut"] = lut
identity = te.compute(
ifm.shape,
lambda *i: (dmaed_ifm(*i) + lut_expr).astype(ifm.dtype),
name="ethosu_identity",
attrs=id_attrs,
)
length = len(ifm.shape)
ifm_matrix = np.identity(length + 1)
offset = np.zeros(length, dtype="int64")
ifm_propagator = Propagator(
ifm_matrix,
offset.tolist(),
)
propagator_attrs = {
"ifm_propagator": ifm_propagator,
}
return write_compute(identity, ofm_zero_point, ofm_scale, attrs=propagator_attrs)
@register_matcher
def match_ethosu_identity(output_tensor, device_config):
"""Match a Tensor Expression corresponding to an NPU identity.
If the Tensor Expression matches, an EthosuPart will be created that models the
matched Tensor Expression. Otherwise, None will be returned.
Parameters
----------
output_tensor : tvm.te.Tensor
The tensor to attempt to match with.
device_config : EthosuDeviceConfig
Target device configuration
Returns
-------
Union[None, EthosuPart]
The created EthosuPart if there was a match, otherwise None.
"""
write = output_tensor
if write.op.name != "ethosu_write":
return None
identity = write.op.input_tensors[0]
if identity.op.name != "ethosu_identity":
return None
read = identity.op.input_tensors[0]
if read.op.name != "ethosu_read":
return None
input_tensors = [
read.op.input_tensors[0],
]
subgraph = TESubgraph(input_tensors, output_tensor)
propagators = [
write.op.attrs["ifm_propagator"],
]
ifm_dtype = input_tensors[0].dtype
ofm_dtype = output_tensor.dtype
input_tensors_shape = input_tensors[0].shape
length = len(input_tensors_shape)
    assert length <= 4, "Input tensor must have at most 4 dimensions for the identity operator"
channels = int(input_tensors_shape[length - 1]) if length >= 3 else 1
subkernels = len(device_config.get_kernel_steps(identity.op.name, 1, 1, ifm_dtype))
input_layout = output_layout = "NHWC"
output_quantum = device_config.get_output_quantum(output_layout)
valid_block_configs = device_config.get_valid_block_configs(
propagators[0],
identity.op.attrs,
output_tensor.shape,
channels,
channels,
output_layout,
input_layout,
ifm_dtype,
ofm_dtype,
1,
1,
)
return EthosuPart(
subgraph,
propagators,
output_quantum,
subkernels,
valid_block_configs,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/te/inline.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Tensor Expressions for operations that will be inlined"""
import numpy as np # type: ignore
from tvm.contrib.ethosu.cascader import TESubgraph, InlinePart, Propagator, register_matcher
INLINE_OPS = {"T_reshape", "T_strided_slice"}
@register_matcher
def match_ethosu_inline(output_tensor, device_config):
"""Match a Tensor Expression corresponding to an operator that will be inlined.
If the Tensor Expression matches, an InlinePart will be created that models the
matched Tensor Expression. Otherwise, None will be returned. This matcher is
naive and assumes nothing about the compute of the Tensor Expression. Therefore,
the resulting InlinePart will have full-tensor dependencies (i.e. each output
element depends on every input element).
Parameters
----------
output_tensor : tvm.te.Tensor
The tensor to attempt to match with.
device_config : EthosuDeviceConfig
Target device configuration
Returns
-------
Union[None, InlinePart]
The created InlinePart if there was a match, otherwise None.
"""
if output_tensor.op.name not in INLINE_OPS:
return None
input_tensors = output_tensor.op.input_tensors
propagators = []
output_dims = len(output_tensor.shape)
for input_tensor in input_tensors:
input_dims = len(input_tensor.shape)
transform_matrix = np.zeros((input_dims + 1, output_dims + 1))
for i, axis in enumerate(input_tensor.shape):
transform_matrix[i, output_dims] = int(axis)
transform_matrix[input_dims, output_dims] = 1
offset_vector = np.zeros(input_dims, dtype="int64")
propagators.append(
Propagator(
transform_matrix.tolist(),
offset_vector.tolist(),
)
)
subgraph = TESubgraph(input_tensors, output_tensor)
return InlinePart(
subgraph,
propagators,
)
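# Illustrative sketch of the full-dependency matrix built above, for a
# hypothetical rank-3 input of shape (1, 16, 16) feeding a rank-4 output.
# Every column except the constant one is zero, so any output region,
# however small, propagates to the entire input tensor.
def _inline_transform_matrix_example():
    input_shape, output_dims = (1, 16, 16), 4
    matrix = np.zeros((len(input_shape) + 1, output_dims + 1))
    for i, axis in enumerate(input_shape):
        matrix[i, output_dims] = int(axis)  # whole extent of each input axis
    matrix[len(input_shape), output_dims] = 1  # homogeneous row
    return matrix.tolist()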
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/te/pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Tensor Expressions for poolings"""
from typing import Tuple
import numpy as np
from tvm import te
from tvm.contrib.ethosu.cascader import TESubgraph, EthosuPart, Propagator, register_matcher
from .dma import dma_ofm_compute, dma_ifm_compute
from .common import get_layout_transform_matrices
def pooling_compute(
ifm: te.Tensor,
lut: te.Tensor,
pooling_type: str,
ifm_scale: float,
ifm_zero_point: int,
ofm_scale: float,
ofm_zero_point: int,
pool_shape: Tuple[int, int],
ofm_channels: int,
strides: Tuple[int, int],
padding: Tuple[int, int, int, int],
activation: str,
clip_min: int,
clip_max: int,
rounding_mode: str,
upscale: str,
ifm_layout: str,
ofm_layout: str,
) -> te.Tensor:
"""A compute operator representing the capabilities of pooling for the NPU.
Parameters
----------
ifm : te.Tensor
The Input Feature Map tensor (IFM).
lut : te.Tensor
The look-up table of values to use if activation = "LUT".
pooling_type: str
The type of the pooling. "AVG" - average pool, "MAX" - max pool.
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
pool_shape : Tuple[int, int]
The 2 dimensional pool shape as (pool_shape_height, pool_shape_width).
ofm_channels : int
The number of the Output Feature Map channels
strides : Tuple[int, int]
The 2 dimensional strides as (stride_height, stride_width).
padding : Tuple[int, int, int, int]
The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).
activation : str
The activation function to use.
"NONE" - no activation function.
"CLIP" - clip the output between clip_min and clip_max.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
clip_min : int
The minimum clipping value if activation = "CLIP".
clip_max : int
The maximum clipping value if activation = "CLIP".
rounding_mode : str
The rounding mode to apply to the Output Feature Map tensor.
"TFL" - Tensorflow Lite rounding scheme.
"TRUNCATE" - Truncate towards zero.
"NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
upscale : str
The 2x2 upscaling mode to apply to the Input Feature Map tensor.
"NONE" - no upscaling.
"NEAREST" - upscale using nearest neighbour.
"ZEROS" - upscale using zeros.
ifm_layout : str
The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
ofm_layout : str
The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
Returns
-------
te.Tensor
The OFM tensor.
"""
assert ifm.shape[0] == 1
assert ifm_layout in {"NHWC", "NHCWB16"}
assert ofm_layout in {"NHWC", "NHCWB16"}
padding = [int(v) for v in padding]
stride_h, stride_w = [int(v) for v in strides]
pool_shape_h, pool_shape_w = [int(v) for v in pool_shape]
upscale_factor = 2 if upscale != "NONE" else 1
# Compute operation for the IFM DMA pipeline
dmaed_ifm = dma_ifm_compute(
ifm, ifm_layout, ifm_zero_point, ifm_scale, ofm_channels, padding, upscale_factor
)
# Pooling compute operation
ofm_height = (dmaed_ifm.shape[1] - pool_shape_h) // stride_h + 1
ofm_width = (dmaed_ifm.shape[2] - pool_shape_w) // stride_w + 1
rh = te.reduce_axis((0, pool_shape_h), name="ry")
rw = te.reduce_axis((0, pool_shape_w), name="rx")
pooling_attrs = {
"op": "ethosu_pooling",
"pooling_type": pooling_type,
"pool_shape_h": pool_shape_h,
"pool_shape_w": pool_shape_w,
"stride_h": stride_h,
"stride_w": stride_w,
"activation": activation,
"clip_min": clip_min,
"clip_max": clip_max,
"rounding_mode": rounding_mode,
"upscale": upscale,
}
has_lut = activation in ("TANH", "LUT", "SIGMOID")
# This is a trick to insert the LUT tensor into the TE graph if LUT is present
lut_expr = (lut[0] + lut[255]).astype(ifm.dtype) if has_lut else 0
# Add the LUT tensor to the attributes to be able to later tell which tensor is the LUT
if has_lut:
pooling_attrs["lut"] = lut
pooling = te.compute(
(1, ofm_height, ofm_width, ofm_channels),
lambda nn, hh, ww, cc: te.max(
(dmaed_ifm(nn, hh * stride_h + rh, ww * stride_w + rw, cc) + lut_expr).astype(
ifm.dtype
),
axis=[rh, rw],
),
name="ethosu_pooling",
attrs=pooling_attrs,
)
nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(int(ofm_channels))
ifm_matrix = [
[1, 0, 0, 0, 0],
[0, stride_h, 0, 0, (pool_shape_h - stride_h)],
[0, 0, stride_w, 0, (pool_shape_w - stride_w)],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
if ofm_layout == "NHCWB16":
ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
if ifm_layout == "NHCWB16":
ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
ifm_propagator = Propagator(
ifm_matrix,
[0, -padding[0], -padding[1], 0]
if ifm_layout == "NHWC"
else [0, -padding[0], 0, -padding[1], 0],
)
propagator_attrs = {
"ifm_propagator": ifm_propagator,
}
# Compute operation for the OFM DMA pipeline
return dma_ofm_compute(
pooling, ofm_layout, ofm_zero_point, ofm_scale, ofm_channels, attrs=propagator_attrs
)
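# Illustrative sketch of the affine IFM matrix built above, for a
# hypothetical 2x2 max pool with stride (2, 2) in NHWC. Propagating an
# output stripe through it shows that height and width scale by the stride,
# with a (pool_shape - stride) overlap term in the constant column.
def _pooling_propagation_example():
    stride_h = stride_w = pool_shape_h = pool_shape_w = 2
    ifm_matrix = np.array(
        [
            [1, 0, 0, 0, 0],
            [0, stride_h, 0, 0, pool_shape_h - stride_h],
            [0, 0, stride_w, 0, pool_shape_w - stride_w],
            [0, 0, 0, 1, 0],
            [0, 0, 0, 0, 1],
        ]
    )
    out_region = np.array([1, 4, 4, 16, 1])  # homogeneous NHWC region
    assert (ifm_matrix @ out_region == np.array([1, 8, 8, 16, 1])).all()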
@register_matcher
def match_ethosu_pooling(output_tensor, device_config):
"""Match a Tensor Expression corresponding to an NPU Pooling.
If the Tensor Expression matches, an EthosuPart will be created that models the
matched Tensor Expression. Otherwise, None will be returned.
Parameters
----------
output_tensor : tvm.te.Tensor
The tensor to attempt to match with.
device_config : EthosuDeviceConfig
Target device configuration
Returns
-------
Union[None, EthosuPart]
The created EthosuPart if there was a match, otherwise None.
"""
write = output_tensor
if write.op.name != "ethosu_write":
return None
convert_to_nhcwb16 = write.op.input_tensors[0]
if convert_to_nhcwb16.op.name != "ethosu_convert_to_nhcwb16":
return None
pool2d = convert_to_nhcwb16.op.input_tensors[0]
if pool2d.op.name != "ethosu_pooling":
return None
pad = pool2d.op.input_tensors[0]
if pad.op.name != "ethosu_pad":
return None
upscale = pad.op.input_tensors[0]
if upscale.op.name != "ethosu_upscale":
return None
convert_to_nhwc = upscale.op.input_tensors[0]
if convert_to_nhwc.op.name != "ethosu_convert_to_nhwc":
return None
read = convert_to_nhwc.op.input_tensors[0]
if read.op.name != "ethosu_read":
return None
input_tensors = [
read.op.input_tensors[0],
]
subgraph = TESubgraph(input_tensors, output_tensor)
propagators = [
write.op.attrs["ifm_propagator"],
]
ifm_dtype = input_tensors[0].dtype
ofm_dtype = output_tensor.dtype
    # Use the channel count from a stage of the TE graph where the IFM is always NHWC
channels = int(pool2d.shape[3])
pool_shape_h = int(pool2d.op.attrs["pool_shape_h"])
pool_shape_w = int(pool2d.op.attrs["pool_shape_w"])
subkernels = len(
device_config.get_kernel_steps(pool2d.op.name, pool_shape_h, pool_shape_w, ifm_dtype)
)
output_layout = convert_to_nhcwb16.op.attrs["layout"]
input_layout = convert_to_nhwc.op.attrs["layout"]
output_quantum = device_config.get_output_quantum(output_layout)
valid_block_configs = device_config.get_valid_block_configs(
propagators[0],
pool2d.op.attrs,
output_tensor.shape,
channels,
channels,
output_layout,
input_layout,
ifm_dtype,
ofm_dtype,
pool_shape_h,
pool_shape_w,
)
return EthosuPart(
subgraph,
propagators,
output_quantum,
subkernels,
valid_block_configs,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/te/unary_elementwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Tensor Expressions for unary_elementwise for the NPU"""
import numpy as np
from tvm import te
from tvm.contrib.ethosu.cascader import TESubgraph, EthosuPart, Propagator, register_matcher
from .dma import dma_ofm_compute, dma_ifm_compute
from .common import get_layout_transform_matrices
def unary_elementwise_compute(
ifm: te.Tensor,
lut: te.Tensor,
operator_type: str,
ifm_scale: float,
ifm_zero_point: int,
ofm_scale: float,
ofm_zero_point: int,
ofm_channels: int,
activation: str,
clip_min: int,
clip_max: int,
rounding_mode: str,
ifm_layout: str,
ofm_layout: str,
) -> te.Tensor:
"""A compute operator representing the capabilities of unary_elementwise for the NPU.
Parameters
----------
ifm : te.Tensor
The Input Feature Map tensor (IFM).
lut : te.Tensor
The look-up table values to use if activation = "LUT".
operator_type: str
The type of the unary elementwise operator.
"ABS"
"CLZ"
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
ofm_channels : int
The number of OFM channels.
activation : str
The activation function to use.
"NONE" - no activation function.
"CLIP" - clip the output between clip_min and clip_max.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
clip_min : int
The minimum clipping value if activation = "CLIP".
clip_max : int
The maximum clipping value if activation = "CLIP".
rounding_mode : str
The rounding mode to apply to the Output Feature Map tensor.
"TFL" - Tensorflow Lite rounding scheme.
"TRUNCATE" - Truncate towards zero.
"NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
    ifm_layout : str
        The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
    ofm_layout : str
        The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
Returns
-------
te.Tensor
The OFM tensor.
"""
assert ifm.shape[0] == 1
assert ifm_layout in {"NHWC", "NHCWB16"}
assert ofm_layout in {"NHWC", "NHCWB16"}
    # Change the ifm and ofm scales to conform with what the Vela API expects
ofm_scale = ifm_scale / ofm_scale
ifm_scale = 1.0
# Compute operation for the IFM DMA pipeline
dmaed_ifm = dma_ifm_compute(
ifm, ifm_layout, ifm_zero_point, ifm_scale, ofm_channels, (0, 0, 0, 0)
)
# Unary elementwise compute operation
ofm_height = dmaed_ifm.shape[1]
ofm_width = dmaed_ifm.shape[2]
unary_elementwise_attrs = {
"op": "ethosu_unary_elementwise",
"operator_type": operator_type,
"activation": activation,
"clip_min": clip_min,
"clip_max": clip_max,
"rounding_mode": rounding_mode,
}
def clz_imp(inp):
        # Assuming a 32-bit integer input
return 32 - te.log2(inp)
operators = {"ABS": te.abs, "CLZ": clz_imp}
unary_elementwise = te.compute(
(1, ofm_height, ofm_width, ofm_channels),
lambda nn, hh, ww, cc: operators[operator_type](
dmaed_ifm(nn, hh, ww, cc).astype(ifm.dtype)
),
name="ethosu_unary_elementwise",
attrs=unary_elementwise_attrs,
)
nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(int(ofm_channels))
ifm_matrix = [
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
if ofm_layout == "NHCWB16":
ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
if ifm_layout == "NHCWB16":
ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
ifm_propagator = Propagator(
ifm_matrix,
[0, 0, 0, 0] if ifm_layout == "NHWC" else [0, 0, 0, 0, 0],
)
propagator_attrs = {"ifm_propagator": ifm_propagator}
# Compute operation for the OFM DMA pipeline
return dma_ofm_compute(
unary_elementwise,
ofm_layout,
ofm_zero_point,
ofm_scale,
ofm_channels,
attrs=propagator_attrs,
)
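# Illustrative sketch of the scale folding performed above. Requantisation
# multiplies by ifm_scale and divides by ofm_scale, so both can be folded
# into a single OFM rescale with the IFM scale pinned to 1.0, which is the
# form the Vela API expects. The scale values below are hypothetical.
def _scale_folding_example():
    ifm_scale, ofm_scale = 0.5, 0.25
    vela_ofm_scale = ifm_scale / ofm_scale  # -> 2.0
    vela_ifm_scale = 1.0
    assert (vela_ifm_scale, vela_ofm_scale) == (1.0, 2.0)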
@register_matcher
def match_ethosu_unary_elementwise(output_tensor, device_config):
"""Match a Tensor Expression corresponding to an NPU Unary Elementwise.
If the Tensor Expression matches, an EthosuPart will be created that models the
matched Tensor Expression. Otherwise, None will be returned.
Parameters
----------
output_tensor : tvm.te.Tensor
The tensor to attempt to match with.
device_config : EthosuDeviceConfig
Target device configuration
Returns
-------
Union[None, EthosuPart]
The created EthosuPart if there was a match, otherwise None.
"""
write = output_tensor
if write.op.name != "ethosu_write":
return None
convert_to_nhcwb16 = write.op.input_tensors[0]
if convert_to_nhcwb16.op.name != "ethosu_convert_to_nhcwb16":
return None
unary_elementwise = convert_to_nhcwb16.op.input_tensors[0]
if unary_elementwise.op.name != "ethosu_unary_elementwise":
return None
pad = unary_elementwise.op.input_tensors[0]
if pad.op.name != "ethosu_pad":
return None
upscale = pad.op.input_tensors[0]
if upscale.op.name != "ethosu_upscale":
return None
convert_to_nhwc = upscale.op.input_tensors[0]
if convert_to_nhwc.op.name != "ethosu_convert_to_nhwc":
return None
read = convert_to_nhwc.op.input_tensors[0]
if read.op.name != "ethosu_read":
return None
input_tensors = [
read.op.input_tensors[0],
]
subgraph = TESubgraph(input_tensors, output_tensor)
propagators = [
write.op.attrs["ifm_propagator"],
]
ifm_dtype = input_tensors[0].dtype
ofm_dtype = output_tensor.dtype
output_layout = convert_to_nhcwb16.op.attrs["layout"]
input_layout = convert_to_nhwc.op.attrs["layout"]
output_quantum = device_config.get_output_quantum(output_layout)
block_config = device_config.get_elementwise_block_config(
propagators[0],
None,
unary_elementwise.op.attrs,
output_tensor.shape,
output_layout,
input_layout,
None,
ifm_dtype,
ofm_dtype,
)
return EthosuPart(
subgraph,
propagators,
output_quantum,
1,
block_config,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/tir/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm(R) Ethos(TM)-U NPU TIR codegen modules."""
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/tir/binary_elementwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Extract information from the binary_elementwise operators in TIR."""
from typing import Tuple
import tvm
from .utils import get_outer_loops, get_op_attrs
from .dma import get_ifm_params, get_ofm_params
from .spec import SerialActivation, SerialBinaryElementwise
from .producers_consumers import ProducersConsumers
def ignore_cast(tir_load: tvm.tir.expr.Load) -> tvm.tir.Var:
"""When the datatype of the ifm, ifm2 and ofm do not match,
casts are inserted in TE to handle the difference in these types.
Since TIR is not directly run on the NPU we can simply ignore
these, and allow the NPU to handle the difference in datatypes
itself.
Parameters
----------
tir_load : tvm.tir.expr.Load
Returns
-------
tvm.tir.Var
"""
return tir_load.value if isinstance(tir_load, tvm.tir.Cast) else tir_load
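# Illustrative sketch: ignore_cast unwraps exactly one level of tvm.tir.Cast
# and passes anything else through untouched. The variable is hypothetical.
def _ignore_cast_example():
    x = tvm.tir.Var("x", "int8")
    casted = tvm.tir.Cast("int32", x)
    assert ignore_cast(casted).same_as(x)  # cast stripped
    assert ignore_cast(x).same_as(x)  # non-cast passes through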
def get_binary_elementwise_params(
stmt: tvm.tir.AttrStmt, producers_consumers: ProducersConsumers
) -> Tuple[SerialBinaryElementwise, tvm.tir.Var, tvm.tir.Var]:
"""Get the parameters necessary to construct a call_extern for a binary_elementwise.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of a binary elementwise loop nest.
producers_consumers: ProducersConsumers
It associates pointers with the loop nest that produces
their values and with the loop nest that consumes their values.
Returns
-------
SerialBinaryElementwise
The parameters needed to construct a binary elementwise operator.
output_pointer : tvm.tir.Var
The output pointer of the binary elementwise operation.
replace_pointer : tvm.tir.Var
The output pointer of the DMA write operation, which is to replace
the binary elementwise output pointer.
is_allocator : bool
Whether this operator allocates its output.
"""
attrs, body = get_op_attrs(stmt)
reversed_operands = attrs["reversed_operands"]
_, _, _, _, _, inner = get_outer_loops(body, "NHWC")
op = ignore_cast(inner.value)
input_pointer = ignore_cast(op.a).buffer.data
input_pointer1 = ignore_cast(op.b).buffer.data
if reversed_operands:
input_pointer, input_pointer1 = input_pointer1, input_pointer
output_pointer = inner.buffer.data
# Get feature map info
serial_ifm, _ = get_ifm_params(input_pointer, producers_consumers, stmt)
serial_ifm2, _ = get_ifm_params(input_pointer1, producers_consumers, stmt)
serial_ofm, serial_block_config, replace_pointer, is_allocator = get_ofm_params(
output_pointer, producers_consumers, stmt
)
# Get activation info
serial_activation = SerialActivation(
op=attrs["activation"], clip_min=attrs["clip_min"], clip_max=attrs["clip_max"]
)
return (
SerialBinaryElementwise(
ifm=serial_ifm,
ifm2=serial_ifm2,
ofm=serial_ofm,
operator_type=attrs["operator_type"],
reversed_operands=reversed_operands,
activation=serial_activation,
rounding_mode=attrs["rounding_mode"],
block_config=serial_block_config,
),
output_pointer,
replace_pointer,
is_allocator,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/tir/compiler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""The integration of the Arm(R) Ethos(TM)-U NPU TIR compiler."""
import tvm
from tvm import relay
from tvm.relay.expr_functor import ExprMutator
from tvm.driver.build_module import schedule_to_module
from . import passes as ethosu_passes
from .scheduler import schedule
from .. import util
def lower_ethosu(sch, args, const_dict, name="main"):
"""Lower a schedule to TIR for the Arm(R) Ethos(TM)-U NPU target.
The resulting TIR module will contain a single function
that consists of a sequence of tir.call_extern to NPU
operations.
Parameters
----------
sch : tvm.te.Schedule
The schedule to be lowered.
args : Union[list of tvm.te.Tensor, TEGraph]
The input/output tensors.
const_dict : dict of int to numpy.ndarray
The constant dictionary.
name : str, optional
The name of the lowered primitive function.
Returns
-------
mod : tvm.IRModule
The lowered TIR module.
const_dict : dict of int to numpy.ndarray
The modified constant dictionary.
"""
if not isinstance(args, list):
args = list(args.inputs) + list(args.outputs)
# config setup
curr_pass_ctx = tvm.ir.transform.PassContext.current()
curr_cfg = dict()
for key, value in curr_pass_ctx.config.items():
curr_cfg[key] = value
tir_compiler_cfg = {
"tir.LoopPartition": {
"partition_const_loop": True,
"no_unroll_loop_with_extent_one": True,
},
"tir.UnrollLoop": {"auto_max_depth": -1},
"tir.noalias": True,
"tir.debug_keep_trivial_loop": True,
}
# Merge two configs
curr_cfg = {**curr_cfg, **tir_compiler_cfg}
sch = sch.normalize()
with tvm.transform.PassContext(config=curr_cfg):
mod = schedule_to_module(sch, args, name)
mod = tvm.tir.transform.Simplify()(mod)
mod = ethosu_passes.RemoveConcatenates()(mod)
mod = tvm.tir.transform.InjectRollingBuffer()(mod)
mod = tvm.tir.transform.StorageFlatten(64)(mod)
mod = tvm.tir.transform.UnrollLoop()(mod)
mod = tvm.tir.transform.Simplify()(mod)
mod = tvm.tir.transform.LoopPartition()(mod)
mod = ethosu_passes.RemoveZeroStores()(mod)
mod = tvm.tir.transform.Simplify()(mod)
mod = tvm.tir.transform.RemoveNoOp()(mod)
mod = ethosu_passes.ReplaceOperators()(mod)
mod = tvm.tir.transform.RemoveNoOp()(mod)
mod, const_dict = ethosu_passes.EncodeConstants(const_dict)(mod)
mod = ethosu_passes.HoistAllocates()(mod)
mod = tvm.tir.transform.RemoveNoOp()(mod)
mod, const_dict = ethosu_passes.MergeConstants(const_dict)(mod)
mod = ethosu_passes.CopyComputeReordering()(mod)
        # When striping is enabled and storage_rewrite is not run,
        # striping results in incorrect code generation. This needs
        # further investigation. Until that is fixed, the disable_storage_rewrite
        # user directive is overridden whenever striping is enabled.
disable_storage_rewrite = curr_cfg.get("tir.disable_storage_rewrite", False)
if not disable_storage_rewrite or util.is_striping_enabled():
mod = tvm.tir.transform.StorageRewrite()(mod)
mod = tvm.tir.transform.RemoveNoOp()(mod)
mod = ethosu_passes.AnnotateAllocates()(mod)
mod, const_dict = ethosu_passes.CreatePrimFuncWithoutConstants(const_dict)(mod)
return mod, const_dict
def lower_to_te(prim_func):
"""Lower a Relay primitive function to a Tensor Expression in an unscheduled CachedFunc.
Parameters
----------
prim_func : tvm.relay.Function
The Relay function to lower.
Returns
-------
out : CachedFunc
The lowered Tensor Expression as part of a CachedFunc.
"""
f = tvm._ffi.get_global_func("relay.backend.LowerToTE")
return f(prim_func)
class ExtractConstants(ExprMutator):
"""The actual mutator pass to extract the constants from a function and replace them with
Vars so the function can be lowered to a TE graph. Additionally returns all the values of
the constants extracted."""
def __init__(self):
super().__init__()
self.constants = []
self.const_vars = []
def visit_constant(self, const):
if isinstance(const.checked_type, relay.ty.TensorType):
self.constants.append(const.data.asnumpy())
name = "p" + str(len(self.constants))
var = relay.var(type_annotation=const.checked_type, name_hint=name)
self.const_vars.append(var)
return var
return const
def visit_function(self, fn):
new_body = self.visit(fn.body)
new_params = list(fn.params) + self.const_vars
return relay.Function(new_params, new_body)
def extract_constants(self, func):
new_func = self.visit(func)
return new_func, self.constants
def extract_constants(func):
"""Extract the constants from a function and replace them with
Vars so the function can be lowered to a TE graph. Additionally
returns all the values of the constants extracted.
Parameters
----------
func : tvm.relay.Function
The Relay function from which to extract constants.
Returns
-------
new_func : tvm.relay.Function
The Relay function with constants replaced by vars.
const_dict : dict of int to numpy.ndarray
A dict of the extracted constants keyed by their param index.
"""
const_dict = {}
params = len(func.params)
new_func, consts = ExtractConstants().extract_constants(func)
for i, const in enumerate(consts):
const_dict[params + i] = const
new_func = tvm.relay.transform.InferType()(tvm.IRModule.from_expr(new_func))["main"]
return new_func, const_dict
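# Illustrative sketch: extracting the constant from a trivial Relay function.
# The shapes and dtype are hypothetical; InferType is run first because
# visit_constant needs the constants' checked_type.
def _extract_constants_example():
    import numpy as np  # local import; numpy is not otherwise used here

    x = relay.var("x", shape=(1, 4), dtype="int8")
    const = relay.const(np.ones((1, 4), dtype="int8"))
    func = relay.Function([x], relay.add(x, const))
    func = relay.transform.InferType()(tvm.IRModule.from_expr(func))["main"]
    new_func, const_dict = extract_constants(func)
    # The original function had one parameter, so the extracted constant
    # is keyed by parameter index 1 of the rewritten function.
    assert list(const_dict.keys()) == [1]
    assert len(new_func.params) == 2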
@util.create_npu_function_pass(opt_level=1)
class LowerToTIR:
"""A pass that lowers NPU Relay functions to TIR. This pass wraps
the _lower_to_tir pass that operates function->function, while this
is IRModule->IRModule.
Attributes
----------
scheduler : callable
A function to schedule NPU operations. For example,
scheduler.py/copy_constants.
"""
def __init__(self, scheduler):
self.scheduler = scheduler
    def transform_npu_function(self, _, func: relay.Function) -> tvm.tir.PrimFunc:
"""Lower NPU functions to TIR."""
tir_mod, const_dict = _lower_to_tir(func, self.scheduler)
for param in const_dict.keys():
const_dict[param] = tvm.nd.array(const_dict[param])
compiler_name = "ethos-u"
primfunc = tir_mod["main"]
primfunc = primfunc.with_attr("global_symbol", func.attrs["global_symbol"])
primfunc = primfunc.with_attr("ethos-u.constants", const_dict)
primfunc = primfunc.with_attr("target", tvm.target.Target(compiler_name))
return primfunc
def __call__(self, *args, **kwargs):
pass
def _lower_to_tir(func, cascader=None):
"""Lower a Relay function to TIR for the Arm(R) Ethos(TM)-U NPU target.
The Relay function should only contain operations supported
by the NPU.
Parameters
----------
func : tvm.relay.Function
The Relay function to lower.
    cascader : Callable
        An optional cascading function.
Returns
-------
mod : tvm.IRModule
The lowered TIR module.
consts : dict of int to numpy.ndarray
A dict of the extracted constants keyed by their param index.
"""
func, consts = extract_constants(func)
mod = tvm.IRModule.from_expr(func)
func = relay.transform.InferType()(mod)["main"]
cached_func = lower_to_te(func)
s = schedule(cached_func, consts, cascader)
mod, consts = lower_ethosu(s, cached_func, consts)
return mod, consts
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/tir/convolution.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Extract parameters from the convolution operators in TIR."""
import math
import tvm
from ethosu.vela import api as vapi
from ..vela_api import SCALE_BIAS_LENGTH, get_accelerator_config
from .utils import get_outer_loops, get_op_attrs, get_base_address, get_loads, get_stores
from .dma import get_ifm_params, get_ofm_params
from .spec import SerialKernel, SerialAddressRange, SerialActivation, Serial2DConvolution
def get_conv2d_params(stmt, producers_consumers):
"""Get the parameters necessary to construct a call_extern for a 2D convolution.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of a convolution loop nest.
producers_consumers: ProducersConsumers
It associates pointers with the loop nest that produces
their values and with the loop nest that consumes their values.
Returns
-------
Serial2DConvolution
The parameters needed to construct a 2D convolution.
output_pointer : tvm.tir.Var
The output pointer of the convolution operation.
replace_pointer : tvm.tir.Var
The output pointer of the DMA write operation, which is to replace
the convolution output pointer.
is_allocator : bool
Whether this operator allocates its output.
"""
accel_config = get_accelerator_config()
attrs, body = get_op_attrs(stmt)
_, _, _, _, _, inner = get_outer_loops(body, "NHWC")
rh = inner
rw = rh.body
rc = rw.body
# loads = [output, input, weights, scale_bias, scale_bias, LUT, LUT]
loads = get_loads(rc.body)
# stores = [output]
stores = get_stores(rc.body)
input_pointer = loads[1].buffer.data
output_pointer = stores[0].buffer.data
# Get feature map info
serial_ifm, serial_padding = get_ifm_params(input_pointer, producers_consumers, stmt)
serial_ofm, serial_block_config, replace_pointer, is_allocator = get_ofm_params(
output_pointer, producers_consumers, stmt
)
# Get kernel info
serial_kernel = SerialKernel(
width=int(rw.extent),
height=int(rh.extent),
stride_w=int(attrs["stride_w"]),
stride_h=int(attrs["stride_h"]),
dilation_w=int(attrs["dilation_w"]),
dilation_h=int(attrs["dilation_h"]),
)
# Get scale_bias info
scale_bias_load = loads[3]
scale_bias_base = [get_base_address(index) for index in scale_bias_load.indices]
# Get weight info
weight_load = loads[2]
weight_base = [get_base_address(index) for index in weight_load.indices]
channels = serial_ofm[3] if isinstance(serial_ofm[3], int) else serial_ofm[3].value
if accel_config == vapi.NpuAccelerator.Ethos_U65_512:
scale_bias_length = SCALE_BIAS_LENGTH * math.ceil(channels / 2)
scale_bias2_length = SCALE_BIAS_LENGTH * math.floor(channels / 2)
serial_scale_bias = SerialAddressRange(
address=tvm.tir.BufferLoad(scale_bias_load.buffer, scale_bias_base),
length=scale_bias_length,
)
serial_scale_bias2 = SerialAddressRange(
address=tvm.tir.BufferLoad(
scale_bias_load.buffer, [scale_bias_base[0] + scale_bias_length]
),
length=scale_bias2_length,
)
weight_length = (
channels * serial_kernel[0] * serial_kernel[1] * math.ceil(rc.extent.value / 2)
)
weight2_length = (
channels * serial_kernel[0] * serial_kernel[1] * math.floor(rc.extent.value / 2)
)
serial_weight = SerialAddressRange(
address=tvm.tir.BufferLoad(weight_load.buffer, weight_base),
length=weight_length,
)
serial_weight2 = SerialAddressRange(
address=tvm.tir.BufferLoad(weight_load.buffer, [weight_base[0] + weight_length]),
length=weight2_length,
)
else:
scale_bias_length = SCALE_BIAS_LENGTH * channels
serial_scale_bias = SerialAddressRange(
address=tvm.tir.BufferLoad(scale_bias_load.buffer, scale_bias_base),
length=scale_bias_length,
)
# Insert -1s into the spec to denote the absence of the other pointer
serial_scale_bias2 = SerialAddressRange(
address=tvm.tir.IntImm("int8", -1),
length=tvm.tir.IntImm("int8", -1),
)
weight_length = channels * serial_kernel[0] * serial_kernel[1] * rc.extent.value
serial_weight = SerialAddressRange(
address=tvm.tir.BufferLoad(weight_load.buffer, weight_base),
length=weight_length,
)
serial_weight2 = SerialAddressRange(
address=tvm.tir.IntImm("int8", -1),
length=tvm.tir.IntImm("int8", -1),
)
# Get activation info
serial_activation = SerialActivation(
op=attrs["activation"], clip_min=attrs["clip_min"], clip_max=attrs["clip_max"]
)
return (
Serial2DConvolution(
ifm=serial_ifm,
ofm=serial_ofm,
kernel=serial_kernel,
weight=serial_weight,
weight2=serial_weight2,
weight_zero_point=attrs["weight_zero_point"],
scale_bias=serial_scale_bias,
scale_bias2=serial_scale_bias2,
padding=serial_padding,
activation=serial_activation,
rounding_mode=attrs["rounding_mode"],
upscale=attrs["upscale"],
block_config=serial_block_config,
),
output_pointer,
replace_pointer,
is_allocator,
)
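# Illustrative sketch of the weight stream split computed above for the
# dual-core Ethos(TM)-U65-512 case. The IFM depth is interleaved across the
# two cores, hence the ceil/floor pair; the shapes below are hypothetical.
def _u65_weight_split_example():
    channels, kernel_h, kernel_w, ifm_depth = 8, 3, 3, 15
    weight_length = channels * kernel_h * kernel_w * math.ceil(ifm_depth / 2)
    weight2_length = channels * kernel_h * kernel_w * math.floor(ifm_depth / 2)
    # An odd IFM depth gives the first core one extra depth slice.
    assert (weight_length, weight2_length) == (576, 504)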
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/tir/depthwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Extract information from the depthwise convolution operators in TIR."""
from typing import Tuple
import tvm
from ..vela_api import SCALE_BIAS_LENGTH
from .utils import get_outer_loops, get_op_attrs, get_base_address, get_loads, get_stores
from .dma import get_ifm_params, get_ofm_params
from .spec import (
SerialKernel,
SerialAddressRange,
SerialActivation,
Serial2DDepthwise,
)
from .producers_consumers import ProducersConsumers
def get_depthwise_conv2d_params(
stmt: tvm.tir.AttrStmt, producers_consumers: ProducersConsumers
) -> Tuple[Serial2DDepthwise, tvm.tir.Var, tvm.tir.Var]:
"""Get the parameters necessary to construct a call_extern for a depthwise_conv2d.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of a depthwise loop nest.
producers_consumers: ProducersConsumers
It associates pointers with the loop nest that produces
their values and with the loop nest that consumes their values.
Returns
-------
    Serial2DDepthwise
        The parameters needed to construct a 2D depthwise convolution.
output_pointer : tvm.tir.Var
The output pointer of the convolution operation.
replace_pointer : tvm.tir.Var
The output pointer of the DMA write operation, which is to replace
the convolution output pointer.
is_allocator : bool
Whether this operator allocates its output.
"""
attrs, body = get_op_attrs(stmt)
_, _, _, _, _, inner = get_outer_loops(body, "NHWC")
rh = inner
rw = rh.body
# loads = [output, input, weights, scale_bias, scale_bias]
loads = get_loads(rw.body)
# stores = [output]
stores = get_stores(rw.body)
input_pointer = loads[1].buffer.data
output_pointer = stores[0].buffer.data
# Get feature map info
serial_ifm, serial_padding = get_ifm_params(input_pointer, producers_consumers, stmt)
serial_ofm, serial_block_config, replace_pointer, is_allocator = get_ofm_params(
output_pointer, producers_consumers, stmt
)
# Get kernel info
serial_kernel = SerialKernel(
width=int(rw.extent),
height=int(rh.extent),
stride_w=int(attrs["stride_w"]),
stride_h=int(attrs["stride_h"]),
dilation_w=int(attrs["dilation_w"]),
dilation_h=int(attrs["dilation_h"]),
)
# Get scale_bias info
scale_bias_load = loads[3]
scale_bias_base = [get_base_address(index) for index in scale_bias_load.indices]
serial_scale_bias = SerialAddressRange(
address=tvm.tir.BufferLoad(scale_bias_load.buffer, scale_bias_base),
length=SCALE_BIAS_LENGTH * serial_ofm[3],
)
# Get weight info
weight_load = loads[2]
weight_base = [get_base_address(index) for index in weight_load.indices]
serial_weight = SerialAddressRange(
address=tvm.tir.BufferLoad(weight_load.buffer, weight_base),
length=serial_ofm[3] * serial_kernel[0] * serial_kernel[1],
)
# Get activation info
serial_activation = SerialActivation(
op=attrs["activation"], clip_min=attrs["clip_min"], clip_max=attrs["clip_max"]
)
return (
Serial2DDepthwise(
ifm=serial_ifm,
ofm=serial_ofm,
kernel=serial_kernel,
weight=serial_weight,
weight_zero_point=attrs["weight_zero_point"],
scale_bias=serial_scale_bias,
padding=serial_padding,
activation=serial_activation,
rounding_mode=attrs["rounding_mode"],
upscale="NONE",
block_config=serial_block_config,
),
output_pointer,
replace_pointer,
is_allocator,
)
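# Note (illustrative, with hypothetical shapes): unlike the 2D convolution
# case, depthwise weights carry one kernel per channel, so for
# ofm_channels = 8 and a 3x3 kernel the serialized weight length above is
# simply 8 * 3 * 3 = 72 elements, with no IFM-depth factor.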
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/tir/dma.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Extract parameters from the DMA operators in TIR."""
from typing import NamedTuple, Union
import tvm
from .utils import get_outer_loops, get_base_address, get_strides, get_op_attrs
from .spec import SerialBlockConfig, SerialFeatureMap, SerialPadding
def get_pad_params(stmt):
"""Get the padding parameters from a pad loop nest.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of a pad loop nest.
Returns
-------
pad : SerialPadding
The serializable padding.
input_pointer : tvm.tir.Var
The pointer consumed by the operation.
output_pointer : tvm.tir.Var
The pointer produced by the operation.
"""
_, body = get_op_attrs(stmt)
n, h, w, c, _, inner = get_outer_loops(body, "NHWC")
output_pointer = inner.buffer.data
pad = SerialPadding(top=0, left=0, bottom=0, right=0)
if isinstance(inner.value, tvm.tir.Call):
input_pointer = inner.value.args[1].buffer.data
else:
input_pointer = inner.value.buffer.data
return pad, input_pointer, output_pointer
padded_shape = [n.extent, h.extent, w.extent, c.extent]
def _visit(expr):
if isinstance(expr, tvm.tir.expr.LT):
var = expr.a
val = expr.b
if var == h.loop_var:
pad.bottom = padded_shape[1] - val
else:
pad.right = padded_shape[2] - val
elif isinstance(expr, tvm.tir.expr.LE):
var = expr.b
val = expr.a
if var == h.loop_var:
pad.top = val
else:
pad.left = val
cond = inner.value.args[0]
tvm.tir.stmt_functor.post_order_visit(cond, _visit)
return (
pad,
input_pointer,
output_pointer,
)
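# Note (illustrative, with hypothetical extents): for a padded height of 6,
# a pad select condition of the form (1 <= i1) && (i1 < 5) makes the visitor
# above derive pad.top = 1 from the LE node and pad.bottom = 6 - 5 = 1 from
# the LT node, and analogously for the width variable.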
def get_upscale_params(stmt):
"""Get the upscale parameters from a loop nest.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of an upscale loop nest.
Returns
-------
input_pointer : tvm.tir.Var
The pointer consumed by the operation.
output_pointer : tvm.tir.Var
The pointer produced by the operation.
"""
_, body = get_op_attrs(stmt)
_, _, _, _, _, inner = get_outer_loops(body, "NHWC")
if isinstance(inner.value, tvm.tir.Call):
input_pointer = inner.value.args[1].buffer.data
else:
input_pointer = inner.value.buffer.data
output_pointer = inner.buffer.data
return (input_pointer, output_pointer)
def get_convert_to_nhwc_params(stmt):
"""Get the true number of channels from a convert_to_nhwc loop nest.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of a convert_to_nhwc loop nest.
Returns
-------
int
The true number of channels.
input_pointer : tvm.tir.Var
The pointer consumed by the operation.
output_pointer : tvm.tir.Var
The pointer produced by the operation.
"""
attrs, body = get_op_attrs(stmt)
_, _, _, c, _, inner = get_outer_loops(body, "NHWC")
    # Ignore the reduce sum operation inserted to ensure
    # compute that is deemed unnecessary isn't removed by TVM.
if attrs["layout"] == "NHCWB16":
inner = inner.body
input_pointer = inner.value.b.buffer.data
else:
input_pointer = inner.value.buffer.data
output_pointer = inner.buffer.data
return c.extent, input_pointer, output_pointer
def get_convert_to_nhcwb16_params(stmt):
"""Get the true number of channels from a convert_to_nhcwb16 loop nest.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of a convert_to_nhcwb16 loop nest.
Returns
-------
out_channels : int
The true number of channels.
input_pointer : tvm.tir.Var
The pointer consumed by the operation.
output_pointer : tvm.tir.Var
The pointer produced by the operation.
"""
attrs, body = get_op_attrs(stmt)
_, _, _, c, b, inner = get_outer_loops(body, attrs["layout"])
output_pointer = inner.buffer.data
if isinstance(inner.value, tvm.tir.Call):
cond = inner.value.args[0]
out_channels = cond.b.value
input_pointer = inner.value.args[1].buffer.data
else:
input_pointer = inner.value.buffer.data
out_channels = c.extent * b.extent if attrs["layout"] == "NHCWB16" else c.extent
return out_channels, input_pointer, output_pointer
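# Note (illustrative, with hypothetical extents): in the NHCWB16 layout the
# channel dimension is split into c outer bricks of b = 16 channels, so the
# unconditional branch above reports c.extent * b.extent channels (e.g.
# 2 * 16 = 32); when the inner store is guarded by a call such as
# tir.if_then_else(cc < 20, ...), the true channel count (20) is recovered
# from the condition instead.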
class Tiles(NamedTuple):
height_0: tvm.tir.expr.IntImm
height_1: tvm.tir.expr.IntImm
width_0: tvm.tir.expr.IntImm
address_0: Union[tvm.tir.expr.BufferLoad, int]
address_1: Union[tvm.tir.expr.BufferLoad, int]
address_2: Union[tvm.tir.expr.BufferLoad, int]
def create_tiles(stmt: tvm.tir.stmt.AttrStmt) -> Tiles:
"""Given an AttrStmt this function returns a Tiles instance
containing the tiles' addresses and dimensions.
    When rolling buffers are not used, only tile0 is used.
    Otherwise, when rolling buffers are used, the statement contains
    modulo arithmetic operations, which are unsupported by the NPU,
    so more than one tile is used to support this scenario.
    In particular, when the rolling variable is the height,
    tile0 and tile2 are used; when the rolling variable
    is the width, tile0 and tile1 are used.
As an example consider this statement:
// attr [iter_var(i0, )] pragma_op = "ethosu_read"
// attr [iter_var(i0, )] pragma_zero_point = 0
// attr [iter_var(i0, )] pragma_layout = "NHCWB16"
// attr [iter_var(i0, )] pragma_scale = 1f
for (i0, 0, 1) {
for (i1, 0, 6) {
for (i2, 0, 1) {
for (i3, 0, 1) {
for (i4, 0, 16) {
ethosu_read[((i1*16) + i4)] = ethosu_write[((floormod((i1 + 4), 6)*16) + i4)]
}
}
}
}
}
    The floormod expression floormod((i1 + 4), 6) shows that
    the rolling variable is i1, that is, the height.
    In this case tile0 and tile2 are used.
    The height of tile0 will be 6 - 4 = 2, and the height of tile2 will be 4.
    The widths of both tile0 and tile2 will equal the extent of the width variable.
    The addresses are set accordingly.
    When the rolling variable is the width, a symmetric approach is used.
    Note that only the height of tile0, the height of tile1,
    and the width of tile0 must be computed; the others can be inferred.
"""
attrs, body = get_op_attrs(stmt)
_, h, w, _, _, inner = get_outer_loops(body, attrs["layout"])
base_address = [get_base_address(index) for index in inner.value.indices]
read_stmt = inner.value
floor_mod_mul = None
def _compute_stride(for_stmt):
stride = 1
while isinstance(for_stmt.body, tvm.tir.For):
for_stmt = for_stmt.body
stride *= for_stmt.extent
return stride
def _get_floor_mod_mul(stmt):
nonlocal floor_mod_mul
if (
isinstance(stmt, tvm.tir.expr.Mul)
and isinstance(stmt.b, tvm.tir.expr.IntImm)
and isinstance(stmt.a, tvm.tir.FloorMod)
and isinstance(stmt.a.b, tvm.tir.expr.IntImm)
and isinstance(stmt.a.a, tvm.tir.expr.Add)
and isinstance(stmt.a.a.a, tvm.tir.expr.Var)
and isinstance(stmt.a.a.b, tvm.tir.expr.IntImm)
):
floor_mod_mul = stmt
tvm.tir.stmt_functor.post_order_visit(read_stmt, _get_floor_mod_mul)
if floor_mod_mul is not None:
rolling_var = floor_mod_mul.a.a.a
count = 0
def _count_var(var):
nonlocal count
if var == rolling_var:
count += 1
tvm.tir.stmt_functor.ir_transform(inner, _count_var, None, ["tir.Var"])
if count == 2:
stride = floor_mod_mul.b
tile_length = floor_mod_mul.a.b - floor_mod_mul.a.a.b
if rolling_var == h.loop_var and _compute_stride(h) == stride:
return Tiles(
height_0=tile_length,
height_1=0,
width_0=w.extent,
address_0=tvm.tir.BufferLoad(inner.value.buffer, base_address),
address_1=0,
address_2=tvm.tir.BufferLoad(inner.value.buffer, [0]),
)
if rolling_var == w.loop_var and _compute_stride(w) == stride:
return Tiles(
height_0=h.extent,
height_1=h.extent,
width_0=tile_length,
address_0=tvm.tir.BufferLoad(inner.value.buffer, base_address),
address_1=tvm.tir.BufferLoad(inner.value.buffer, [0]),
address_2=0,
)
return Tiles(
height_0=h.extent,
height_1=0,
width_0=w.extent,
address_0=tvm.tir.BufferLoad(inner.value.buffer, base_address),
address_1=0,
address_2=0,
)
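# Note (illustrative, values taken from the docstring example above): for
# ethosu_read[...] = ethosu_write[((floormod((i1 + 4), 6)*16) + i4)] the
# matched multiply gives stride = floor_mod_mul.b = 16 and
# tile_length = floor_mod_mul.a.b - floor_mod_mul.a.a.b = 6 - 4 = 2; since
# i1 is the height variable and _compute_stride(h) == 16, the result is
# Tiles(height_0=2, height_1=0, width_0=w.extent, ...), with address_2
# pointing back at the start of the rolling buffer.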
def get_read_params(stmt):
"""Get the feature map parameters from a read loop nest.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of a read loop nest.
Returns
-------
SerialFeatureMap
The serializable feature map.
input_pointer : tvm.tir.Var
The pointer consumed by the operation.
output_pointer : tvm.tir.Var
The pointer produced by the operation.
"""
attrs, body = get_op_attrs(stmt)
_, h, w, c, _, inner = get_outer_loops(body, attrs["layout"])
input_pointer = inner.value.buffer.data
output_pointer = inner.buffer.data
    # Needed for stride calculation; this can be replaced with
    # inner.value.buffer.strides in the future.
assert len(inner.value.indices) == 1, "Ethos-U DMA expects flattened buffers"
stride_vars = [h.loop_var, w.loop_var, c.loop_var]
strides = get_strides(inner.value.indices[0], stride_vars)
data_type = inner.buffer.data.type_annotation.element_type.dtype
tiles = create_tiles(stmt)
return (
SerialFeatureMap(
data_type=data_type,
height=h.extent,
width=w.extent,
channels=c.extent,
tile_height_0=tiles.height_0,
tile_height_1=tiles.height_1,
tile_width_0=tiles.width_0,
tile_address_0=tiles.address_0,
tile_address_1=tiles.address_1,
tile_address_2=tiles.address_2,
tile_address_3=0,
scale=attrs["scale"],
zero_point=attrs["zero_point"],
layout=attrs["layout"],
stride_h=strides[0],
stride_w=strides[1],
stride_c=strides[2],
),
input_pointer,
output_pointer,
)
def get_write_params(stmt):
"""Get the feature map parameters from a write loop nest.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of a write loop nest.
Returns
-------
SerialFeatureMap
The serializable feature map.
input_pointer : tvm.tir.Var
The pointer consumed by the operation.
output_pointer : tvm.tir.Var
The pointer produced by the operation.
"""
attrs, body = get_op_attrs(stmt)
_, h, w, c, _, inner = get_outer_loops(body, attrs["layout"])
input_pointer = inner.value.buffer.data
output_pointer = inner.buffer.data
    # Needed for stride calculation; this can be replaced with
    # inner.value.buffer.strides in the future.
assert len(inner.indices) == 1, "Ethos-U DMA expects flattened buffers"
stride_vars = [h.loop_var, w.loop_var, c.loop_var]
strides = get_strides(inner.indices[0], stride_vars)
base_address = [get_base_address(index) for index in inner.indices]
data_type = inner.buffer.data.type_annotation.element_type.dtype
if "block_config_height" in attrs:
block_config = SerialBlockConfig(
height=int(attrs["block_config_height"]),
width=int(attrs["block_config_width"]),
depth=int(attrs["block_config_depth"]),
)
else:
block_config = SerialBlockConfig(0, 0, 0)
return (
SerialFeatureMap(
data_type=data_type,
height=h.extent,
width=w.extent,
channels=c.extent,
tile_height_0=h.extent,
tile_height_1=0,
tile_width_0=w.extent,
tile_address_0=tvm.tir.BufferLoad(inner.buffer, base_address),
tile_address_1=0,
tile_address_2=0,
tile_address_3=0,
scale=attrs["scale"],
zero_point=attrs["zero_point"],
layout=attrs["layout"],
stride_h=strides[0],
stride_w=strides[1],
stride_c=strides[2],
),
block_config,
input_pointer,
output_pointer,
)
def get_ifm_params(pointer, producers_consumers, stmt):
"""Get the parameters associated with the DMA capabilities for an IFM.
Parameters
----------
pointer : tvm.tir.Var
The pointer that the IFM DMA pipeline produces.
producers_consumers: ProducersConsumers
It associates pointers with the loop nest that produces
their values and with the loop nest that consumes their values.
Returns
-------
serial_ifm : SerialFeatureMap
The serializable IFM.
serial_padding : SerialPadding
The serializable padding.
"""
pad = producers_consumers.get_producer(pointer, stmt)
serial_padding, input_pointer, _ = get_pad_params(pad)
upscale = producers_consumers.get_producer(input_pointer, pad)
input_pointer, _ = get_upscale_params(upscale)
convert_to_nhwc = producers_consumers.get_producer(input_pointer, upscale)
in_channels, input_pointer, _ = get_convert_to_nhwc_params(convert_to_nhwc)
read = producers_consumers.get_producer(input_pointer, convert_to_nhwc)
serial_ifm, _, _ = get_read_params(read)
serial_ifm.channels = in_channels
floor_mod_stmt = None
for_stmt = None
def _get_buffer_var(stmt):
nonlocal for_stmt
nonlocal floor_mod_stmt
if isinstance(stmt, tvm.tir.For):
for_stmt = stmt
if isinstance(stmt, tvm.tir.FloorMod):
floor_mod_stmt = stmt
tvm.tir.stmt_functor.post_order_visit(stmt, _get_buffer_var)
if floor_mod_stmt is not None:
layout = get_op_attrs(read)[0]["layout"]
channels = serial_ifm.channels
if for_stmt.body.loop_var == floor_mod_stmt.a.a.a:
height_a = floor_mod_stmt.b - floor_mod_stmt.a.b
height_b = serial_ifm.height
serial_ifm.height = height_a + height_b
serial_ifm.tile_height_0 = serial_ifm.height
address = serial_ifm.tile_address_0
offset = (
height_a * (channels // 16 + 1) * serial_ifm.width * 16
if layout == "NHCWB16"
else height_a * serial_ifm.width * channels
)
serial_ifm.tile_address_0 = tvm.tir.BufferLoad(
address.buffer, [address.indices[0] - offset]
)
else:
width_a = floor_mod_stmt.b - floor_mod_stmt.a.b
width_b = serial_ifm.width
serial_ifm.width = width_a + width_b
serial_ifm.tile_width_0 = serial_ifm.width
address = serial_ifm.tile_address_0
offset = width_a * 16 if layout == "NHCWB16" else width_a * channels
serial_ifm.tile_address_0 = tvm.tir.BufferLoad(
address.buffer, [address.indices[0] - offset]
)
return serial_ifm, serial_padding
def get_ofm_params(pointer, producers_consumers, stmt):
"""Get the parameters associated with the DMA capabilities for an OFM.
Parameters
----------
pointer : tvm.tir.Var
The pointer that the OFM DMA pipeline consumes.
producers_consumers: ProducersConsumers
It associates pointers with the loop nest that produces
their values and with the loop nest that consumes their values.
Returns
-------
serial_ifm : SerialFeatureMap
The serializable OFM.
serial_block_config : SerialBlockConfig
The serializable block config.
output_pointer : tvm.tir.Var
The pointer that the OFM DMA pipeline produces.
is_allocator : bool
Whether this operator allocates its output.
"""
convert_to_nhcwb16 = producers_consumers.get_consumer(pointer, stmt)
out_channels, _, output_pointer = get_convert_to_nhcwb16_params(convert_to_nhcwb16)
write = producers_consumers.get_consumer(output_pointer, convert_to_nhcwb16)
serial_ofm, serial_block_config, _, output_pointer = get_write_params(write)
is_allocator = True
producer = producers_consumers.get_producer(output_pointer, write)
if producer is None or producer != write:
is_allocator = False
serial_ofm.channels = out_channels
return serial_ofm, serial_block_config, output_pointer, is_allocator
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/tir/identity.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Extract information from the identity operator in TIR."""
from typing import Tuple
import tvm
from .spec import (
SerialBlockConfig,
SerialKernel,
SerialActivation,
SerialPooling,
SerialPadding,
SerialFeatureMap,
)
from .utils import get_op_attrs, get_base_address, get_strides, get_loads
from .producers_consumers import ProducersConsumers
def _get_feature_map(stmt: tvm.tir.AttrStmt, fm_type: str) -> Tuple[SerialFeatureMap, tvm.tir.Var]:
"""Get the feature map parameters from a loop nest of any shape (as long there are at
most 4 nested loops).
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of a loop nest.
fm_type: str
Either "ifm" or "ofm", depending on whether it is an input or output feature map
Returns
-------
SerialFeatureMap
The serializable feature map.
output_pointer : tvm.tir.Var
The pointer produced by the operation.
"""
assert fm_type in ("ifm", "ofm")
attrs, body = get_op_attrs(stmt)
loops = []
inner = body
# extract the loops and the innermost statement
while hasattr(inner, "body"):
loops.append(inner)
inner = inner.body
# If the batch size loop is present, we need to remove it
if len(loops) > 3:
assert loops[0].extent == 1
loops = loops[1:]
fm_inner = inner.value if fm_type == "ifm" else inner
# Needed for stride calculation, can replace with
# inner.value.buffer.strides in future.
assert len(fm_inner.indices) == 1, "Ethos-U passes expect flattened buffers"
stride_vars = [l.loop_var for l in loops]
strides = get_strides(fm_inner.indices[0], stride_vars)
base_address = [get_base_address(index) for index in fm_inner.indices]
data_type = inner.buffer.data.type_annotation.element_type.dtype
serial_feature_map = SerialFeatureMap(
data_type=data_type,
height=loops[0].extent,
width=loops[1].extent if len(loops) > 1 else 1,
channels=loops[2].extent if len(loops) > 2 else 1,
tile_height_0=loops[0].extent,
tile_height_1=0,
tile_width_0=loops[1].extent if len(loops) > 1 else 1,
tile_address_0=tvm.tir.BufferLoad(fm_inner.buffer, base_address),
tile_address_1=0,
tile_address_2=0,
tile_address_3=0,
scale=attrs["scale"],
zero_point=attrs["zero_point"],
layout="NHWC",
stride_h=strides[0] if len(strides) > 0 else 1,
stride_w=strides[1] if len(strides) > 1 else 1,
stride_c=strides[2] if len(strides) > 2 else 1,
)
output_pointer = inner.buffer.data
return serial_feature_map, output_pointer
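# A minimal sketch (comments only) of a loop nest _get_feature_map can walk,
# for a hypothetical 2x4x8 NHWC feature map over a flattened buffer:
#
#   for (h, 0, 2):
#       for (w, 0, 4):
#           for (c, 0, 8):
#               out[h*32 + w*8 + c] = f(in[h*32 + w*8 + c])
#
# which yields height=2, width=4, channels=8 and strides (32, 8, 1).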
def get_identity_params(
stmt: tvm.tir.AttrStmt, producers_consumers: ProducersConsumers
) -> Tuple[SerialPooling, tvm.tir.Var, tvm.tir.Var]:
"""Get the parameters necessary to construct a call_extern for an identity pooling.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of an identity pooling loop nest.
producers_consumers: ProducersConsumers
It associates pointers with the loop nest that produces
their values and with the loop nest that consumes their values.
Returns
-------
SerialPooling
The parameters needed to construct a 2D pooling.
output_pointer : tvm.tir.Var
The output pointer of the pooling operation.
replace_pointer : tvm.tir.Var
The output pointer of the DMA write operation, which is to replace
the pooling output pointer.
is_allocator : bool
Whether this operator allocates its output.
"""
attrs, _ = get_op_attrs(stmt)
# Find the inner loop
store = stmt
while hasattr(store, "body"):
store = store.body
# loads = [input, LUT, LUT]
loads = get_loads(store)
input_pointer = loads[0].buffer.data
output_pointer = store.buffer.data
read = producers_consumers.get_producer(input_pointer, stmt)
write = producers_consumers.get_consumer(output_pointer, stmt)
serial_ifm, _ = _get_feature_map(read, "ifm")
serial_ofm, write_output_pointer = _get_feature_map(write, "ofm")
replace_pointer = write_output_pointer
is_allocator = True
producer = producers_consumers.get_producer(write_output_pointer, write)
if producer is None or producer != write:
is_allocator = False
# TODO: We might want to support stand-alone ReLU in the future by adding clip_min and
# clip_max attributes to the identity operator
serial_activation = SerialActivation(op=attrs["activation"], clip_min=0, clip_max=0)
# Create a serialized identity pooling to be run on the NPU
return (
SerialPooling(
ifm=serial_ifm,
ofm=serial_ofm,
pooling_type="AVG",
pool_shape=SerialKernel(1, 1, 1, 1, 1, 1),
padding=SerialPadding(0, 0, 0, 0),
activation=serial_activation,
upscale="NONE",
rounding_mode="TFL",
block_config=SerialBlockConfig(0, 0, 0),
),
output_pointer,
replace_pointer,
is_allocator,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/tir/passes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, no-else-return, inconsistent-return-statements, too-many-nested-blocks
"""The TIR passes to be run on Arm(R) Ethos(TM)-U NPU TIR Compiler."""
from collections import namedtuple
from typing import Optional
import numpy as np # type: ignore
import tvm
from tvm.relay.backend.contrib.ethosu import vela_api
from tvm.relay.backend.contrib.ethosu import tir_to_cs_translator as tirtocs
from ethosu.vela import api as vapi
from .convolution import get_conv2d_params
from .depthwise import get_depthwise_conv2d_params
from .pooling import get_pooling_params
from .binary_elementwise import get_binary_elementwise_params
from .identity import get_identity_params
from .unary_elementwise import get_unary_elementwise_params
from .transform import get_copy_params
from .producers_consumers import ProducersConsumers
from .. import _ffi_api
def RemoveZeroStores():
"""This pass removes stores which just store zero to initialise buffers.
We don't codegen these stores and they would otherwise considerably
complicate the static traversal of the convolution loop nest."""
def _remove_zero_store(stmt):
if isinstance(stmt.value, tvm.tir.IntImm) and int(stmt.value) == 0:
return tvm.tir.Evaluate(tvm.tir.IntImm("uint8", 0))
return stmt
def _ftransform(f, mod, ctx):
return f.with_body(
tvm.tir.stmt_functor.ir_transform(f.body, _remove_zero_store, None, ["tir.Store"])
)
return tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.contrib.ethos-u.remove_zero_stores"
)
def ReplaceOperators():
"""Replace operators represented as explicit loop nests with call_externs
to NPU operators."""
op_map = {
"ethosu_conv2d": get_conv2d_params,
"ethosu_copy": get_copy_params,
"ethosu_depthwise_conv2d": get_depthwise_conv2d_params,
"ethosu_pooling": get_pooling_params,
"ethosu_binary_elementwise": get_binary_elementwise_params,
"ethosu_identity": get_identity_params,
"ethosu_unary_elementwise": get_unary_elementwise_params,
}
producers_consumers = ProducersConsumers()
replace_output_pointer = {}
pointer_to_extents = {}
ReplaceInfo = namedtuple("ReplaceInfo", ["pointer", "reallocate"])
def _find_pointer_to_extent(stmt):
if isinstance(stmt, tvm.tir.Allocate):
pointer_to_extents[stmt.buffer_var] = stmt.extents
def _resolve_pointers(stmt):
"""This pass determines information about the pointers present in the IR.
In particular, it associates pointers with both the operations that
produce them and the operations that consume them through the
pointer_to_producer and pointer_to_consumer dicts.
Additionally, it determines the extent (size/shape) of each pointer which
is required for the _replace_pointers pass which runs later."""
loads = []
def _get_loads(stmt):
if isinstance(stmt, tvm.tir.BufferLoad):
loads.append(stmt.buffer.data)
buffer_var = None
def _get_buffer_var(stmt):
if isinstance(stmt, tvm.tir.BufferStore):
nonlocal buffer_var
buffer_var = stmt.buffer.data
if isinstance(stmt, tvm.tir.AttrStmt):
if stmt.attr_key == "pragma_op":
tvm.tir.stmt_functor.post_order_visit(stmt, _get_buffer_var)
producers_consumers.add_producer(buffer_var, stmt)
tvm.tir.stmt_functor.post_order_visit(stmt, _get_loads)
for load_pointer in loads:
if load_pointer != buffer_var:
producers_consumers.add_consumer(load_pointer, stmt)
def _replace_operator(stmt):
"""Replace operators with call_externs, having derived the parameters
from the relevant TIR expressions/statements.
Note the complexity of this pass is mostly from the concept of 'replace
pointers'. A call_extern may in principle require information from several
loop nests in TIR (each corresponding to a different TE compute op). For
example, a convolution operator will have other TE compute ops before and
after corresponding to the input/output DMA functionality. Therefore, when
the 'central' convolution op is replaced with a call_extern, the memory
from the final DMA output op must be hoisted to the location/scope of
the call_extern.
This is done by replacing the pointer corresponding to the current operation
with the pointer of the 'true' output operator through the replace_output_pointer
dict. Because of this, the param_func must provide a replace_pointer whenever the
true output is produced not by the op itself but by a no_compile op."""
if isinstance(stmt, tvm.tir.AttrStmt):
op_name = stmt.value.value
if stmt.attr_key == "pragma_op" and op_name in op_map:
# Get the parameters for the extern call
param_func = op_map[op_name]
info, output_pointer, replace_pointer, is_allocator = param_func(
stmt, producers_consumers
)
if replace_pointer is not None:
replace_output_pointer[output_pointer] = ReplaceInfo(
replace_pointer, is_allocator
)
# Make the extern call
irb = tvm.tir.ir_builder.create()
irb.emit(tvm.tir.call_extern("handle", op_name, *info))
return irb.get()
return None
def _remove_no_compile(stmt):
"""Certain operators are marked as 'no compile' operators. This means they
should be removed from the IR as they are compiled as part of other operators.
The IFM DMA operations are an example of this, as they don't get compiled
independently but instead get compiled into the operator they're associated with,
e.g. a conv2d.
There are potentially 2 parts to remove for an operator:
the allocate for its output and the compute nest itself. For the
allocate, we can check if the pointer they reference is produced by a 'no compile'
operator. For the compute nest, we can just check the op pragma."""
if isinstance(stmt, tvm.tir.AttrStmt):
# Remove compute nests
if stmt.attr_key == "pragma_op" and stmt.value.value not in op_map:
return tvm.tir.Evaluate(0)
if isinstance(stmt, tvm.tir.Allocate):
# Remove allocates
producer = producers_consumers.get_last_producer(stmt.buffer_var)
if producer:
if producer.attr_key == "pragma_op" and producer.value.value not in op_map:
return stmt.body
return None
def _replace_pointers(stmt):
if isinstance(stmt, tvm.tir.Allocate):
# If the allocate allocates a pointer that needs replacing
if stmt.buffer_var in replace_output_pointer:
replace_pointer, reallocate = replace_output_pointer[stmt.buffer_var]
if not reallocate:
return stmt.body
# Otherwise, rewrite the allocation statement with the new pointer
# and the new extent
replace_type = replace_pointer.type_annotation.element_type.dtype
replace_extents = pointer_to_extents[replace_pointer]
return tvm.tir.Allocate(
replace_pointer, replace_type, replace_extents, stmt.condition, stmt.body
)
return None
def _post_transform(stmt):
# Replace operators with call_externs
result = _replace_operator(stmt)
# Remove operators that don't need compiling
result = result or _remove_no_compile(stmt)
# Replace necessary pointers that were removed in the previous step
return result or _replace_pointers(stmt)
def _ftransform(f, mod, ctx):
tvm.tir.stmt_functor.post_order_visit(f.body, _find_pointer_to_extent)
tvm.tir.stmt_functor.post_order_visit(f.body, _resolve_pointers)
producers_consumers.add_allocate_variables(pointer_to_extents.keys())
return f.with_body(
tvm.tir.stmt_functor.ir_transform(
f.body, None, _post_transform, ["tir.AttrStmt", "tir.Allocate"]
)
)
return tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.contrib.ethos-u.replace_operators"
)
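# A minimal usage sketch (`mod` is assumed to be a scheduled Ethos-U TIR module):
#
#   mod = ReplaceOperators()(mod)
#
# After the pass, each pragma_op loop nest listed in op_map has been collapsed
# into a single call_extern("handle", "ethosu_<op>", <serialized parameters>...).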
def DivideConstants(const_dict):
"""This pass rewrites the IR and constant dict such that all constant
accesses are at 0 offset and full length (i.e. they read the whole buffer).
Where necessary, new constants are created in order to ensure the rewrite
can take place. As an example, if a convolution is tiled along the channels
axis, the accesses to the weights will need to be offset. This pass will
create new constants consisting of 'slices' of the weights so each tile
of the compute can access one of these 'slices'.
The purpose of this pass is to transform the IR into a form we can apply
constant encoding to (which will compress weights and encode biases)."""
buffer_to_const = {} # type: ignore
new_buffers = []
new_consts = []
keep_buffers = set()
new_const_dict = {}
def _visit(stmt):
new_args = []
# We don't want to divide the constant that will be executed on two cores in parallel
is_u65_conv2d = (
vela_api.get_accelerator_config() == vapi.NpuAccelerator.Ethos_U65_512
and stmt.args[0] == "ethosu_conv2d"
)
for i, arg in enumerate(stmt.args):
if isinstance(arg, tvm.tir.expr.BufferLoad):
# If we're trying to load a buffer that maps to a constant
if arg.buffer.data in buffer_to_const:
const = buffer_to_const[arg.buffer.data]
flattened_const_shape = np.prod(const.shape)
offset = int(arg.indices[0])
# Note by convention the arg after a constant read is the length of the read
length = int(stmt.args[i + 1])
# If it's anything other than a full read, create a new buffer
if (offset != 0 or flattened_const_shape != length) and not is_u65_conv2d:
out_channels = const.shape[0]
offset_channels = int((offset * out_channels) / flattened_const_shape)
length_channels = int((length * out_channels) / flattened_const_shape)
# split the constant up across channels
split_const = np.split(const, out_channels, axis=0)
# create a new const out of the channels we want to keep
new_const = np.concatenate(
split_const[offset_channels : offset_channels + length_channels], axis=0
)
new_consts.append(new_const)
new_buffer = tvm.tir.decl_buffer(
(length,), arg.dtype, scope=arg.buffer.scope()
)
new_buffers.append(new_buffer)
new_args.append(tvm.tir.expr.BufferLoad(new_buffer, [0]))
continue
keep_buffers.add(arg.buffer.data)
new_args.append(arg)
return tvm.tir.Call(stmt.dtype, stmt.op, new_args, stmt.span)
def _ftransform(f, mod, ctx):
for i, param in enumerate(f.params):
if i in const_dict:
buffer_to_const[param] = const_dict[i]
buffer_to_const[f.buffer_map[param].data] = const_dict[i]
new_body = tvm.tir.stmt_functor.ir_transform(f.body, _visit, None, ["tir.Call"])
# Both the params and buffer map need updating for the newly introduced buffers
new_params = [] # type: ignore
new_buffer_map = {}
for i, param in enumerate(f.params):
buffer = f.buffer_map[param]
pointer = buffer.data
if pointer in buffer_to_const:
if pointer not in keep_buffers:
continue
new_const_dict[len(new_params)] = const_dict[i]
new_params.append(param)
new_buffer_map[param] = buffer
for i, new_buffer in enumerate(new_buffers):
handle = tvm.tir.Var("placeholder", "handle")
new_params.append(handle)
new_buffer_map[handle] = new_buffer
new_const_dict[len(new_params) - 1] = new_consts[i]
new_f = tvm.tir.PrimFunc(
new_params,
new_body,
f.ret_type,
new_buffer_map,
f.preflattened_buffer_map,
f.attrs,
f.span,
)
return new_f
def _divide_constants(mod):
transform_func = tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.contrib.ethos-u.divide_constants"
)
new_func = transform_func(mod)
return new_func, new_const_dict
return _divide_constants
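# A minimal usage sketch (const_dict is assumed to map param index -> numpy array):
#
#   mod, new_const_dict = DivideConstants(const_dict)(mod)
#
# Every constant access in the returned module starts at offset 0 and reads the
# full buffer; new_const_dict holds the (possibly sliced) constants keyed by the
# rewritten parameter indices.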
def EncodeConstants(const_dict):
"""the NPU requires that weights are compressed and bias/scales are 'encoded', both
of which are performed by this pass.
This pass modifies both the constant dict to contain the post-encoding values of the
constants and the IR to adjust buffer types/sizes/accesses so they align with the
encoded constants. Calls to the Vela API are made to perform the actual compression/
encoding.
"""
new_const_dict = {}
def collect_encoding_definitions(stmt, old_buffer_to_const):
# Map from copy destination to copy source.
copy_map = {}
# List of buffer copies that occurred
copied_buffers = []
# List of encoded buffer information
constant_buffer_replacements = []
def _align_scale_bias(tir_extern_call, bias):
"""Align the scale_bias to 16 bytes."""
value_bytes = bytearray()
value_bytes.extend(bias.tobytes())
# Align to 16
remainder = (len(value_bytes)) % 16
if remainder > 0:
value_bytes.extend(bytearray(16 - remainder))
value = np.frombuffer(value_bytes, dtype="uint8")
return value
accel_config = vela_api.get_accelerator_config()
def _encode_weights(tir_extern_call, weights):
"""Encode the weights for a TIR extern call."""
value_bytes = vela_api.encode_weights(tir_extern_call, weights, accel_config)
value = np.frombuffer(value_bytes, dtype="uint8")
return value
def _declare_constant_buffer(old_buffer, encoded_constants, split_idx):
"""Create a new buffer and add the old buffer and its pointer to the
rewriting maps."""
new_buffer = tvm.tir.decl_buffer(
shape=[len(encoded_constants)],
dtype=str(encoded_constants.dtype),
name=old_buffer.name + "_encoded",
scope=old_buffer.scope(),
)
constant_buffer_replacements.append(
{
"old_buffer": old_buffer,
"new_buffer": new_buffer,
"encoded_constants": encoded_constants,
"split_idx": split_idx,
}
)
def _encode_weights_or_bias(buffer1, buffer2, stmt, encode_func):
"""Encode the weights or align the bias either for one or two cores,
depending on the variant."""
constant = old_buffer_to_const[buffer1]
# If we have just one core, encode the whole constant
if buffer2 is None:
new_const = encode_func(stmt, constant)
return new_const, None
# Assume that the constant tensor has not been flattened yet
assert len(constant.shape) != 1
channels = constant.shape[0]
split_const = np.split(constant, channels, axis=0)
const_list = [split_const[i] for i in range(channels) if i % 2 == 0]
const_to_encode = np.concatenate(const_list, axis=0)
new_const = encode_func(stmt, const_to_encode)
split_idx = len(new_const)
# Encode half of the constant separately for the other core if it exists
assert buffer1.same_as(buffer2)
const2_list = [split_const[i] for i in range(channels) if i % 2 == 1]
const2_to_encode = np.concatenate(const2_list, axis=0)
new_const2 = encode_func(stmt, const2_to_encode)
new_const = np.append(new_const, new_const2).astype("uint8")
return new_const, split_idx
def _visit(stmt):
if isinstance(stmt, tvm.tir.Call):
op = str(stmt.args[0].value)
# Handle copies as a special-case by propagating the buffer information
# from the read to the write pointer.
if op == "ethosu_copy":
read_buffer = stmt.args[1].buffer
write_buffer = stmt.args[3].buffer
# Assert writing to the base of the write_var (pre-StorageRewrite)
assert list(stmt.args[3].indices) == [0]
assert list(stmt.args[1].indices) == [0]
copied_buffers.append({"source": read_buffer, "dest": write_buffer})
copy_map[write_buffer] = read_buffer
ops_with_weights = {
"ethosu_conv2d": tirtocs.translate_ethosu_conv2d,
"ethosu_depthwise_conv2d": tirtocs.translate_ethosu_depthwise_conv2d,
}
if op in ops_with_weights:
npu_op, _ = ops_with_weights[op](stmt)
# Encode the weights
weights_buffer = npu_op.weights[0].address.buffer
if weights_buffer in copy_map:
weights_buffer = copy_map[weights_buffer]
# In case of U65 512 mac variant the weights are split across two cores
# and need to be encoded separately
weights2_buffer = (
npu_op.weights[1].address.buffer
if accel_config == vapi.NpuAccelerator.Ethos_U65_512
else None
)
if weights2_buffer in copy_map:
weights2_buffer = copy_map[weights2_buffer]
new_weights, split_idx = _encode_weights_or_bias(
weights_buffer, weights2_buffer, stmt, _encode_weights
)
_declare_constant_buffer(weights_buffer, new_weights, split_idx)
# Align the scale_bias to 16 bytes
scale_bias_buffer = npu_op.biases[0].address.buffer
if scale_bias_buffer in copy_map:
scale_bias_buffer = copy_map[scale_bias_buffer]
scale_bias2_buffer = (
npu_op.biases[1].address.buffer
if accel_config == vapi.NpuAccelerator.Ethos_U65_512
else None
)
if scale_bias2_buffer in copy_map:
scale_bias2_buffer = copy_map[scale_bias2_buffer]
new_scale_bias, split_idx = _encode_weights_or_bias(
scale_bias_buffer, scale_bias2_buffer, stmt, _align_scale_bias
)
_declare_constant_buffer(scale_bias_buffer, new_scale_bias, split_idx)
tvm.tir.stmt_functor.post_order_visit(stmt, _visit)
return {
"copied_buffers": copied_buffers,
"constant_buffer_replacements": constant_buffer_replacements,
}
def transform_stmt(
stmt, buf_remap, var_remap, pointer_to_buffer, new_buffer_to_const, new_buffer_to_split_idx
):
def _visit_rewrite(stmt):
if isinstance(stmt, tvm.tir.Call):
# For extern calls, we need to rewrite pairs of arguments corresponding to
# base address load and the length of the load.
old_args = list(stmt.args)
new_args = [stmt.args[0]]
for prev_arg, arg in zip(old_args[:-1], old_args[1:]):
# If the previous argument was a load from an
# encoded buffer, the current should be a length.
if (
isinstance(prev_arg, tvm.tir.BufferLoad)
and prev_arg.buffer in new_buffer_to_const
):
buffer_size = np.prod(list(prev_arg.buffer.shape))
arg = buffer_size
# We have to check for split weights/bias for conv2d and depthwise_conv2d
if old_args[0] in ("ethosu_conv2d", "ethosu_depthwise_conv2d"):
# We have split weights/bias
if prev_arg.buffer in new_buffer_to_split_idx:
split_idx = new_buffer_to_split_idx[prev_arg.buffer]
# The first half of the split buffer
if prev_arg.indices[0] == 0:
arg = split_idx
# the second half of the split buffer
else:
arg = buffer_size - split_idx
new_args.append(arg)
return tvm.tir.Call(stmt.dtype, stmt.op, new_args, stmt.span)
if isinstance(stmt, tvm.tir.Allocate):
# Where a pointer needs rewriting, the allocate for it must be rewritten
allocate_pointer = stmt.buffer_var
if allocate_pointer in var_remap:
new_allocate_pointer = var_remap[allocate_pointer]
new_buffer = pointer_to_buffer[new_allocate_pointer]
return tvm.tir.Allocate(
new_buffer.data,
new_buffer.dtype,
new_buffer.shape,
stmt.condition,
stmt.body,
stmt.span,
)
# The following rewrites would be better expressed by just
# rewriting the Buffers. However ir_transform doesn't
# visit Buffers, so instead we do the next best thing and
# rewrite the nodes which contain the Buffers.
if isinstance(stmt, tvm.tir.BufferLoad):
if stmt.buffer in buf_remap:
new_buffer = buf_remap[stmt.buffer]
new_indices = stmt.indices
offset = new_indices[0]
if offset != 0 and new_buffer in new_buffer_to_split_idx:
offset = new_buffer_to_split_idx[new_buffer]
return tvm.tir.BufferLoad(buf_remap[stmt.buffer], [offset], stmt.span)
if isinstance(stmt, tvm.tir.AttrStmt):
node_pointer = stmt.node
if node_pointer in var_remap:
return tvm.tir.AttrStmt(
var_remap[node_pointer],
stmt.attr_key,
stmt.value,
stmt.body,
stmt.span,
)
return None
return tvm.tir.stmt_functor.ir_transform(
stmt,
None,
_visit_rewrite,
["tir.Call", "tir.Allocate", "tir.BufferLoad", "tir.AttrStmt"],
)
def _ftransform(f, mod, ctx):
# Step 0: Unpack the constant dictionary in terms of the
# functions buffers.
old_buffer_to_const = {}
for i, param in enumerate(f.params):
if i in const_dict:
old_buffer_to_const[f.buffer_map[param]] = const_dict[i]
# Step 1: Collect information on the buffers that will be
# replaced by encodings.
buffer_information = collect_encoding_definitions(f.body, old_buffer_to_const)
# Step 2: Generate variable/buffer remaps, based on the
# collected information.
buf_remap = {}
new_buffer_to_const = {}
new_buffer_to_split_idx = {}
# Any encoded buffers must be replaced
for info in buffer_information["constant_buffer_replacements"]:
buf_remap[info["old_buffer"]] = info["new_buffer"]
new_buffer_to_const[info["new_buffer"]] = info["encoded_constants"]
if info["split_idx"]:
new_buffer_to_split_idx[info["new_buffer"]] = info["split_idx"]
# Any buffers that are copied into from an encoded buffer must
# be replaced.
for info in buffer_information["copied_buffers"]:
copy_source = info["source"]
while copy_source in buf_remap:
copy_source = buf_remap[copy_source]
copy_dest = info["dest"]
if copy_source.shape != copy_dest.shape or copy_source.dtype != copy_dest.dtype:
new_dest = tvm.tir.decl_buffer(
shape=copy_source.shape,
dtype=copy_source.dtype,
name=copy_dest.name,
scope=copy_dest.scope(),
)
buf_remap[copy_dest] = new_dest
if copy_source in new_buffer_to_const:
new_buffer_to_const[new_dest] = new_buffer_to_const[copy_source]
if copy_source in new_buffer_to_split_idx:
new_buffer_to_split_idx[new_dest] = new_buffer_to_split_idx[copy_source]
# Define additional dependent lookup tables.
var_remap = {old.data: new.data for (old, new) in buf_remap.items()}
pointer_to_buffer = {
buf.data: buf for (old, new) in buf_remap.items() for buf in [old, new]
}
# Step 3: Then perform the rewrites
new_body = transform_stmt(
f.body,
buf_remap,
var_remap,
pointer_to_buffer,
new_buffer_to_const,
new_buffer_to_split_idx,
)
# Step 4: Rewrite the buffer map and const dict to instead use the encoded versions
new_buffer_map = {}
for i, param in enumerate(f.params):
buffer = f.buffer_map[param]
if buffer in buf_remap:
buffer = buf_remap[buffer]
if buffer in new_buffer_to_const:
new_const_dict[i] = new_buffer_to_const[buffer].flatten()
elif buffer in old_buffer_to_const:
new_const_dict[i] = old_buffer_to_const[buffer].flatten()
new_buffer_map[param] = buffer
new_f = tvm.tir.PrimFunc(
f.params,
new_body,
f.ret_type,
new_buffer_map,
f.preflattened_buffer_map,
f.attrs,
f.span,
)
return new_f
def _encode_constants(mod):
mod, divided_const_dict = DivideConstants(const_dict)(mod)
const_dict.clear()
for key, value in divided_const_dict.items():
const_dict[key] = value
transform_func = tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.contrib.ethos-u.encode_constants"
)
new_func = transform_func(mod)
return new_func, new_const_dict
return _encode_constants
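# A minimal usage sketch, mirroring DivideConstants above:
#
#   mod, encoded_const_dict = EncodeConstants(const_dict)(mod)
#
# Note that _encode_constants runs DivideConstants internally first, so this
# single entry point performs both the dividing and the encoding.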
# This needs to be kept in sync with kDisableLowerTVMBuiltin in include/tvm/tir/transform.h
DISABLE_LOWER_BUILTIN = "disable_lower_builtin"
def AnnotateAllocates():
"""
This pass annotates all allocate nodes of the
microNPU PrimFuncs so that they are not
lowered to built-ins.
"""
def _post_transform(allocate):
return tvm.tir.Allocate(
buffer_var=allocate.buffer_var,
dtype=allocate.dtype,
extents=allocate.extents,
condition=allocate.condition,
body=allocate.body,
annotations={DISABLE_LOWER_BUILTIN: True},
)
def _ftransform(f, mod, ctx):
return f.with_body(
tvm.tir.stmt_functor.ir_transform(f.body, None, _post_transform, ["tir.Allocate"])
)
return tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.contrib.ethos-u.annotate_allocates"
)
def RemoveConcatenates():
"""Remove concatenate operators by modifying the input buffers to write directly into
the concatenated buffer with the appropriate offset.
This pass works in two stages. The first finds every concatenate operation (marked by
pragma_op = ethosu_concatenate) and performs the following analysis. For each buffer
that is concatenated, the buffer is marked that it is to be replaced with the concat
buffer and the axis along which it is concatenated as well as the offset along that
axis is recorded in 'ReplaceInfo'. Once this analysis is completed, the concatenate
loop nest along with its buffer realization statements are removed.
In the second stage, the input buffers to the concatenate operators are rewritten
to use the concat buffer directly. This means applying the correct offset to the
concatenation axis wherever the buffer is loaded or stored. Additionally, as the
realization statements for the concat buffers were removed in the first stage, they
are rewritten in place of the input buffer realization with the earliest liveness."""
in_concat = [False] # Whether the visitor is currently inside a concatenate operator
concat_buffers = [] # The buffers produced by concatenate operators
buffer_replace_map = {} # A map of buffers to be replaced with the concat buffer
attrs_by_buffer = {} # AttrStmts by the buffer they reference
realizes_by_buffer = {} # BufferRealize statements by the buffer they reference
first_replacements = {} # The first buffers to be replaced by a given concat buffer
ReplaceInfo = namedtuple("ReplaceInfo", ["buffer", "axis", "offset"])
def _get_replace_info(buffer_load, concat_buffer):
axis = 0
offset = 0
dmap = dict()
for i, index in enumerate(buffer_load.indices):
if isinstance(index, tvm.tir.Sub):
axis = i
dmap = {}
def _visit(stmt):
if isinstance(stmt, tvm.tir.Var):
dmap[stmt] = tvm.arith.IntervalSet(0, 0)
tvm.tir.stmt_functor.post_order_visit(index, _visit)
offset = abs(int(tvm.arith.Analyzer().int_set(index, dmap).max_value))
return ReplaceInfo(concat_buffer, axis, offset)
def _pre_remove(stmt):
if isinstance(stmt, tvm.tir.BufferRealize):
# Record the realize statements by buffer as we need to hoist some of these
realizes_by_buffer[stmt.buffer] = stmt
if isinstance(stmt, tvm.tir.AttrStmt):
if stmt.attr_key == "realize_scope" and isinstance(stmt.node, tvm.tir.Buffer):
# Record the realize_scope attrs by buffer as we need to hoist some of these
attrs_by_buffer[stmt.node] = stmt
if stmt.attr_key == "pragma_op" and stmt.value.value == "ethosu_concatenate":
# Record that we're entering a concatenate loop nest
in_concat[0] = True
if isinstance(stmt, tvm.tir.BufferLoad) and in_concat[0]:
# Any buffer loaded inside a concat is a buffer we intend to replace with this pass.
# The buffer_replace_map keeps track of which buffers need replacing with the
# concat buffer.
replace_info = _get_replace_info(stmt, concat_buffers[-1])
buffer_replace_map[stmt.buffer] = replace_info
if isinstance(stmt, tvm.tir.BufferStore) and in_concat[0]:
# If we're inside a concat, the BufferStore indicates what the concat buffer is
concat_buffers.append(stmt.buffer)
def _post_remove(stmt):
if isinstance(stmt, tvm.tir.AttrStmt):
if isinstance(stmt.node, tvm.tir.Buffer) and stmt.node in concat_buffers:
return stmt.body
if stmt.attr_key == "pragma_op" and stmt.value.value == "ethosu_concatenate":
# When we leave a concatenate operator, record it and then remove the loop nest
in_concat[0] = False
return tvm.tir.Evaluate(0)
if isinstance(stmt, tvm.tir.BufferRealize):
if stmt.buffer in concat_buffers:
return stmt.body
return None
def _pre_replace(stmt):
if isinstance(stmt, (tvm.tir.BufferLoad, tvm.tir.BufferStore)):
# The first buffer referenced that needs replacing with a concat buffer shall
# be the one that the concat buffer realize is hoisted to.
if stmt.buffer in buffer_replace_map:
concat_buffer = buffer_replace_map[stmt.buffer].buffer
if concat_buffer not in first_replacements:
first_replacements[concat_buffer] = stmt.buffer
def _post_replace(stmt):
if isinstance(stmt, tvm.tir.BufferStore):
if stmt.buffer in buffer_replace_map:
# Replace the original buffer store with a new one into the concat buffer
# and adjust the indices accordingly to account for the offset
replace_info = buffer_replace_map[stmt.buffer]
concat_buffer = replace_info.buffer
new_indices = list(stmt.indices)
new_indices[replace_info.axis] += replace_info.offset
# The new buffer store node that stores the tensor directly into the concat buffer
new_store = tvm.tir.BufferStore(concat_buffer, stmt.value, new_indices, stmt.span)
return new_store
if isinstance(stmt, tvm.tir.BufferLoad):
if stmt.buffer in buffer_replace_map:
# Replace the original buffer load with a new one into the concat buffer
# and adjust the indices accordingly to account for the offset
replace_info = buffer_replace_map[stmt.buffer]
concat_buffer = replace_info.buffer
new_indices = list(stmt.indices)
new_indices[replace_info.axis] += replace_info.offset
new_load = tvm.tir.BufferLoad(concat_buffer, new_indices, stmt.span)
return new_load
if isinstance(stmt, tvm.tir.BufferRealize):
if stmt.buffer in buffer_replace_map:
concat_buffer = buffer_replace_map[stmt.buffer].buffer
# If this isn't the first buffer replaced, don't hoist the realize
if first_replacements[concat_buffer] != stmt.buffer:
return stmt.body
# Otherwise, do hoist it
else:
concat_realize = realizes_by_buffer[concat_buffer]
new_realize = tvm.tir.BufferRealize(
concat_realize.buffer,
concat_realize.bounds,
concat_realize.condition,
stmt.body,
stmt.span,
)
return new_realize
if isinstance(stmt, tvm.tir.AttrStmt):
if isinstance(stmt.node, tvm.tir.Buffer) and stmt.node in buffer_replace_map:
concat_buffer = buffer_replace_map[stmt.node].buffer
# If this isn't the first buffer replaced, don't hoist the attrstmt
if first_replacements[concat_buffer] != stmt.node:
return stmt.body
# Otherwise, do hoist it
else:
concat_attr = attrs_by_buffer[concat_buffer]
new_attr = tvm.tir.AttrStmt(
concat_attr.node,
concat_attr.attr_key,
concat_attr.value,
stmt.body,
stmt.span,
)
return new_attr
def _ftransform(f, mod, ctx):
f = f.with_body(
tvm.tir.stmt_functor.ir_transform(
f.body,
_pre_remove,
_post_remove,
["tir.AttrStmt", "tir.BufferLoad", "tir.BufferStore", "tir.BufferRealize"],
)
)
return f.with_body(
tvm.tir.stmt_functor.ir_transform(
f.body,
_pre_replace,
_post_replace,
["tir.AttrStmt", "tir.BufferLoad", "tir.BufferStore", "tir.BufferRealize"],
)
)
return tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.contrib.ethos-u.remove_concatenates"
)
def CreatePrimFuncWithoutConstants(const_dict):
"""
This pass will remove arguments that are constants
from PrimFunc Args. These should be replaced properly
with tir.allocate_const when it becomes available.
It also modifies the constant dictionary to
rewrite the keys as the actual tir.Vars that are params
rather than the index because this pass removes PrimFunc
arguments that represent constants.
"""
new_const_dict = dict()
def _ftransform(f, mod, ctx):
new_params = list()
new_buffer_map = dict()
new_preflattened_buffer_map = dict()
for param_idx in const_dict.keys():
# We are using the buffer_var to key the constants, as the PrimFunc
# params that represent constants will be removed.
new_const_dict[f.buffer_map[f.params[param_idx]].data] = const_dict[param_idx]
for i, param in enumerate(f.params):
if i not in const_dict.keys():
new_params.append(param)
new_buffer_map[param] = f.buffer_map[param]
if param in f.preflattened_buffer_map:
new_preflattened_buffer_map[param] = f.preflattened_buffer_map[param]
return tvm.tir.PrimFunc(
new_params,
f.body,
f.ret_type,
new_buffer_map,
new_preflattened_buffer_map,
f.attrs,
f.span,
)
def _create_primfunc_without_constants(mod):
transform_func = tvm.tir.transform.prim_func_pass(
_ftransform, opt_level=0, name="tir.contrib.ethos-u.CreatePrimFuncWithoutConstants"
)
mod = transform_func(mod)
return mod, new_const_dict
return _create_primfunc_without_constants
def HoistAllocates() -> tvm.IRModule:
"""
Hoist allocate nodes up to the top of the body of the main function.
Returns
-------
tvm.IRModule
The new module with hoisted allocate nodes.
"""
return _ffi_api.HoistAllocates()
def CopyComputeReordering(
max_copy_movements: Optional[int] = None, reorder_by_cycles: Optional[bool] = None
) -> tvm.IRModule:
"""
Reorders copy and compute nodes in such a way that independent DMA copies
and computes happen in parallel.
Copies to buffers with local scope are not reordered since they copy LUT
into the SHRAM and that already happens in parallel with copying weights into
the weights encoder.
If reorder_by_cycles is set, we use the compute_cycles_hint to decide the reordering. If it is
not set, we move the copies up by a fixed number of movements, either by max_copy_movements if
it is specified, or by a default value of 1.
If reordering based on the cycle count is enabled, we try to achieve further copy latency
hiding with a two step algorithm:
(1) Move all the global copies (i.e. copies that copy a constant into SRAM for conv2d or
depthwise_conv2d) above a preceding compute op. If in general the computes take longer than
copies, this should be enough to hide the copy latencies.
(2) If there are some global copies that take longer than the computes, we might be able to
hide them further by moving them further up in a graph since in general there are more compute
ops than copy ops in a graph (as only conv2d and depthwise_conv2d have constants associated
with them). The algorithm checks whether a copy is hidden and, if it is not, it checks if a
preceding compute op has a preceding copy; if it doesn't, it moves the copy that we try to
hide further up. It keeps moving the copy until it can't move it any further or until the
latency is hidden.
Parameters
----------
max_copy_movements: Optional[int]
The maximum number of movements allowed for a copy.
If None, the pass context option
tir.contrib.ethos-u.copy_compute_reordering_max_copy_movements
is used if provided, otherwise the default value will be 1.
reorder_by_cycles: Optional[bool]
Whether to reorder the computes and copies based on the cycle hint.
If None, the pass context option
tir.contrib.ethos-u.copy_compute_reordering_reorder_by_cycles
is used if provided, otherwise the default value will be False.
Returns
-------
tvm.IRModule
The new module with copy and compute nodes reordered.
"""
return _ffi_api.CopyComputeReordering(max_copy_movements, reorder_by_cycles)
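# A minimal usage sketch; the config key below is the pass context option named
# in the docstring above:
#
#   with tvm.transform.PassContext(
#       config={"tir.contrib.ethos-u.copy_compute_reordering_max_copy_movements": 2}
#   ):
#       mod = CopyComputeReordering()(mod)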
def MergeConstants(const_dict):
"""
This pass looks for the constants used by each compute operator
and merges them into a single buffer.
Constants written to a buffer with local scope are not merged.
"""
def _merge_constants(mod):
nonlocal const_dict
try:
    mod["main"]
except Exception as error:
    raise tvm.TVMError(
        "Expected a single primitive function called 'main'. "
        "Please run the MergeConstants pass in conjunction with the LowerToTIR() pass."
    ) from error
new_const_dict = {}
for param in const_dict.keys():
new_const_dict[tvm.tir.IntImm("int64", param)] = tvm.nd.array(const_dict[param])
mod["main"] = mod["main"].with_attr("ethos-u.const_dict", new_const_dict)
mod = _ffi_api.MergeConstants()(mod)
const_dict = mod["main"].attrs["ethos-u.const_dict"]
mod = _ffi_api.RemoveConstDictAttribute()(mod)
new_const_dict = {}
for param in const_dict.keys():
new_const_dict[int(param)] = const_dict[param].numpy()
return mod, new_const_dict
return _merge_constants
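# A minimal usage sketch (the module is assumed to contain the single 'main'
# PrimFunc produced by LowerToTIR(), as required by the error message above):
#
#   mod, merged_const_dict = MergeConstants(const_dict)(mod)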
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/tir/pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Extract information from the pooling operators in TIR."""
from typing import Tuple
import tvm
from .utils import get_outer_loops, get_op_attrs, get_loads, get_stores
from .dma import get_ifm_params, get_ofm_params
from .spec import SerialKernel, SerialActivation, SerialPooling
from .producers_consumers import ProducersConsumers
def get_pooling_params(
stmt: tvm.tir.AttrStmt, producers_consumers: ProducersConsumers
) -> Tuple[SerialPooling, tvm.tir.Var, tvm.tir.Var]:
"""Get the parameters necessary to construct a call_extern for a pooling.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of a pooling loop nest.
producers_consumers: ProducersConsumers
It associates pointers with the loop nest that produces
their values and with the loop nest that consumes their values.
Returns
-------
SerialPooling
The parameters needed to construct a 2D pooling.
output_pointer : tvm.tir.Var
The output pointer of the pooling operation.
replace_pointer : tvm.tir.Var
The output pointer of the DMA write operation, which is to replace
the pooling output pointer.
is_allocator : bool
Whether this operator allocates its output.
"""
attrs, body = get_op_attrs(stmt)
_, _, _, _, _, inner = get_outer_loops(body, "NHWC")
rh = inner
rw = rh.body
# loads = [output, input, LUT, LUT]
loads = get_loads(rw.body)
# stores = [output]
stores = get_stores(rw.body)
input_pointer = loads[1].buffer.data
output_pointer = stores[0].buffer.data
# Get feature map info
serial_ifm, serial_padding = get_ifm_params(input_pointer, producers_consumers, stmt)
serial_ofm, serial_block_config, replace_pointer, is_allocator = get_ofm_params(
output_pointer, producers_consumers, stmt
)
# Get kernel info
serial_kernel = SerialKernel(
width=int(rw.extent),
height=int(rh.extent),
stride_w=int(attrs["stride_w"]),
stride_h=int(attrs["stride_h"]),
dilation_w=1,
dilation_h=1,
)
# Get activation info
serial_activation = SerialActivation(
op=attrs["activation"], clip_min=attrs["clip_min"], clip_max=attrs["clip_max"]
)
return (
SerialPooling(
ifm=serial_ifm,
ofm=serial_ofm,
pooling_type=attrs["pooling_type"],
pool_shape=serial_kernel,
padding=serial_padding,
activation=serial_activation,
rounding_mode=attrs["rounding_mode"],
upscale=attrs["upscale"],
block_config=serial_block_config,
),
output_pointer,
replace_pointer,
is_allocator,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/tir/producers_consumers.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""The ProducersConsumers class"""
from typing import Optional, Dict, List, Tuple
from collections.abc import KeysView
import tvm
class ProducersConsumers:
"""It associates pointers with the loop nest that produces
their values and with the loop nest that consumes their values."""
def __init__(self) -> None:
self.indices: Dict[tvm.tir.AttrStmt, int] = {}
self.producers: List[Tuple[tvm.tir.AttrStmt, tvm.tir.expr.Var]] = []
self.consumers: List[Tuple[tvm.tir.AttrStmt, List[tvm.tir.expr.Var]]] = []
self.allocate_variables: Optional[KeysView] = None
def add_producer(self, var: tvm.tir.expr.Var, attr: tvm.tir.AttrStmt) -> None:
"""Add the attribute statement attr as producer of the variable var."""
self.indices[attr] = len(self.producers)
self.producers.append((attr, var))
def get_producer(
self, var: tvm.tir.expr.Var, attr: tvm.tir.AttrStmt
) -> Optional[tvm.tir.AttrStmt]:
"""Get the last attribute statement which produces the variable var when
the current attribute statement is attr."""
if var not in self.allocate_variables:
return None
index = self.indices[attr]
for i in list(reversed(range(index + 1))):
if self.producers[i][1] == var:
return self.producers[i][0]
return None
def get_last_producer(self, var: tvm.tir.expr.Var) -> Optional[tvm.tir.AttrStmt]:
"""Get the last attribute statement which produces the variable var."""
return self.get_producer(var, self.producers[-1][0])
def add_allocate_variables(self, allocate_variables: KeysView) -> None:
"""Add the allocated variables."""
self.allocate_variables = allocate_variables
def add_consumer(self, var: tvm.tir.expr.Var, attr: tvm.tir.AttrStmt) -> None:
"""Add the attribute statement attr as consumer of the variable var."""
index = self.indices[attr]
if index < len(self.consumers):
self.consumers[index][1].append(var)
else:
self.consumers.append((attr, [var]))
def get_consumer(
self, var: tvm.tir.expr.Var, attr: tvm.tir.AttrStmt
) -> Optional[tvm.tir.AttrStmt]:
"""Get the first attribute statement which consumes the variable var when
the current attribute statement is attr."""
index = self.indices[attr]
for i in range(index, len(self.consumers)):
if var in self.consumers[i][1]:
return self.consumers[i][0]
return None
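if __name__ == "__main__":
    # A minimal, self-contained sketch of the bookkeeping; plain strings stand
    # in for the tvm.tir.AttrStmt nodes and buffer variables used by the passes:
    pc = ProducersConsumers()
    pc.add_producer("x", "op1")  # op1 writes x
    pc.add_producer("y", "op2")  # op2 writes y
    pc.add_consumer("x", "op2")  # op2 reads x
    pc.add_allocate_variables({"x": None, "y": None}.keys())
    assert pc.get_producer("x", "op2") == "op1"
    assert pc.get_consumer("x", "op1") == "op2"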
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/tir/scheduler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Scheduling for Arm(R) Ethos(TM)-U NPU."""
import tvm
from tvm.contrib.ethosu.cascader import Propagator
def schedule(cached_func, const_dict, cascader=None):
"""Schedule a CachedFunc for NPU compilation.
Parameters
----------
cached_func : CachedFunc
The CachedFunc to schedule.
const_dict : dict of int to numpy.ndarray
The constant dictionary.
cascader : callable, optional
A cascading function to apply optimizing scheduling
to the graph.
Returns
-------
s : tvm.te.Schedule
The completed schedule for the graph.
"""
s = tvm.te.create_schedule([t.op for t in cached_func.outputs])
if cascader:
cascader(cached_func, const_dict, s)
inline_no_ops(cached_func, s)
copy_luts()(cached_func, const_dict, s)
inline_no_ops(cached_func, s)
schedule_pragmas(s)
schedule_cache_reads(s)
return s
def tile_nd(s, tensor, tile):
"""Scheduling utility to perform N-dimensional tiling.
Parameters
----------
s : tvm.te.Schedule
The schedule to apply the tiling to.
tensor : tvm.te.Tensor
The tensor to apply the tiling to.
tile : tuple
The N-dimensional tile size.
Returns
-------
outer_indices : list of tvm.tir.IterVar
The outer iteration variables.
inner_indices : list of tvm.tir.IterVar
The inner iteration variables.
"""
outer_indices = []
inner_indices = []
for i, size in enumerate(tile):
outer, inner = s[tensor].split(tensor.op.axis[i], size)
outer_indices.append(outer)
inner_indices.append(inner)
s[tensor].reorder(*outer_indices, *inner_indices)
return outer_indices, inner_indices
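# A minimal, self-contained sketch of tile_nd on a hypothetical 16x16
# elementwise compute (shapes chosen purely for illustration):
if __name__ == "__main__":
    from tvm import te

    _a = te.placeholder((16, 16), name="a")
    _b = te.compute((16, 16), lambda i, j: _a[i, j] + 1, name="b")
    _s = te.create_schedule(_b.op)
    # After tiling, the loops are ordered (i_outer, j_outer, i_inner, j_inner)
    _outer, _inner = tile_nd(_s, _b, (4, 4))
    print(tvm.lower(_s, [_a, _b], simple_mode=True))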
def total_cascader(stripe_size):
"""A demo/test cascader which tries to cascade every op in the graph together.
The desired output stripe size should be specified. Note this only works
for single output graphs.
Parameters
----------
stripe_size : tuple
The output stripe size.
Returns
-------
func : callable
The cascading function.
"""
def _cascader(cached_func, const_dict, sch):
scheduled = set()
def _visit(tensor, stage, ax):
if tensor not in scheduled and isinstance(tensor.op, tvm.te.ComputeOp):
sch[tensor].compute_at(stage, ax)
scheduled.add(tensor)
for input_tensor in tensor.op.input_tensors:
_visit(input_tensor, stage, ax)
assert len(cached_func.outputs) == 1
out = cached_func.outputs[0]
oi, _ = tile_nd(sch, out, stripe_size)
for ax in oi:
sch[out].unroll(ax)
for input_tensor in out.op.input_tensors:
_visit(input_tensor, sch[out], oi[-1])
return _cascader
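# A minimal usage sketch (the stripe size is illustrative for an NHWC output):
#
#   s = schedule(cached_func, const_dict, cascader=total_cascader((1, 4, 4, 16)))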
def copy_constants():
"""A simple planner which copies all constant data from FLASH -> SRAM.
Returns
-------
planner : callable
The planning function.
"""
def _planner(cached_func, const_dict, sch):
planned = set() # type: ignore
def _visit(tensor, reader, lut):
if tensor not in planned:
planned.add(tensor)
if isinstance(tensor.op, tvm.te.PlaceholderOp) and tensor != lut:
# Find index of input using 'same_as' check to prevent equality
# ambiguity when encountering a scalar.
is_same = [var.same_as(tensor) for var in cached_func.inputs]
index = is_same.index(True)
if index in const_dict:
sch.cache_read(tensor, "global", [reader])
elif isinstance(tensor.op, tvm.te.ComputeOp):
if "lut" in tensor.op.attrs.keys():
lut = tensor.op.attrs["lut"]
for input_tensor in tensor.op.input_tensors:
_visit(input_tensor, tensor, lut)
for output_tensor in cached_func.outputs:
_visit(output_tensor, None, None)
return _planner
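# A minimal usage sketch: passing copy_constants() as the cascader gives each
# constant placeholder a "global" cache_read, i.e. the FLASH -> SRAM copy:
#
#   s = schedule(cached_func, const_dict, cascader=copy_constants())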
def copy_luts():
"""A scheduler that copies LUTs to SHRAM.
Returns
-------
planner : callable
The planning function.
"""
def _planner(te_graph, const_dict, sch):
planned = set() # type: ignore
def _visit(tensor, reader, lut):
if tensor not in planned:
planned.add(tensor)
if isinstance(tensor.op, tvm.te.PlaceholderOp) and tensor == lut:
index = list(te_graph.inputs).index(tensor)
if index in const_dict:
sch.cache_read(tensor, "local", [reader])
elif isinstance(tensor.op, tvm.te.ComputeOp):
if "lut" in tensor.op.attrs.keys():
lut = tensor.op.attrs["lut"]
for input_tensor in tensor.op.input_tensors:
_visit(input_tensor, tensor, lut)
for output_tensor in te_graph.outputs:
_visit(output_tensor, None, None)
return _planner
def schedule_pragmas(sch):
"""Add pragmas to the operators that require them.
This adds the pragmas used for codegen to the NPU ops.
They are taken directly from the TE compute op's attributes.
Modifies the schedule in-place.
Parameters
----------
sch : tvm.te.Schedule
The schedule.
"""
def _add_pragmas(stage, ax):
if stage.op.name == "T_concat":
stage.pragma(ax, "op", "ethosu_concatenate")
if "op" in [attr for attr, val in stage.op.attrs.items()]:
stage.pragma(ax, "op", stage.op.attrs["op"])
for attr, val in stage.op.attrs.items():
if attr not in ("op", "lut") and not isinstance(val, Propagator):
stage.pragma(ax, str(attr), val)
if stage.op.axis[0] in stage.iter_var_attrs:
attrs = stage.iter_var_attrs[stage.op.axis[0]]
if "block_config_height" in attrs.pragma_keys:
pragmas = dict(zip([k.value for k in attrs.pragma_keys], attrs.pragma_values))
stage.pragma(ax, "block_config_height", pragmas["block_config_height"])
stage.pragma(ax, "block_config_width", pragmas["block_config_width"])
stage.pragma(ax, "block_config_depth", pragmas["block_config_depth"])
for stage in sch.stages:
if (
isinstance(stage.op, tvm.te.ComputeOp)
and len(stage.op.axis) + len(stage.op.reduce_axis) > 0
):
# The logic ensures the pragmas are assigned to the inner tiling loops
# rather than the outer ones (which end up getting unrolled).
num_inner_loops = len(stage.op.axis) + len(stage.op.reduce_axis)
ax = stage.leaf_iter_vars[-num_inner_loops]
_add_pragmas(stage, ax)
def schedule_cache_reads(sch):
"""Schedule cache reads that have been introduced.
There are two things that need to happen to cache_read stages: they should be tagged
with the 'ethosu_copy' pragma and have all their axes fused to make them 1D.
Parameters
----------
sch : tvm.te.Schedule
The schedule.
"""
def _detect_cache_read(stage):
# Try and detect cache_reads by checking if the compute op is identity
if isinstance(stage.op, tvm.te.ComputeOp):
op = stage.op
if "ethosu" in op.name:
return False
axes = op.axis
if len(op.input_tensors) == 1:
tensor = op.input_tensors[0]
try:
identity_op = tensor(*axes)
except ValueError:
return False
if tvm.tir.analysis.expr_deep_equal(identity_op, op.body[0]):
return True
return False
for stage in sch.stages:
if stage.attach_type != 2: # Not inlined
if _detect_cache_read(stage):
fax = stage.fuse(*stage.op.axis)
# propagate pragmas placed on the outer loop
if len(stage.op.axis) > 0 and stage.op.axis[0] in stage.iter_var_attrs:
attrs = stage.iter_var_attrs[stage.op.axis[0]]
for k, v in zip(attrs.pragma_keys, attrs.pragma_values):
stage.pragma(fax, k.value, v)
stage.pragma(fax, "op", "ethosu_copy")
def inline_no_ops(cached_func, sch):
"""Inline 'no-ops' - operations that in principle do nothing.
Modifies the schedule in-place. For now we inline reshape and
strided slice - more could be added.
Parameters
----------
cached_func : CachedFunc
The cached func.
sch : tvm.te.Schedule
The schedule.
"""
no_ops = {"T_reshape", "T_strided_slice"}
scheduled = set()
def _visit(tensor):
if tensor not in scheduled and isinstance(tensor.op, tvm.te.ComputeOp):
if tensor.op.name in no_ops:
sch[tensor].compute_inline()
scheduled.add(tensor)
for input_tensor in tensor.op.input_tensors:
_visit(input_tensor)
for out in cached_func.outputs:
_visit(out)
class OperatorCompute:
"""A helper class to manipulate the series of compute ops that make up an operator."""
def __init__(self, read, convert_to_nhwc, pad, upscale, op, convert_to_nhcwb16, write):
self.read = read
self.convert_to_nhwc = convert_to_nhwc
self.pad = pad
self.upscale = upscale
self.op = op
self.convert_to_nhcwb16 = convert_to_nhcwb16
self.write = write
@classmethod
def from_output(cls, out):
write = out
convert_to_nhcwb16 = write.op.input_tensors[0]
op = convert_to_nhcwb16.op.input_tensors[0]
pad = op.op.input_tensors[0]
upscale = pad.op.input_tensors[0]
convert_to_nhwc = upscale.op.input_tensors[0]
read = convert_to_nhwc.op.input_tensors[0]
return cls(read, convert_to_nhwc, pad, upscale, op, convert_to_nhcwb16, write)
def split(self, sch, axis, val):
outer, inner = sch[self.write].split(self.write.op.axis[axis], val)
iter_vars = [ax for ax in self.write.op.axis if ax != self.write.op.axis[axis]]
iter_vars.insert(axis, inner)
sch[self.write].reorder(outer, *iter_vars)
sch[self.write].unroll(outer)
g = sch.create_group(outputs=self.convert_to_nhcwb16, inputs=self.read, include_inputs=True)
g.compute_at(sch[self.write], outer)
return outer
def rolling_buffer(self, sch):
sch[self.read].rolling_buffer()
sch[self.convert_to_nhwc].rolling_buffer()
sch[self.pad].rolling_buffer()
sch[self.upscale].rolling_buffer()
sch[self.op].rolling_buffer()
sch[self.convert_to_nhcwb16].rolling_buffer()
sch[self.write].rolling_buffer()
def compute_at(self, sch, stage, axis):
sch[self.read].compute_at(stage, axis)
sch[self.convert_to_nhwc].compute_at(stage, axis)
sch[self.pad].compute_at(stage, axis)
sch[self.upscale].compute_at(stage, axis)
sch[self.op].compute_at(stage, axis)
sch[self.convert_to_nhcwb16].compute_at(stage, axis)
sch[self.write].compute_at(stage, axis)
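# A minimal usage sketch inside a custom cascader (`out` is assumed to be the
# write stage of an NPU operator, `sch` the schedule being built):
#
#   op_compute = OperatorCompute.from_output(out)
#   outer = op_compute.split(sch, axis=1, val=4)  # stripe along the height axis
#   op_compute.rolling_buffer(sch)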
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/tir/spec.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The TIR serialization specification for Arm(R) Ethos(TM)-U NPU."""
from typing import Union
from typing import get_type_hints
from inspect import isclass
import tvm
from tvm.relay.backend.contrib.ethosu import util
def create_serial_object(serialized_type, deserialized_elements):
"""
This function will create an object of a serialized type that is one of the subclasses
of tvm.relay.backend.contrib.ethosu.tir.spec.SerializableFormat
Parameters
----------
serialized_type : a subclass type of SerializableFormat
deserialized_elements : list
The list of arguments that need to be packed to create the SerializableFormat object
Returns
-------
The constructed object of type serialized_type
"""
def _create_serial_object(internal_serialized_type, read_element_idx=0):
"""The internal function that increments the read_element_idx
when creating nested serial objects"""
arg_len = util.get_arg_count(internal_serialized_type.__init__) - 1
serial_init_types = get_type_hints(internal_serialized_type.__init__)
serial_init_arg_names = list(serial_init_types.keys())
serial_init_args = []
assert arg_len == len(serial_init_arg_names)
for si_arg_name in serial_init_arg_names:
si_arg_type = serial_init_types[si_arg_name]
if isclass(si_arg_type) and issubclass(si_arg_type, SerializableFormat):
sia, read_element_idx = _create_serial_object(si_arg_type, read_element_idx)
serial_init_args.append(sia)
else:
serial_init_args.append(deserialized_elements[read_element_idx])
read_element_idx += 1
return internal_serialized_type(*serial_init_args), read_element_idx
# Just return the primary serial object
return _create_serial_object(serialized_type)[0]
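# Illustrative usage sketch (the argument values below are hypothetical):
# given the flat argument list of a TIR call_extern, the nested serial
# object is reconstructed in declaration order, e.g.
#
#   padding = create_serial_object(SerialPadding, [0, 1, 0, 1])
#   # padding.top == 0, padding.left == 1, padding.bottom == 0, padding.right == 1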
class SerializableFormat:
"""Base class to retrieve arguments on a predefined ordering"""
def __iter__(self):
# Note class attribute definition order is preserved - see PEP 520
for name in self.__dict__:
value = self.__getattribute__(name)
if isinstance(value, SerializableFormat):
yield from list(value)
else:
yield value
def __getitem__(self, index):
# Note class attribute definition order is preserved - see PEP 520
name = list(self.__dict__.keys())[index]
return self.__getattribute__(name)
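# A minimal example of the flattening behaviour (values are illustrative):
# iterating a SerializableFormat yields its attributes in definition order,
# with nested SerializableFormat members flattened in place, e.g.
#
#   act = SerialActivation(op="CLIP", clip_min=0, clip_max=6)
#   assert list(act) == ["CLIP", 0, 6]
#   assert act[0] == "CLIP"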
class SerialFeatureMap(SerializableFormat):
"""Specialization class to retrieve arguments of a Feature Map
    (similar to NpuFeatureMap of Vela) on a predefined ordering"""
def __init__(
self,
data_type: str,
height: int,
width: int,
channels: int,
tile_height_0: int,
tile_height_1: int,
tile_width_0: int,
tile_address_0: tvm.tir.expr.BufferLoad,
tile_address_1: Union[tvm.tir.expr.BufferLoad, int],
tile_address_2: Union[tvm.tir.expr.BufferLoad, int],
tile_address_3: Union[tvm.tir.expr.BufferLoad, int],
scale: float,
zero_point: int,
layout: str,
stride_h: int,
stride_w: int,
stride_c: int,
):
self.data_type = data_type
self.height = height
self.width = width
self.channels = channels
self.tile_height_0 = tile_height_0
self.tile_height_1 = tile_height_1
self.tile_width_0 = tile_width_0
self.tile_address_0 = tile_address_0
self.tile_address_1 = tile_address_1
self.tile_address_2 = tile_address_2
self.tile_address_3 = tile_address_3
self.scale = scale
self.zero_point = zero_point
self.layout = layout
self.stride_h = stride_h
self.stride_w = stride_w
self.stride_c = stride_c
class SerialKernel(SerializableFormat):
"""Specialization class to retrieve arguments of a Kernel
    (similar to NpuKernel of Vela) on a predefined ordering"""
def __init__(
self,
width: int,
height: int,
stride_w: int,
stride_h: int,
dilation_w: int,
dilation_h: int,
):
self.width = width
self.height = height
self.stride_w = stride_w
self.stride_h = stride_h
self.dilation_w = dilation_w
self.dilation_h = dilation_h
class SerialAddressRange(SerializableFormat):
"""Specialization class to retrieve arguments of a AddressRange
(similiar to NpuAddressRange of Vela) on a predefined ordering"""
def __init__(self, address: tvm.tir.expr.BufferLoad, length: int):
self.address = address
self.length = length
class SerialPadding(SerializableFormat):
"""Specialization class to retrieve arguments of a Padding
    (similar to NpuPadding of Vela) on a predefined ordering"""
def __init__(self, top: int, left: int, bottom: int, right: int):
self.top = top
self.left = left
self.bottom = bottom
self.right = right
class SerialActivation(SerializableFormat):
"""Specialization class to retrieve arguments of a Activation
(similiar to NpuActivation of Vela) on a predefined ordering"""
def __init__(self, op: str, clip_min: int, clip_max: int):
self.op = op
self.clip_min = clip_min
self.clip_max = clip_max
class SerialBlockConfig(SerializableFormat):
"""Specialization class to retrieve arguments of a BlockConfig
(similar to NpuBlockConfig of Vela) on a predefined ordering"""
def __init__(self, height: int, width: int, depth: int):
self.height = height
self.width = width
self.depth = depth
class Serial2DConvolution(SerializableFormat):
"""Specialization class to retrieve arguments of
a ethosu.conv2d tir extern call on a predefined ordering"""
def __init__(
self,
ifm: SerialFeatureMap,
ofm: SerialFeatureMap,
kernel: SerialKernel,
weight: SerialAddressRange,
weight2: SerialAddressRange,
weight_zero_point: int,
scale_bias: SerialAddressRange,
scale_bias2: SerialAddressRange,
padding: SerialPadding,
activation: SerialActivation,
rounding_mode: str,
upscale: str,
block_config: SerialBlockConfig,
):
self.ifm = ifm
self.ofm = ofm
self.kernel = kernel
self.weight = weight
self.weight2 = weight2
self.weight_zero_point = weight_zero_point
self.scale_bias = scale_bias
self.scale_bias2 = scale_bias2
self.padding = padding
self.activation = activation
self.rounding_mode = rounding_mode
self.upscale = upscale
self.block_config = block_config
class Serial2DDepthwise(SerializableFormat):
"""Specialization class to retrieve arguments of
a ethosu.depthwise_conv2d TIR extern call on a predefined ordering"""
def __init__(
self,
ifm: SerialFeatureMap,
ofm: SerialFeatureMap,
kernel: SerialKernel,
weight: SerialAddressRange,
weight_zero_point: int,
scale_bias: SerialAddressRange,
padding: SerialPadding,
activation: SerialActivation,
rounding_mode: str,
upscale: str,
block_config: SerialBlockConfig,
):
self.ifm = ifm
self.ofm = ofm
self.kernel = kernel
self.weight = weight
self.weight_zero_point = weight_zero_point
self.scale_bias = scale_bias
self.padding = padding
self.activation = activation
self.rounding_mode = rounding_mode
self.upscale = upscale
self.block_config = block_config
class SerialCopy(SerializableFormat):
"""Specialization class to retrieve arguments of
a ethosu.copy tir extern call on a predefined ordering"""
def __init__(
self,
read_address: tvm.tir.expr.BufferLoad,
length: int,
write_address: tvm.tir.expr.BufferLoad,
):
self.read_address = read_address
self.length = length
self.write_address = write_address
class SerialPooling(SerializableFormat):
"""Specialization class to retrieve arguments of
a ethosu.pooling tir extern call on a predefined ordering"""
def __init__(
self,
ifm: SerialFeatureMap,
ofm: SerialFeatureMap,
pooling_type: str,
pool_shape: SerialKernel,
padding: SerialPadding,
activation: SerialActivation,
rounding_mode: str,
upscale: str,
block_config: SerialBlockConfig,
):
self.ifm = ifm
self.ofm = ofm
self.pooling_type = pooling_type
self.pool_shape = pool_shape
self.padding = padding
self.activation = activation
self.rounding_mode = rounding_mode
self.upscale = upscale
self.block_config = block_config
class SerialBinaryElementwise(SerializableFormat):
"""Specialization class to retrieve arguments of
a ethosu.binary_elementwise tir extern call on a predefined ordering"""
def __init__(
self,
ifm: SerialFeatureMap,
ifm2: SerialFeatureMap,
ofm: SerialFeatureMap,
operator_type: str,
reversed_operands: bool,
activation: SerialActivation,
rounding_mode: str,
block_config: SerialBlockConfig,
):
self.ifm = ifm
self.ifm2 = ifm2
self.ofm = ofm
self.operator_type = operator_type
self.reversed_operands = reversed_operands
self.activation = activation
self.rounding_mode = rounding_mode
self.block_config = block_config
class SerialUnaryElementwise(SerializableFormat):
"""Specialization class to retrieve arguments of
a ethosu.unary_elementwise tir extern call on a predefined ordering"""
def __init__(
self,
ifm: SerialFeatureMap,
ofm: SerialFeatureMap,
operator_type: str,
activation: SerialActivation,
rounding_mode: str,
block_config: SerialBlockConfig,
):
self.ifm = ifm
self.ofm = ofm
self.operator_type = operator_type
self.activation = activation
self.rounding_mode = rounding_mode
self.block_config = block_config
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/tir/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Extract parameters from the transform operators in TIR."""
import tvm
from .spec import SerialCopy
from .utils import get_base_address, get_op_attrs
def get_copy_params(stmt, producers_consumers):
"""Get the parameters necessary to construct a call_extern for a copy.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of a copy loop nest.
producers_consumers: ProducersConsumers
It associates pointers with the loop nest that produces
their values and with the loop nest that consumes their values.
Returns
-------
SerialCopy
The parameters needed to construct a copy.
tvm.tir.Var
The output pointer of the copy operation.
    replace_pointer : tvm.tir.Var
        The output pointer of the DMA write operation, which is to replace
        the operator's output pointer (None in the case of a copy).
is_allocator : bool
Whether this operator allocates its output.
"""
_, body = get_op_attrs(stmt)
length = body.extent
write_store = body.body
write_base = [get_base_address(index) for index in write_store.indices]
read_load = body.body.value
read_base = [get_base_address(index) for index in read_load.indices]
return (
SerialCopy(
read_address=tvm.tir.expr.BufferLoad(read_load.buffer, read_base),
length=length,
write_address=tvm.tir.expr.BufferLoad(write_store.buffer, write_base),
),
write_store.buffer.data,
None,
True,
)
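# The copy loop nest matched above is assumed to have the schematic form
#
#   for i in range(length):
#       write_buffer[write_base + i] = read_buffer[read_base + i]
#
# from which the base addresses of both buffers and the transfer length
# are recovered (a sketch, not the exact printed TIR).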
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/tir/unary_elementwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Extract information from the unary_elementwise operators in TIR."""
from tvm import tir
from .utils import get_outer_loops, get_op_attrs
from .dma import get_ifm_params, get_ofm_params
from .spec import SerialActivation, SerialUnaryElementwise
def get_unary_elementwise_params(stmt, producers_consumers):
"""Get the parameters necessary to construct a call_extern for a unary_elementwise.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of a unary elementwise loop nest.
producers_consumers: ProducersConsumers
It associates pointers with the loop nest that produces
their values and with the loop nest that consumes their values.
Returns
-------
SerialUnaryElementwise
The parameters needed to construct a unary elementwise operator.
output_pointer : tvm.tir.Var
The output pointer of the unary elementwise operation.
replace_pointer : tvm.tir.Var
The output pointer of the DMA write operation, which is to replace
the unary elementwise output pointer.
is_allocator : bool
Whether this operator allocates its output.
"""
attrs, body = get_op_attrs(stmt)
_, _, _, _, _, inner = get_outer_loops(body, "NHWC")
input_pointer = None
if isinstance(inner.value, tir.expr.Select):
# ABS
input_pointer = inner.value.condition.b.buffer.data
if isinstance(inner.value, tir.expr.Sub):
# CLZ
input_pointer = inner.value.b.args[0].buffer.data
output_pointer = inner.buffer.data
# Get feature map info
serial_ifm, _ = get_ifm_params(input_pointer, producers_consumers, stmt)
serial_ofm, serial_block_config, replace_pointer, is_allocator = get_ofm_params(
output_pointer, producers_consumers, stmt
)
# Get activation info
serial_activation = SerialActivation(
op=attrs["activation"], clip_min=attrs["clip_min"], clip_max=attrs["clip_max"]
)
return (
SerialUnaryElementwise(
ifm=serial_ifm,
ofm=serial_ofm,
operator_type=attrs["operator_type"],
activation=serial_activation,
rounding_mode=attrs["rounding_mode"],
block_config=serial_block_config,
),
output_pointer,
replace_pointer,
is_allocator,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/tir/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Helper utility functions used by the NPU TIR compiler"""
import tvm
from tvm import arith
def get_op_attrs(stmt):
"""Iterate through nested attribute statements accumulating their values
in an attribute dictionary.
The "pragma_" prefix is removed as a convenience.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement to begin from.
Returns
-------
attrs : dict of str to object
The attribute dictionary.
stmt : tvm.tir.Stmt
The body after having collected the final attribute statement.
"""
attrs = {}
while isinstance(stmt, tvm.tir.AttrStmt):
        # The pragma scheduler inserts "pragma_" before all the
        # attr names, so we strip the prefix here for convenience
attr = stmt.attr_key.replace("pragma_", "")
attrs[attr] = stmt.value
stmt = stmt.body
return attrs, stmt
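# Illustrative example (schematic, AttrStmt arguments simplified): for a
# nest of AttrStmts with keys "pragma_op" = "ethosu_copy" and
# "pragma_length" = 16 wrapping `body`, this returns
# ({"op": "ethosu_copy", "length": 16}, body).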
def get_strides(index, stride_vars):
"""Get the striding of given vars in an indexing expression.
Parameters
----------
index : tvm.tir.PrimExpr
The index expression where the stride vars are present.
stride_vars : list of tvm.tir.Var
The vars to determine the striding of.
Returns
-------
strides : list of int
The striding of each stride var in the index expression
in the same order as the stride vars were given.
"""
strides = [1] * len(stride_vars)
dmap = {}
def _visit(stmt):
if isinstance(stmt, tvm.tir.Var):
dmap[stmt] = arith.IntervalSet(0, 0)
tvm.tir.stmt_functor.post_order_visit(index, _visit)
min_value = int(arith.Analyzer().int_set(index, dmap).min_value)
for var in dmap:
if var in stride_vars:
# NOTE: Doing this using a [0, 1] interval doesn't work reliably
# Seems to be a bug
dmap[var] = arith.IntervalSet(1, 1)
max_value = int(arith.Analyzer().int_set(index, dmap).max_value)
stride = int(max_value - min_value)
i = stride_vars.index(var)
strides[i] = stride
dmap[var] = arith.IntervalSet(0, 0)
return strides
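# Worked example: for index = h * 32 + w * 4 + c with stride_vars = [h, w],
# widening h (then w) to a unit interval while the remaining vars are pinned
# to zero raises the interval's max by 32 (then 4), so the result is [32, 4].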
def get_base_address(index):
"""Determine the first (base) address accessed by an index expression.
Parameters
----------
index : tvm.tir.PrimExpr
The index expression to determine the base address of.
Returns
-------
base_address:
The first address accessed by the index expression.
"""
dmap = {}
def _visit(stmt):
if isinstance(stmt, tvm.tir.Var):
dmap[stmt] = arith.IntervalSet(0, 0)
tvm.tir.stmt_functor.post_order_visit(index, _visit)
base_address = int(arith.Analyzer().int_set(index, dmap).min_value)
return base_address
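# Example: for index = h * 32 + w * 4 + 100 the minimum over h = w = 0 is
# 100, which is returned as the base address.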
def get_outer_loops(stmt, layout):
"""Get the outer loops of an operator.
Parameters
----------
stmt : tvm.tir.For
The outermost loop.
layout : str
The output tensor layout (NHWC or NHCWB16).
Returns
-------
n : tvm.tir.For
The batch loop.
h : tvm.tir.For
The height loop.
w : tvm.tir.For
The width loop.
c : tvm.tir.For
The channels loop.
b : tvm.tir.For
        The brick loop. A zero-extent placeholder loop for NHWC.
body : tvm.tir.Stmt
The inner body of the loops.
"""
if layout == "NHWC":
n = stmt
h = n.body
w = h.body
c = w.body
b = tvm.tir.For(tvm.tir.Var("b", "int32"), 0, 0, 0, tvm.tir.Evaluate(0))
return n, h, w, c, b, c.body
if layout == "NHCWB16":
n = stmt
h = n.body
cb = h.body
w = cb.body
b = w.body
return n, h, w, cb, b, b.body
return None
def collect_buffer_map(stmt):
"""Collect a map of Var -> Buffer
Generate a map from a buffer's backing `tir.Var` to the
`tir.Buffer` object that uses it. If multiple such buffers exist,
return the first occurrence.
Parameters
----------
stmt : tvm.tir.Stmt
        The statement to collect the buffer map from.
Returns
-------
buffer_map : Dict[Var, Buffer]
        The map from each buffer var to the first buffer that uses it.
"""
buffer_map = {}
def _visit(node):
if isinstance(node, (tvm.tir.BufferLoad, tvm.tir.BufferStore)):
buf = node.buffer
if buf.data not in buffer_map:
buffer_map[buf.data] = buf
tvm.tir.stmt_functor.post_order_visit(stmt, _visit)
return buffer_map
def get_loads(stmt):
"""Get the BufferLoad statements.
Parameters
----------
stmt : tvm.tir.Stmt
The statement to get the BufferLoads from.
Returns
-------
loads : list of tvm.tir.BufferLoad
The BufferLoads found.
"""
loads = []
def _visit(s):
if isinstance(s, tvm.tir.BufferLoad):
loads.append(s)
tvm.tir.stmt_functor.post_order_visit(stmt, _visit)
return loads
def get_stores(stmt):
"""Get the BufferStore statements.
Parameters
----------
stmt : tvm.tir.Stmt
The statement to get the BufferStores from.
Returns
-------
stores : list of tvm.tir.BufferStore
The BufferStores found.
"""
stores = []
def _visit(s):
if isinstance(s, tvm.tir.BufferStore):
stores.append(s)
tvm.tir.stmt_functor.post_order_visit(stmt, _visit)
return stores
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/tir_to_cs_translator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This source will contain code to convert TIR, as produced by
the Relay to TIR compilation process, to Vela API calls to
generate command stream.
"""
from typing import Dict, NamedTuple, Tuple, Union, List
from enum import auto
from enum import Enum
import numpy as np # type: ignore
import ethosu.vela.api as vapi # type: ignore
import tvm
from tvm.tir import stmt_functor
from tvm.relay.backend.contrib.ethosu import util
from tvm.relay.backend.contrib.ethosu import vela_api
from tvm.relay.backend.contrib.ethosu.tir import spec
from tvm.relay.backend.contrib.ethosu.tir import utils as tir_utils
class BufferType(Enum):
"""The type of information that a buffer contains."""
constant = auto()
input_or_output = auto()
scratch = auto()
input = auto()
output = auto()
shram = auto()
class BufferInfo(NamedTuple):
"""A data structure to hold metadata of the buffer."""
    # If the buffer holds constants, `values` will contain them; otherwise None
values: np.ndarray
shape: tvm.ir.container.Array
dtype: np.dtype
btype: BufferType
class AcceleratorArchConfig:
def __init__(self, total_shram_banks):
self.shram_bank_size = 1024
self.total_shram_banks = total_shram_banks
self.shram_size_bytes = self.shram_bank_size * self.total_shram_banks
self.lut_size_bytes = 2048
self.lut_start_address = self.shram_size_bytes - self.lut_size_bytes
def get_accelerator_arch_config(accel_type):
accel_config_str_map = {
"ethos-u55-32": AcceleratorArchConfig(16),
"ethos-u55-64": AcceleratorArchConfig(16),
"ethos-u55-128": AcceleratorArchConfig(24),
"ethos-u55-256": AcceleratorArchConfig(48),
"ethos-u65-256": AcceleratorArchConfig(48),
}
return accel_config_str_map[accel_type]
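# Worked example: "ethos-u55-128" maps to 24 SHRAM banks of 1024 bytes each,
# i.e. 24576 bytes in total, so the 2048-byte LUT window starts at byte
# offset 24576 - 2048 = 22528.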
class RegionOffset(NamedTuple):
"""A data structure to hold region and address offset corresponding to a tensor"""
region: int
offset: int
def analyze_scratch_memory_acesses(mod: tvm.IRModule, candidate_regions_for_scratch: List[int]):
"""
    This function analyzes the IRModule for intermediary tensors that can result
    from an offset into pool variables (via Let nodes) and/or from allocate nodes.
    The allocate nodes will be folded into a single TVMBackendAllocWorkspace call
    with offsets. Ultimately
this will produce a mapping from each such node to a RegionOffset named tuple that
has the region and the obtained offset, as mentioned above.
Parameters
----------
mod: tvm.IRModule
The TIR module containing ethosu extern calls
candidate_regions_for_scratch: List[int]
A list of region integers that could be used for scratch regions
Returns
-------
scratch_region_map : Dict[tvm.tir.Var, RegionOffset]
        A map from buffer vars to the scratch regions they are assigned
    tvm_backend_alloc_workspace_size : int
        The size of the TVMBackendAllocWorkspace call required to service
        any remaining allocate nodes
tvm_backend_alloc_workspace_region : int
The region associated with the tvm_backend_alloc_workspace
"""
scratch_region_map = dict()
pool_var_region_map = dict()
# There should only be a single function
assert len(mod.functions.items()) == 1
primfunc = mod.functions.items()[0][1]
if "pool_args" in primfunc.attrs.keys():
pool_args = primfunc.attrs["pool_args"]
for pool_arg in pool_args:
pool_param = primfunc.params[int(pool_arg.pool_var_idx)]
pool_var_region_map[pool_param] = candidate_regions_for_scratch.pop()
scratch_region_map[pool_param] = RegionOffset(
region=pool_var_region_map[pool_param], offset=None
)
def analyze_pool_access(stmt):
if isinstance(stmt, tvm.tir.stmt.LetStmt):
call_address_of = stmt.value
load = call_address_of.args[0]
pool_var = load.buffer.data
scratch_region_map[stmt.var] = RegionOffset(
region=pool_var_region_map[pool_var], offset=int(load.indices[0])
)
tvm.tir.stmt_functor.post_order_visit(primfunc.body, analyze_pool_access)
dynamic_allocation_region = None
if len(candidate_regions_for_scratch) > 0:
dynamic_allocation_region = candidate_regions_for_scratch.pop()
dynamic_allocation_size = 0
# If there are tir.Allocate remaining by now, they need to be serviced via
# dynamic_allocation calls.
def analyze_remaining_allocates(stmt):
nonlocal dynamic_allocation_size
if isinstance(stmt, tvm.tir.stmt.Allocate):
allocate = stmt
pointer_type = allocate.buffer_var.type_annotation
storage_scope = pointer_type.storage_scope
if storage_scope == "global":
dtype_bytes = np.iinfo(np.dtype(allocate.dtype)).bits // 8
size_in_bytes = int(dtype_bytes * np.prod(list(allocate.extents)))
                # Every memory address the NPU accesses has to be 16-byte aligned
size_in_bytes = util.round_up(size_in_bytes, 16)
address = dynamic_allocation_size
dynamic_allocation_size += size_in_bytes
scratch_region_map[allocate.buffer_var] = RegionOffset(
region=dynamic_allocation_region, offset=address
)
tvm.tir.stmt_functor.post_order_visit(primfunc.body, analyze_remaining_allocates)
return (
scratch_region_map,
dynamic_allocation_size,
dynamic_allocation_region,
)
def _get_region(buffer_type, var=None, scratch_region_map=None):
"""A helper to obtain regions for buffer_types and buffer vars"""
static_regions = {
BufferType.constant: 0,
BufferType.input: 3,
BufferType.output: 4,
BufferType.shram: int((1 << 8) | (3 << 0)),
}
if buffer_type in static_regions.keys():
return static_regions[buffer_type]
assert buffer_type == BufferType.scratch
assert var in scratch_region_map.keys(), f"{var} is not analyzed for scratch regions"
return scratch_region_map[var].region
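# Example: constants resolve to region 0, inputs to region 3 and outputs to
# region 4, while scratch buffers are looked up in scratch_region_map:
#
#   _get_region(BufferType.input)  # -> 3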
def translate(tir_module, params):
"""This will take an tir module for the NPU
and compile to command stream
Parameters
----------
tir_module : tvm.IRModule
The TIR module containing ethosu extern calls
params : dict
        A dictionary mapping the TIR primfunc argument ordering
        index to its constant NDArray
Returns
-------
cs : str
        A hex string of the bytes of the command stream
    encoded_constants : str
        A hex string of the bytes of the concatenated
        encoded weights, encoded biases and scales.
base_addresses : List[util.BaseAddress]
base addresses to be used by the driver
"""
    # The NPU has 6 usable regions ranging from 0-5
    # The regions 0, 3, and 4 are already used for constant,
    # input and output, respectively (see _get_region()).
    # Thus, for scratch we are left with 5, 2 and 1.
candidate_regions_for_scratch = [5, 2, 1]
(
scratch_region_map,
dynamic_allocation_size,
dynamic_allocation_region,
) = analyze_scratch_memory_acesses(tir_module, candidate_regions_for_scratch)
buffer_info = extract_buffer_info(tir_module, params)
call_extern_list = extract_call_extern_list(tir_module)
_npu_ops = list()
for call_extern in call_extern_list:
_npu_ops.append(translate_ethosu_tir_call_extern(call_extern))
_npu_ops, constant_data = assign_addresses(buffer_info, _npu_ops, scratch_region_map)
base_addresses = extract_param_base_addresses(tir_module, buffer_info, scratch_region_map)
if dynamic_allocation_size:
base_addresses.append(
util.BaseAddress(
name="dynamic_allocation",
primfunc_param_idx=None,
region=dynamic_allocation_region,
size=dynamic_allocation_size,
is_runtime_allocation=True,
)
)
target_accel_config = vela_api.get_accelerator_config()
cmds = vapi.npu_generate_register_command_stream(_npu_ops, target_accel_config)
payload = vapi.npu_create_driver_payload(cmds, target_accel_config)
return payload.hex(), constant_data, base_addresses
def extract_param_base_addresses(mod, buffer_info, scratch_region_map) -> List[util.BaseAddress]:
"""This function extracts base addresses to be used by the driver
Parameters
----------
mod : tvm.IRModule
The TIR Module for NPU
    buffer_info : Dict[tvm.tir.Var, BufferInfo]
        Information regarding buffer vars used in the PrimFunc
    scratch_region_map : Dict[tvm.tir.Var, RegionOffset]
        A map from buffer vars to the scratch regions they are assigned
Returns
-------
List[util.BaseAddress]
base addresses to be used by the driver
"""
# There should only be a single function
assert len(mod.functions.items()) == 1
primfunc = mod.functions.items()[0][1]
buffer_map = tir_utils.collect_buffer_map(primfunc.body)
base_addresses = list()
idx = 0
for param in primfunc.params:
# constants are pooled together and handled specially
# this will change after tir.allocate_const.
# For now, we are skipping generating buffer addresses here
if buffer_info[param].btype == BufferType.constant:
continue
if param in buffer_map:
buffer = buffer_map[param]
dtype = buffer.dtype
element_size_bytes = np.iinfo(dtype).bits // 8
size_bytes = element_size_bytes * np.prod(list(buffer.shape))
base_addresses.append(
util.BaseAddress(
param.name.replace("-", "_"),
idx,
_get_region(buffer_info[param].btype, param, scratch_region_map),
size_bytes,
)
)
else:
base_addresses.append(
util.BaseAddress(
param.name.replace("-", "_"),
idx,
_get_region(buffer_info[param].btype, param, scratch_region_map),
0,
)
)
idx += 1
return base_addresses
def extract_call_extern_list(mod):
"""This function will obtain all extern
calls from a TIR module
Parameters
----------
mod : tvm.IRModule
The TIR Module for NPU
Returns
-------
list
of tvm.tir.Call objects
that are tir extern calls
"""
# There should only be a single function
assert len(mod.functions.items()) == 1
primfunc = mod.functions.items()[0][1]
call_extern_list = list()
def populate_call_extern_list(stmt):
if isinstance(stmt, tvm.tir.Call) and stmt.op.name == "tir.call_extern":
call_extern_list.append(stmt)
stmt_functor.post_order_visit(primfunc.body, populate_call_extern_list)
return call_extern_list
def extract_buffer_info(
    mod: tvm.IRModule, param_dict: Dict[tvm.tir.Var, np.ndarray]
) -> Dict[tvm.tir.Var, BufferInfo]:
"""This function is to read the tvm.IRModule that
contains Relay to TIR compiled IRModule. Thereafter,
this will extract the buffer information as the shape
and constant data (if any).
Parameters
----------
mod : tvm.IRModule
The NPU TIR IRModule.
param_dict : Dict[tvm.tir.Var, np.ndarray]
        A dictionary containing param var --> const numpy.NDArray
    Returns
    -------
    dict : Dict[tvm.tir.Var, BufferInfo]
        A dictionary of buffer vars --> BufferInfo
"""
buffer_info = dict()
# There should only be a single function
assert len(mod.functions.items()) == 1
primfunc = mod.functions.items()[0][1]
for param, const_data in param_dict.items():
if isinstance(param, tvm.tir.Buffer):
param = param.data
buffer_info[param] = BufferInfo(
const_data, const_data.shape, const_data.dtype, BufferType.constant
)
pool_param_indices = list()
if "pool_args" in primfunc.attrs.keys():
pool_args = primfunc.attrs["pool_args"]
pool_param_indices = [allocated_pool_info.pool_var_idx for allocated_pool_info in pool_args]
for idx, param in enumerate(primfunc.params):
if param not in buffer_info.keys():
if idx in pool_param_indices:
btype = BufferType.scratch
else:
btype = BufferType.input_or_output
buffer_info[param] = BufferInfo(
None,
None,
None,
btype,
)
def populate_allocate_buffer_info(stmt):
if isinstance(stmt, tvm.tir.stmt.Allocate):
allocate = stmt
pointer_type = allocate.buffer_var.type_annotation
storage_scope = pointer_type.storage_scope
if storage_scope == "local":
buffer_info[allocate.buffer_var] = BufferInfo(
None,
allocate.extents,
allocate.dtype,
BufferType.shram,
)
tvm.tir.stmt_functor.post_order_visit(primfunc.body, populate_allocate_buffer_info)
return buffer_info
def assign_addresses(buffer_info, npu_ops, scratch_region_map):
"""This function will assign addresses to tensors
within two buffers : scratch and constants.
The scratch is the buffer created to hold all intermediary data
The constants is the buffer created via unifying all the constant data
(post-encoding).
Parameters
----------
buffer_info : dict
This is the dictionary obtained via calling extract_buffer_info.
        It maps buffer vars to their BufferInfo.
    npu_ops : list
        A list of Vela NpuOps with tir.BufferLoads for addresses
scratch_region_map : Dict[tvm.tir.Var, RegionOffset]
A buffer_var to region and offset map.
Returns
-------
npu_ops : list
        A list of Vela NpuOps with addresses within the scratch and constant buffers
constant_tensor : NDArray
A unified constant data array of uint8 as the constant buffer
"""
def replace_npu_fm_with_address(npu_fm):
assert isinstance(npu_fm.tiles.addresses[0], tvm.tir.BufferLoad)
buffer = npu_fm.tiles.addresses[0].buffer.data
if buffer in scratch_region_map.keys():
address = scratch_region_map[buffer].offset
region = scratch_region_map[buffer].region
else:
assert buffer in buffer_addresses.keys()
address, buffer_type = buffer_addresses[buffer]
region = _get_region(buffer_type)
assert (
len(npu_fm.tiles.addresses[0].indices) == 1
), "Ethos-U translation expects flattened buffers"
index = npu_fm.tiles.addresses[0].indices[0] * (
np.iinfo(np.dtype(npu_fm.tiles.addresses[0])).bits // 8
)
npu_fm.tiles.addresses[0] = address + int(index)
npu_fm.tiles.addresses[1] = (
address if isinstance(npu_fm.tiles.addresses[1], tvm.tir.BufferLoad) else 0
)
npu_fm.tiles.addresses[2] = (
address if isinstance(npu_fm.tiles.addresses[2], tvm.tir.BufferLoad) else 0
)
npu_fm.tiles.addresses[3] = 0
npu_fm.region = region
return npu_fm
def replace_npu_address_range_with_address(npu_addr_range):
assert isinstance(npu_addr_range.address, tvm.tir.BufferLoad)
buffer = npu_addr_range.address.buffer.data
index = int(
npu_addr_range.address.indices[0]
* (np.iinfo(np.dtype(npu_addr_range.address)).bits // 8)
)
if buffer in scratch_region_map.keys():
return vapi.NpuAddressRange(
scratch_region_map[buffer].region,
scratch_region_map[buffer].offset + index,
npu_addr_range.length,
)
assert buffer in buffer_addresses.keys(), f"searching for buffer : {buffer}, but not found"
address, buffer_type = buffer_addresses[buffer]
address = address + int(npu_addr_range.address.indices[0].value)
return vapi.NpuAddressRange(_get_region(buffer_type), address, npu_addr_range.length)
def replace_tir_loads(npu_object):
if isinstance(npu_object, vapi.NpuFeatureMap):
return replace_npu_fm_with_address(npu_object)
if isinstance(npu_object, vapi.NpuAddressRange):
return replace_npu_address_range_with_address(npu_object)
return npu_object
def classify_io(buffer):
for _npu_op in npu_ops:
if issubclass(type(_npu_op), vapi.NpuBlockOperation):
if _npu_op.ifm and _npu_op.ifm.tiles.addresses[0].buffer.data == buffer:
return BufferType.input
if _npu_op.ifm2 and _npu_op.ifm2.tiles.addresses[0].buffer.data == buffer:
return BufferType.input
if _npu_op.ofm and _npu_op.ofm.tiles.addresses[0].buffer.data == buffer:
return BufferType.output
raise ValueError(f"Unused IO : {buffer} in tir module.")
constant_hex_data = []
total_constant_len = 0
buffer_addresses = dict()
for _buffer, info in buffer_info.items():
if info.values is not None:
assert info.btype == BufferType.constant
assert len(info.shape) == 1
buffer_addresses[_buffer] = (
(total_constant_len, info.btype) if constant_hex_data else (0, info.btype)
)
dtype_bytes = np.iinfo(np.dtype(info.dtype)).bits // 8
size_in_bytes = dtype_bytes * np.prod(list(info.shape))
            # Every memory address the NPU accesses has to be 16-byte aligned
size_in_bytes = util.round_up(size_in_bytes, 16)
constant_tensor = np.resize(info.values, size_in_bytes // dtype_bytes)
constant_tensor = constant_tensor.tobytes().hex()
constant_hex_data.append(constant_tensor)
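            # constant_tensor is a hex string at this point: two characters
            # encode one byte, hence the division by 2 when accumulating the
            # total byte length below.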
total_constant_len += len(constant_tensor) // 2
else:
if info.btype == BufferType.input_or_output or info.btype == BufferType.input:
buffer_type = info.btype
if info.btype == BufferType.input_or_output:
buffer_type = classify_io(_buffer)
assert buffer_type in (BufferType.input, BufferType.output)
address = 0
buffer_addresses[_buffer] = (address, buffer_type)
buffer_info[_buffer] = BufferInfo(
                    values=None, shape=info.shape, dtype=info.dtype, btype=buffer_type
)
elif info.btype == BufferType.shram:
accl_config = util.get_accelerator_config()
arch_config = get_accelerator_arch_config(accl_config)
address = arch_config.lut_start_address
buffer_addresses[_buffer] = (address, info.btype)
else:
# These buffer_vars are already updated in scratch_region_map
assert info.btype == BufferType.scratch
for npu_op in npu_ops:
for attr_name, attr in npu_op.__dict__.items():
if isinstance(attr, list):
new_attr = list()
for attr_ in attr:
new_attr.append(replace_tir_loads(attr_))
setattr(npu_op, attr_name, new_attr)
else:
setattr(npu_op, attr_name, replace_tir_loads(attr))
constant_data = "".join(constant_hex_data)
return (npu_ops, constant_data)
def translate_ethosu_tir_call_extern(tir_call_extern):
"""This is a dispatcher function to dispatch
    the correct translation call depending on the extern call's
first argument"""
supported_call_extern = {
"ethosu_conv2d": translate_ethosu_conv2d,
"ethosu_copy": translate_ethosu_copy,
"ethosu_depthwise_conv2d": translate_ethosu_depthwise_conv2d,
"ethosu_pooling": translate_ethosu_pooling,
"ethosu_binary_elementwise": translate_ethosu_binary_elementwise,
"ethosu_identity": translate_ethosu_pooling,
"ethosu_unary_elementwise": translate_ethosu_unary_elementwise,
}
ext_call_type = tir_call_extern.args[0].value
assert ext_call_type in supported_call_extern.keys(), f"{ext_call_type} is not yet supported"
npu_op = supported_call_extern[ext_call_type](tir_call_extern)
# Some conversions return additional outputs
# if they are needed, the caller should use the function directly
if isinstance(npu_op, tuple):
return npu_op[0]
return npu_op
def translate_ethosu_copy(tir_call_extern: tvm.tir.Call) -> vapi.NpuDmaOperation:
"""This function will translate a TIR call_extern
as produced by NPU Relay to TIR compilation.
Parameters
----------
tir_call_extern : tvm.tir.Call
Returns
-------
ethosu.vela.api.NpuDmaOperation
The vela object containing the params of ethosu_copy
"""
# We skip the first element as it is the call_extern function name
serial_object = spec.create_serial_object(spec.SerialCopy, tir_call_extern.args[1:])
return _create_npu_dma_op(serial_object)
def _convert_clip_bounds(npu_op: vapi.NpuBlockOperation):
"""This function will convert the min and max value
    of clip activations to non-quantized floats, as
    expected by the API.
Parameters
----------
npu_op : vapi.NpuBlockOperation
"""
clip_min_quant = npu_op.activation.min
clip_max_quant = npu_op.activation.max
clip_min_actual = (
clip_min_quant - npu_op.ofm.quantization.zero_point
) * npu_op.ofm.quantization.scale_f32
clip_max_actual = (
clip_max_quant - npu_op.ofm.quantization.zero_point
) * npu_op.ofm.quantization.scale_f32
npu_op.activation.min = clip_min_actual
npu_op.activation.max = clip_max_actual
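# Worked example: with ofm zero_point = 128 and scale_f32 = 0.5, a quantized
# clip range of [0, 255] becomes [(0 - 128) * 0.5, (255 - 128) * 0.5]
# = [-64.0, 63.5] in float.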
def translate_ethosu_conv2d(tir_call_extern: tvm.tir.Call) -> Tuple[vapi.NpuConv2DOperation, int]:
"""This function will translate a TIR call_extern
as produced by NPU Relay to TIR compilation.
Parameters
----------
tir_call_extern : tvm.tir.Call
        This should be a TIR call_extern that has an agreed-upon ordering
for TIR Compiler. See Serial2DConvolution in
tvm/relay/backend/contrib/ethosu/tir/spec.py for the ordering.
Returns
-------
ethosu.vela.api.NpuConv2DOperation
The vela object containing the params of ethosu_conv2d
weights_zero_point : int
The zero point of the weights
"""
# We skip the first element as it is the call_extern function name
serial_object = spec.create_serial_object(spec.Serial2DConvolution, tir_call_extern.args[1:])
return _create_npu_op_conv2d(serial_object)
def _create_npu_op_conv2d(
serial_2d_convolution: spec.Serial2DConvolution,
) -> Tuple[vapi.NpuConv2DOperation, int]:
"""This is a helper function to capture a list
of arguments to create Vela NpuConv2DOperation object.
"""
has_two_weights = serial_2d_convolution.weight2.address != -1
has_two_biases = serial_2d_convolution.scale_bias2.address != -1
npu_conv2d_op = vapi.NpuConv2DOperation()
npu_conv2d_op.ifm = _create_npu_feature_map(serial_2d_convolution.ifm)
npu_conv2d_op.ofm = _create_npu_feature_map(serial_2d_convolution.ofm)
npu_conv2d_op.kernel = _create_npu_kernel(serial_2d_convolution.kernel)
npu_conv2d_op.weights = (
[
_create_npu_address_range(serial_2d_convolution.weight),
_create_npu_address_range(serial_2d_convolution.weight2),
]
if has_two_weights
else [_create_npu_address_range(serial_2d_convolution.weight)]
)
weights_zero_point = np.int64(serial_2d_convolution.weight_zero_point.value)
npu_conv2d_op.biases = (
[
_create_npu_address_range(serial_2d_convolution.scale_bias),
_create_npu_address_range(serial_2d_convolution.scale_bias2),
]
if has_two_biases
else [_create_npu_address_range(serial_2d_convolution.scale_bias)]
)
npu_conv2d_op.padding = _create_npu_padding(serial_2d_convolution.padding)
npu_conv2d_op.activation = _create_npu_activation(serial_2d_convolution.activation)
if (
npu_conv2d_op.activation
and npu_conv2d_op.activation.op_type == vapi.NpuActivationOp.NONE_OR_RELU
):
_convert_clip_bounds(npu_conv2d_op)
npu_conv2d_op.rounding_mode = _create_npu_rounding_mode(serial_2d_convolution.rounding_mode)
npu_conv2d_op.ifm_upscale = _create_npu_resampling_mode(serial_2d_convolution.upscale)
weights_shape_ohwi = [
npu_conv2d_op.ofm.shape.depth,
npu_conv2d_op.kernel.height,
npu_conv2d_op.kernel.width,
npu_conv2d_op.ifm.shape.depth,
]
npu_conv2d_op.block_traversal = vela_api.calculate_block_traversal_mode(
is_depthwise=False,
weights_shape_ohwi=weights_shape_ohwi,
ifm_bitdepth=npu_conv2d_op.ifm.data_type.size_in_bits(),
)
npu_conv2d_op.block_config = _create_npu_block_config(serial_2d_convolution.block_config)
if not npu_conv2d_op.block_config:
target_accel_config = vela_api.get_accelerator_config()
block_config = vela_api.get_optimal_block_config(npu_conv2d_op, target_accel_config)
npu_conv2d_op.block_config = block_config
return npu_conv2d_op, weights_zero_point
def translate_ethosu_depthwise_conv2d(
tir_call_extern: tvm.tir.Call,
) -> Tuple[vapi.NpuConvDepthWiseOperation, int]:
"""This function will translate a TIR call_extern
as produced by NPU Relay to TIR compilation.
Parameters
----------
tir_call_extern : tvm.tir.Call
        This should be a TIR call_extern that has an agreed-upon ordering
for TIR Compiler. See Serial2DDepthwise in
tvm/relay/backend/contrib/ethosu/tir/spec.py for the ordering.
Returns
-------
ethosu.vela.api.NpuConvDepthWiseOperation
The vela object containing the params of ethosu_depthwise_conv2d
weights_zero_point : int
The zero point of the weights
"""
serial_object = spec.create_serial_object(spec.Serial2DDepthwise, tir_call_extern.args[1:])
return _create_npu_op_depthwise_conv2d(serial_object)
def _create_npu_op_depthwise_conv2d(serial_2d_depthwise):
npu_depthwise_conv2d_op = vapi.NpuConvDepthWiseOperation()
npu_depthwise_conv2d_op.ifm = _create_npu_feature_map(serial_2d_depthwise.ifm)
npu_depthwise_conv2d_op.ofm = _create_npu_feature_map(serial_2d_depthwise.ofm)
npu_depthwise_conv2d_op.kernel = _create_npu_kernel(serial_2d_depthwise.kernel)
npu_depthwise_conv2d_op.weights = [_create_npu_address_range(serial_2d_depthwise.weight)]
weights_zero_point = np.int64(serial_2d_depthwise.weight_zero_point.value)
npu_depthwise_conv2d_op.biases = [_create_npu_address_range(serial_2d_depthwise.scale_bias)]
npu_depthwise_conv2d_op.padding = _create_npu_padding(serial_2d_depthwise.padding)
npu_depthwise_conv2d_op.activation = _create_npu_activation(serial_2d_depthwise.activation)
if (
npu_depthwise_conv2d_op.activation
and npu_depthwise_conv2d_op.activation.op_type == vapi.NpuActivationOp.NONE_OR_RELU
):
_convert_clip_bounds(npu_depthwise_conv2d_op)
npu_depthwise_conv2d_op.rounding_mode = _create_npu_rounding_mode(
serial_2d_depthwise.rounding_mode
)
npu_depthwise_conv2d_op.ifm_upscale = _create_npu_resampling_mode(serial_2d_depthwise.upscale)
npu_depthwise_conv2d_op.block_config = _create_npu_block_config(
serial_2d_depthwise.block_config
)
if not npu_depthwise_conv2d_op.block_config:
target_accel_config = vela_api.get_accelerator_config()
block_config = vela_api.get_optimal_block_config(
npu_depthwise_conv2d_op, target_accel_config
)
npu_depthwise_conv2d_op.block_config = block_config
return npu_depthwise_conv2d_op, weights_zero_point
def _create_npu_feature_map(serial_feature_map: spec.SerialFeatureMap) -> vapi.NpuFeatureMap:
"""This is a helper function to capture a list
of arguments to create Vela NpuFeatureMap object.
"""
layout_map = {"NHWC": vapi.NpuLayout.NHWC, "NHCWB16": vapi.NpuLayout.NHCWB16}
datatype_map = {
"uint8": vapi.NpuDataType.UINT8,
"int8": vapi.NpuDataType.INT8,
"uint16": vapi.NpuDataType.UINT16,
"int16": vapi.NpuDataType.INT16,
"int32": vapi.NpuDataType.INT32,
}
layout = str(serial_feature_map.layout.value)
data_type = str(serial_feature_map.data_type.value)
    data_type_bytes = np.iinfo(np.dtype(data_type)).bits // 8
assert layout in layout_map.keys()
assert data_type in datatype_map.keys()
nfm = vapi.NpuFeatureMap()
nfm.data_type = datatype_map[data_type]
nfm.shape = vapi.NpuShape3D(
int(serial_feature_map.height),
int(serial_feature_map.width),
int(serial_feature_map.channels),
)
nfm.tiles = vapi.NpuTileBox(
int(serial_feature_map.tile_height_0),
int(serial_feature_map.tile_height_1),
int(serial_feature_map.tile_width_0),
[
serial_feature_map.tile_address_0,
serial_feature_map.tile_address_1,
serial_feature_map.tile_address_2,
serial_feature_map.tile_address_3,
],
)
nfm.quantization = _create_npu_quantization(
serial_feature_map.scale, serial_feature_map.zero_point
)
nfm.layout = layout_map[layout]
    nfm.strides = vapi.NpuShape3D(
        int(serial_feature_map.stride_h.value) * data_type_bytes,
        int(serial_feature_map.stride_w.value) * data_type_bytes,
        int(serial_feature_map.stride_c.value) * data_type_bytes,
    )
return nfm
def _create_npu_kernel(serial_kernel: spec.SerialKernel) -> vapi.NpuKernel:
"""This is a helper function to capture a list
of arguments to create Vela NpuKernel object.
"""
nknl = vapi.NpuKernel(
w=int(serial_kernel.width),
h=int(serial_kernel.height),
stride_x=int(serial_kernel.stride_w),
stride_y=int(serial_kernel.stride_h),
dilation_x=int(serial_kernel.dilation_w),
dilation_y=int(serial_kernel.dilation_h),
)
return nknl
def _create_npu_address_range(
serial_address_range: spec.SerialAddressRange,
) -> vapi.NpuAddressRange:
"""This is a helper function to capture a list
of arguments to create Vela NpuAddressRange object.
"""
addr_range = vapi.NpuAddressRange(
# region will be updated later
region=0,
address=serial_address_range.address,
length=int(serial_address_range.length),
)
return addr_range
def _create_npu_quantization(
scale: Union[tvm.tir.FloatImm, float],
zero_point: Union[tvm.tir.IntImm, int],
) -> vapi.NpuQuantization:
"""This is a helper function to capture a list
of arguments to create Vela NpuQuantization object.
"""
return vapi.NpuQuantization(scale_f32=float(scale), zero_point=int(zero_point))
def _create_npu_weights_zero_point(
zero_point: Union[int, tvm.tir.IntImm],
) -> int:
"""This is a helper function to capture the weights zero point."""
return int(zero_point)
def _create_npu_padding(serial_padding: spec.SerialPadding) -> vapi.NpuPadding:
"""This is a helper function to capture a list
of arguments to create Vela NpuPadding object."""
padding = vapi.NpuPadding(
top=int(serial_padding.top),
left=int(serial_padding.left),
bottom=int(serial_padding.bottom),
right=int(serial_padding.right),
)
return padding
def _create_npu_block_config(serial_block_config: spec.SerialBlockConfig) -> vapi.NpuShape3D:
"""A helper function to convert a SerialBlockConfig into an NpuShape3D"""
if serial_block_config.height * serial_block_config.width * serial_block_config.depth == 0:
return None
block_config = vapi.NpuShape3D(
height=int(serial_block_config.height),
width=int(serial_block_config.width),
depth=int(serial_block_config.depth),
)
return block_config
def _create_npu_activation(serial_activation: spec.SerialActivation) -> vapi.NpuActivation:
"""This is a helper function to capture a list
of arguments to create Vela NpuActivation object."""
if serial_activation.op == "NONE":
return None
if (
serial_activation.op == "CLIP"
and serial_activation.clip_min == 0
and serial_activation.clip_max == 0
):
return None
op_map = {
"CLIP": vapi.NpuActivationOp.NONE_OR_RELU,
"TANH": vapi.NpuActivationOp.TABLE_LOOKUP,
"SIGMOID": vapi.NpuActivationOp.TABLE_LOOKUP,
"LUT": vapi.NpuActivationOp.TABLE_LOOKUP,
}
op = str(serial_activation.op.value)
assert op in op_map.keys()
act_op = vapi.NpuActivation(op_map[op])
if serial_activation.op == "CLIP":
act_op.min = int(serial_activation.clip_min.value)
act_op.max = int(serial_activation.clip_max.value)
if op_map[op] == vapi.NpuActivationOp.TABLE_LOOKUP:
act_op.lookup_table_index = 0
return act_op
def _create_npu_resampling_mode(
mode: str,
) -> vapi.NpuResamplingMode:
"""This is a helper function to capture a list
of arguments to create Vela NpuResamplingMode object."""
mode_map = {
"NONE": vapi.NpuResamplingMode.NONE,
"NEAREST": vapi.NpuResamplingMode.NEAREST,
"ZEROS": vapi.NpuResamplingMode.TRANSPOSE,
}
mode = str(mode.value)
assert mode in mode_map.keys()
return mode_map[mode]
def _create_npu_rounding_mode(
mode: str,
) -> vapi.NpuRoundingMode:
"""This is a helper function to capture a list
of arguments to create Vela NpuRoundingMode object."""
mode_map = {
"TFL": vapi.NpuRoundingMode.TFL,
"TRUNCATE": vapi.NpuRoundingMode.TRUNCATE,
"NATURAL": vapi.NpuRoundingMode.NATURAL,
}
mode = str(mode.value)
assert mode in mode_map.keys()
return mode_map[mode]
def _create_npu_dma_op(serial_copy):
"""This is a helper function to capture the list of arguments
to create a NpuDmaOperation object"""
data_type_bytes = np.iinfo(np.dtype(serial_copy.read_address.dtype)).bits // 8
src = vapi.NpuAddressRange(
# region will be updated later
region=0,
address=serial_copy.read_address,
length=int(serial_copy.length.value) * data_type_bytes,
)
dest = vapi.NpuAddressRange(
# region will be updated later
region=0,
address=serial_copy.write_address,
length=int(serial_copy.length.value) * data_type_bytes,
)
return vapi.NpuDmaOperation(src, dest)
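# Note: the DMA length is expressed in bytes, so e.g. copying 32 int16
# elements yields NpuAddressRange lengths of 32 * 2 = 64 bytes.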
def translate_ethosu_pooling(tir_call_extern: tvm.tir.Call) -> vapi.NpuPoolingOperation:
"""This function will translate a TIR call_extern
as produced by NPU Relay to TIR compilation.
Parameters
----------
tir_call_extern : tvm.tir.Call
        This should be a TIR call_extern that has an agreed-upon ordering
for TIR Compiler. See SerialPooling in
tvm/relay/backend/contrib/ethosu/tir/spec.py for the ordering.
Returns
-------
ethosu.vela.api.NpuPoolingOperation
The vela object containing the params of ethosu_pooling
"""
serial_object = spec.create_serial_object(spec.SerialPooling, tir_call_extern.args[1:])
return _create_npu_op_pooling(serial_object)
def _create_npu_op_pooling(serial_pooling: spec.SerialPooling):
pooling_type = serial_pooling.pooling_type
    if pooling_type == "AVG":
        npu_pooling_op_type = vapi.NpuPoolingOp.AVERAGE
    elif pooling_type == "MAX":
        npu_pooling_op_type = vapi.NpuPoolingOp.MAX
    npu_pooling_op = vapi.NpuPoolingOperation(npu_pooling_op_type)
npu_pooling_op.ifm = _create_npu_feature_map(serial_pooling.ifm)
npu_pooling_op.ofm = _create_npu_feature_map(serial_pooling.ofm)
npu_pooling_op.kernel = _create_npu_kernel(serial_pooling.pool_shape)
npu_pooling_op.padding = _create_npu_padding(serial_pooling.padding)
npu_pooling_op.activation = _create_npu_activation(serial_pooling.activation)
if (
npu_pooling_op.activation
and npu_pooling_op.activation.op_type == vapi.NpuActivationOp.NONE_OR_RELU
):
_convert_clip_bounds(npu_pooling_op)
npu_pooling_op.rounding_mode = _create_npu_rounding_mode(serial_pooling.rounding_mode)
npu_pooling_op.ifm_upscale = _create_npu_resampling_mode(serial_pooling.upscale)
npu_pooling_op.block_config = _create_npu_block_config(serial_pooling.block_config)
if not npu_pooling_op.block_config:
target_accel_config = vela_api.get_accelerator_config()
block_config = vela_api.get_optimal_block_config(npu_pooling_op, target_accel_config)
npu_pooling_op.block_config = block_config
return npu_pooling_op
def translate_ethosu_binary_elementwise(
tir_call_extern: tvm.tir.Call,
) -> vapi.NpuElementWiseOperation:
"""This function will translate a TIR call_extern
as produced by NPU Relay to TIR compilation.
Parameters
----------
tir_call_extern : tvm.tir.Call
        This should be a TIR call_extern that has an agreed-upon ordering
for TIR Compiler. See SerialBinaryElementwise in
tvm/relay/backend/contrib/ethosu/tir/spec.py for the ordering.
Returns
-------
ethosu.vela.api.NpuElementWiseOperation
The vela object containing the params of ethosu_binary_elementwise
"""
serial_object = spec.create_serial_object(
spec.SerialBinaryElementwise, tir_call_extern.args[1:]
)
return _create_npu_op_binary_elementwise(serial_object)
def _create_npu_op_binary_elementwise(serial_binary_elementwise: spec.SerialBinaryElementwise):
operator_type = serial_binary_elementwise.operator_type
if operator_type == "ADD":
op = vapi.NpuElementWiseOp.ADD
elif operator_type == "SUB":
op = vapi.NpuElementWiseOp.SUB
elif operator_type == "MUL":
op = vapi.NpuElementWiseOp.MUL
elif operator_type == "MIN":
op = vapi.NpuElementWiseOp.MIN
elif operator_type == "MAX":
op = vapi.NpuElementWiseOp.MAX
elif operator_type == "SHR":
op = vapi.NpuElementWiseOp.SHR
elif operator_type == "SHL":
op = vapi.NpuElementWiseOp.SHL
npu_binary_elementwise_op = vapi.NpuElementWiseOperation(op)
npu_binary_elementwise_op.ifm = _create_npu_feature_map(serial_binary_elementwise.ifm)
npu_binary_elementwise_op.ifm2 = _create_npu_feature_map(serial_binary_elementwise.ifm2)
npu_binary_elementwise_op.ofm = _create_npu_feature_map(serial_binary_elementwise.ofm)
npu_binary_elementwise_op.reversed_operands = serial_binary_elementwise.reversed_operands
npu_binary_elementwise_op.activation = _create_npu_activation(
serial_binary_elementwise.activation
)
if (
npu_binary_elementwise_op.activation
and npu_binary_elementwise_op.activation.op_type == vapi.NpuActivationOp.NONE_OR_RELU
):
_convert_clip_bounds(npu_binary_elementwise_op)
npu_binary_elementwise_op.rounding_mode = _create_npu_rounding_mode(
serial_binary_elementwise.rounding_mode
)
npu_binary_elementwise_op.block_config = _create_npu_block_config(
serial_binary_elementwise.block_config
)
if not npu_binary_elementwise_op.block_config:
target_accel_config = vela_api.get_accelerator_config()
block_config = vela_api.get_optimal_block_config(
npu_binary_elementwise_op, target_accel_config
)
npu_binary_elementwise_op.block_config = block_config
return npu_binary_elementwise_op
def translate_ethosu_unary_elementwise(
tir_extern_call: tvm.tir.Call,
) -> vapi.NpuElementWiseOperation:
"""This function will translate a tir extern_call
as produced by Relay to TIR compilation.
Parameters
----------
tir_extern_call : tvm.tir.Call
        This should be a tir external call that has an agreed-upon ordering
for the NPU TIR Compiler. See SerialUnaryElementwise in
tvm/relay/backend/contrib/ethosu/tir/spec.py for the ordering.
Returns
-------
ethosu.vela.api.NpuElementWiseOperation
The vela object containing the params of ethosu_unary_elementwise
"""
serial_object = spec.create_serial_object(spec.SerialUnaryElementwise, tir_extern_call.args[1:])
return _create_npu_op_unary_elementwise(serial_object)
def _create_npu_op_unary_elementwise(serial_unary_elementwise):
operator_type = serial_unary_elementwise.operator_type
if operator_type == "ABS":
op = vapi.NpuElementWiseOp.ABS
if operator_type == "CLZ":
op = vapi.NpuElementWiseOp.CLZ
npu_unary_elementwise_op = vapi.NpuElementWiseOperation(op)
npu_unary_elementwise_op.ifm = _create_npu_feature_map(serial_unary_elementwise.ifm)
npu_unary_elementwise_op.ofm = _create_npu_feature_map(serial_unary_elementwise.ofm)
npu_unary_elementwise_op.activation = _create_npu_activation(
serial_unary_elementwise.activation
)
if (
npu_unary_elementwise_op.activation
and npu_unary_elementwise_op.activation.op_type == vapi.NpuActivationOp.NONE_OR_RELU
):
_convert_clip_bounds(npu_unary_elementwise_op)
npu_unary_elementwise_op.rounding_mode = _create_npu_rounding_mode(
serial_unary_elementwise.rounding_mode
)
npu_unary_elementwise_op.block_config = _create_npu_block_config(
serial_unary_elementwise.block_config
)
if not npu_unary_elementwise_op.block_config:
target_accel_type = vela_api.get_accelerator_config()
block_config = vela_api.get_optimal_block_config(
npu_unary_elementwise_op, target_accel_type
)
npu_unary_elementwise_op.block_config = block_config
return npu_unary_elementwise_op
| https://github.com/zk-ml/tachikoma |