file_path | content | repo
---|---|---|
python/tvm/relay/backend/contrib/ethosu/util.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Helper utility enums and functions used throughout code generation.
The rest of the utility functions are miscellaneous;
refer to the descriptions inside those functions.
"""
from inspect import signature
from enum import Enum
from typing import Union, Tuple, List
import numpy as np # type: ignore
import tvm # type: ignore
from tvm import relay
from tvm._ffi import register_object
from tvm.runtime import Object
from . import _ffi_api
class QConv2DArgs(Enum):
"""
This is a helper enum to obtain the correct index
of qnn.conv2d arguments.
"""
IFM = 0
WEIGHTS = 1
IFM_ZERO_POINT = 2
WEIGHTS_ZERO_POINT = 3
IFM_SCALE = 4
WEIGHTS_SCALE = 5
class QConv2DTransposeArgs(Enum):
"""
This is a helper enum to obtain the correct index
of qnn.conv2d_transpose arguments.
"""
IFM = 0
WEIGHTS = 1
IFM_ZERO_POINT = 2
WEIGHTS_ZERO_POINT = 3
IFM_SCALE = 4
WEIGHTS_SCALE = 5
class RequantArgs(Enum):
"""
This is a helper enum to obtain the correct index
of qnn.requantize arguments.
"""
IFM_SCALE = 1
IFM_ZERO_POINT = 2
OFM_SCALE = 3
OFM_ZERO_POINT = 4
class BiasAddArgs(Enum):
"""
    This is a helper enum to obtain the correct index
of qnn.bias_add arguments.
"""
BIASES = 1
class ClipArgs(Enum):
"""
    This is a helper enum to obtain the correct index
of clip arguments.
"""
A_MIN = 1
A_MAX = 2
class BinaryElementwiseArgs(Enum):
"""This is a helper enums to access the correct index
of binary elementwise arguments
"""
IFM = 0
IFM2 = 1
IFM_SCALE = 2
IFM_ZERO_POINT = 3
IFM2_SCALE = 4
IFM2_ZERO_POINT = 5
OFM_SCALE = 6
OFM_ZERO_POINT = 7
class QuantizeArgs(Enum):
"""
    This is a helper enum to access the correct index of
quantize arguments
"""
IFM = 0
OFM_SCALE = 1
OFM_ZERO_POINT = 2
class DequantizeArgs(Enum):
"""
    This is a helper enum to access the correct index of
dequantize arguments
"""
IFM = 0
IFM_SCALE = 1
IFM_ZERO_POINT = 2
class QDenseArgs(Enum):
"""
This is a helper enum to access the correct index of
qnn.dense arguments
"""
IFM = 0
WEIGHTS = 1
IFM_ZERO_POINT = 2
WEIGHTS_ZERO_POINT = 3
IFM_SCALE = 4
WEIGHTS_SCALE = 5
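# A minimal sketch of how these index enums are meant to be used: given a call
# node from a quantized Relay graph (the variable name `qnn_conv2d_call` below is
# hypothetical), the enum value selects the matching argument of the call.
def _example_get_conv2d_zero_points(qnn_conv2d_call):
    """Illustrative only: pull the IFM/weights zero points out of a qnn.conv2d call."""
    ifm_zp = qnn_conv2d_call.args[QConv2DArgs.IFM_ZERO_POINT.value]
    weights_zp = qnn_conv2d_call.args[QConv2DArgs.WEIGHTS_ZERO_POINT.value]
    return ifm_zp, weights_zp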
def is_npu_func(func: relay.Function) -> bool:
"""Check if the given function is an NPU function."""
return func.attrs and "Compiler" in func.attrs and func.attrs["Compiler"] == "ethos-u"
def is_composite_func(func: relay.Function, name: str) -> bool:
"""
    This method checks whether the given function is a composite function
    with the given name.
    Parameters
    ----------
    func : relay.Function
        The function to check.
    name : str
        The candidate composite name to be checked.
    Returns
    -------
    bool
        True if the function is a composite function with the given name,
        False otherwise.
"""
if not hasattr(func, "attrs"):
return False
if "Composite" not in func.attrs.keys():
return False
composite_name = func.attrs["Composite"]
return composite_name == name
def is_named_ethosu_op(expr: tvm.relay.Expr, name: str) -> bool:
"""Checks whether a relay expression matches that of the
named operator.
Parameters
----------
expr : tvm.relay.Expr
The expression to check.
name : str
The name of the expected operator
(without NPU prefix "contrib.ethosu").
Returns
-------
bool
        True if the expression matches the name, False otherwise.
"""
prefix = "contrib.ethosu."
return (
isinstance(expr, tvm.relay.expr.Call)
and isinstance(expr.op, tvm.ir.op.Op)
and expr.op.name == prefix + name
)
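# For illustration: each predicate keys off function/op attributes set earlier in
# the compilation flow. Given a partitioned function `func` and a call node `call`
# (both hypothetical here; the composite name is an example):
#   is_npu_func(func)                             -> True when func.attrs["Compiler"] == "ethos-u"
#   is_composite_func(func, "ethos-u.qnn_conv2d") -> True when func.attrs["Composite"] matches
#   is_named_ethosu_op(call, "conv2d")            -> True when call targets "contrib.ethosu.conv2d"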
def get_range_for_dtype_str(dtype: str) -> Tuple[int, int]:
"""
    Produce the (min, max) range for a given data type.
Parameters
----------
dtype : str
a type string (e.g., int8)
Returns
-------
type_info.min : int
the minimum of the range
type_info.max : int
the maximum of the range
"""
try:
type_info = np.iinfo(dtype)
except ValueError:
type_info = np.finfo(dtype)
return type_info.min, type_info.max
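# For illustration, the ranges produced for data types commonly seen on the NPU:
#   get_range_for_dtype_str("int8")  -> (-128, 127)
#   get_range_for_dtype_str("uint8") -> (0, 255)
#   get_range_for_dtype_str("int32") -> (-2147483648, 2147483647)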
def round_away_zero(f: Union[float, np.double, np.single, np.float32, np.float64]) -> np.float64:
"""Round the number away from zero towards +inf / -inf"""
offset = -0.5 if (f < 0) else 0.5
return np.trunc(f + offset)
def round_up(a: int, b: int) -> int:
"""Round up to a multiple of b"""
return ((a + b - 1) // b) * b
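# For illustration:
#   round_away_zero(2.5) -> 3.0 and round_away_zero(-2.5) -> -3.0, unlike Python's
#   built-in round(), which uses banker's rounding (round(2.5) == 2).
#   round_up(10, 16) -> 16 and round_up(32, 16) -> 32.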
def get_accelerator_config():
"""Get the variant of the accelerator to compile for"""
compiler_attrs = tvm.get_global_func("relay.ext.ethos-u.get_compiler_attrs")()
return compiler_attrs.accelerator_config
def is_cascader_enabled():
"""Determine whether the cascader is enabled"""
compiler_attrs = tvm.get_global_func("relay.ext.ethos-u.get_compiler_attrs")()
return compiler_attrs.enable_cascader
def is_striping_enabled():
"""Determine whether the cascader is enabled"""
compiler_attrs = tvm.get_global_func("relay.ext.ethos-u.get_compiler_attrs")()
return compiler_attrs.enable_striping
def get_arg_count(func):
"""Helper function to get the number of
arguments in a python function"""
sig = signature(func)
return len(sig.parameters)
def get_dim_value(layout: str, dim: str):
    """This is a helper function to retrieve the index
    of the given dimension within the layout
    """
assert isinstance(layout, str)
assert dim in list(layout)
for idx, dim_char in enumerate(layout):
if dim_char == dim:
return idx
return None
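# For illustration: get_dim_value("NHWC", "C") -> 3 and get_dim_value("NHCWB16", "W") -> 3,
# i.e. the positional index of the dimension character within the layout string.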
def calculate_size_bytes(expr):
"""This is a helper function to calculate the number
of bytes required to hold the tensor/relay.expr"""
try:
type_info = np.iinfo(expr.checked_type.dtype)
except ValueError:
type_info = np.finfo(expr.checked_type.dtype)
element_size = type_info.bits // 8
elements = np.prod(list(expr.checked_type.shape))
return element_size * elements
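# For illustration: an int8 tensor of shape (1, 8, 8, 3) takes 1 * (1 * 8 * 8 * 3) = 192 bytes,
# while an int32 tensor of the same shape takes 4 * 192 = 768 bytes.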
@register_object("relay.ext.ethos-u.BaseAddress")
class BaseAddress(Object):
"""
This is a structure to hold base addresses for pointers
provided for the driver.
"""
def __init__(
self,
name: str,
primfunc_param_idx: int,
region: int,
size: int,
is_runtime_allocation: bool = False,
):
self.__init_handle_by_constructor__(
_ffi_api.BaseAddress, # type: ignore # pylint: disable=no-member
name,
primfunc_param_idx,
region,
size,
is_runtime_allocation,
)
@register_object("relay.ext.ethos-u.CompilationArtifact")
class CompilationArtifact(Object):
"""
This is a structure to hold binary artifacts
for the microNPU.
"""
def __init__(
self,
function_name: str,
command_stream: str,
encoded_constants: str,
base_addresses: List[BaseAddress],
):
self.__init_handle_by_constructor__(
_ffi_api.CompilationArtifact, # type: ignore # pylint: disable=no-member
function_name,
command_stream,
encoded_constants,
base_addresses,
)
def create_npu_function_pass(opt_level: int, name: str = ""):
"""
A utility decorator that wraps a given class as an NPU function pass. That is,
a pass that behaves like a function pass and only traverses NPU external
functions. How each NPU function is mutated is defined by the
`transform_npu_function(global_variable, relay_function)` function which should
be created in the class that is to be decorated. See the example below.
Example
-------
This small example demonstrates a pass over NPU functions that performs no
mutation.
@create_npu_function_pass(opt_level=1)
class MyPass:
def transform_npu_function(self, global_var, func):
return func
mod = tvm.IRModule()
mod = MyPass()(mod)
Parameters
----------
opt_level: int
Optimization level for the module pass.
name: str, optional
Name for the module pass.
Returns
-------
decorator
The npu_pass decorator.
"""
def decorator(npu_pass_class):
@tvm.ir.transform.module_pass(name=name, opt_level=opt_level)
class ModulePassWrapper:
"""The wrapper for the NPU pass."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def transform_module(self, mod: tvm.ir.IRModule, _) -> tvm.ir.IRModule:
npu_functions = filter(lambda x: is_npu_func(x[1]), mod.functions.items())
for global_var, func in npu_functions:
npu_pass = npu_pass_class(*self.args, **self.kwargs)
func = npu_pass.transform_npu_function(global_var, func)
mod.update_func(global_var, func)
return mod
return ModulePassWrapper
return decorator
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/ethosu/vela_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is an adapter module for conversions between TVM and Vela.
The following conversion APIs are added:
*Obtaining the best block config
*Compressing weights
*Packing biases
"""
import logging
import math
from typing import Tuple, Optional, List
import numpy as np # type: ignore
from ethosu.vela import api as vapi # type: ignore
import tvm
from tvm.relay.backend.contrib.ethosu import util # type: ignore
from tvm.relay.backend.contrib.ethosu import tir_to_cs_translator as tirtocs
# pylint: disable=invalid-name
logger = logging.getLogger("Ethos-U")
VELA_TO_NP_DTYPES = {
vapi.NpuDataType.UINT8: np.uint8,
vapi.NpuDataType.UINT16: np.uint16,
vapi.NpuDataType.INT8: np.int8,
vapi.NpuDataType.INT16: np.int16,
vapi.NpuDataType.INT32: np.int32,
}
SCALE_BIAS_LENGTH = 10
def get_optimal_block_config(
npu_op: vapi.NpuOperation, accel_config: vapi.NpuAccelerator
) -> vapi.NpuShape3D:
"""
"The NPU's unit of work is known as a block. It will fetch block(s) from Input
Feature Map (IFM) and a compute block for Output Feature Map (OFM).
Therefore, we need to pick an optimal block configuration considering bandwidth
to bring IFM blocks and the number of OFM block computes need to happen
to cover the OFM as indicated by the npu op.
Parameters
----------
npu_op : ethosu.vela.api.NpuOperation
The NPU operation and its params
accel_config : ethosu.vela.api.NpuAccelerator
The NPU accelerator config
Returns
-------
ethosu.vela.api.NpuShape3D :
The optimal block config for the operator
"""
options = tvm.transform.PassContext.current().config.get("relay.ext.ethos-u.options", None)
if options and options.dev_force_block_config:
block_config = [int(v) for v in options.dev_force_block_config.split("x")]
return vapi.NpuShape3D(height=block_config[0], width=block_config[1], depth=block_config[2])
all_valid_block_configs = vapi.npu_find_block_configs(npu_op, accel_config)
return _get_optimal_block_config(all_valid_block_configs)
def _get_optimal_block_config(all_valid_block_configs: List[vapi.NpuShape3D]) -> vapi.NpuShape3D:
"""An internal function to get block config with largest depth
and then highest volume/area"""
assert isinstance(all_valid_block_configs, list)
for block_cfg in all_valid_block_configs:
assert isinstance(block_cfg, vapi.NpuShape3D)
# Getting the largest volume block for benchmarking
all_valid_block_configs.sort(
key=lambda _cfg: _cfg.depth * _cfg.height * _cfg.width, reverse=True
)
largest_volume_block_config = all_valid_block_configs[0]
largest_volume = (
largest_volume_block_config.depth
* largest_volume_block_config.height
* largest_volume_block_config.width
)
all_valid_block_configs.sort(key=lambda _cfg: _cfg.depth, reverse=True)
max_d = all_valid_block_configs[0].depth
max_depth_block_configs = [_cfg for _cfg in all_valid_block_configs if _cfg.depth == max_d]
max_depth_block_configs.sort(key=lambda _cfg: _cfg.height * _cfg.width, reverse=True)
max_area = max_depth_block_configs[0].height * max_depth_block_configs[0].width
max_area_depth_block_configs = [
_cfg for _cfg in max_depth_block_configs if _cfg.height * _cfg.width == max_area
]
    # This is to get a deterministic answer every time
max_area_depth_block_configs.sort(key=lambda _cfg: _cfg.height, reverse=True)
assert len(max_area_depth_block_configs) > 0
current_volume = (
max_area_depth_block_configs[0].depth
* max_area_depth_block_configs[0].height
* max_area_depth_block_configs[0].width
)
logger.info("Using block config=%s", max_area_depth_block_configs[0])
logger.info(
"Quality of the block config w.r.t. max volume block config=%s",
100.0 * (current_volume / largest_volume),
)
return max_area_depth_block_configs[0]
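# A worked example of the selection rule above, with hypothetical candidates:
#   _get_optimal_block_config([
#       vapi.NpuShape3D(height=8, width=8, depth=16),
#       vapi.NpuShape3D(height=16, width=8, depth=8),
#       vapi.NpuShape3D(height=8, width=16, depth=16),
#   ])
# keeps the configs with the largest depth (16), then the largest height * width
# area among those, so the 8x16x16 config is returned.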
def encode_weights(
tir_extern_call: tvm.tir.Call, values: np.ndarray, accel_config: vapi.NpuAccelerator
):
"""This is an API function to compress weights by passing
a tir_extern_call to NPU Convolution operation and values.
Parameters
----------
tir_extern_call : tvm.tir.Call
tir_extern_call to NPU Convolution operation
values : numpy.ndarray
The constant flattened weight data in OHWI layout
accel_config : ethosu.vela.api.NpuAccelerator
The NPU accelerator config
Returns
-------
bytearray
Compressed weights
"""
supported_ops = {
"ethosu_conv2d": tirtocs.translate_ethosu_conv2d,
"ethosu_depthwise_conv2d": tirtocs.translate_ethosu_depthwise_conv2d,
}
op = str(tir_extern_call.args[0].value)
assert op in supported_ops.keys()
npu_op, weights_zero_point = supported_ops[op](tir_extern_call)
is_depthwise = op == "ethosu_depthwise_conv2d"
# Recover the original shape if we are dealing with a flattened tensor
if len(values.shape) == 1:
shape_ohwi = (
npu_op.ofm.shape.depth,
npu_op.kernel.height,
npu_op.kernel.width,
1 if is_depthwise else npu_op.ifm.shape.depth,
)
assert values.size == np.prod(shape_ohwi)
values = np.reshape(values, shape_ohwi)
return compress_weights(
weights=values,
weights_zp=weights_zero_point,
# The weight layout is assumed to be OHWI, always.
weights_layout="OHWI",
ifm_bitdepth=npu_op.ifm.data_type.size_in_bits(),
block_depth=npu_op.block_config.depth,
dilation=(npu_op.kernel.dilation_x, npu_op.kernel.dilation_y),
accel_config=accel_config,
is_depthwise=is_depthwise,
)
def compress_weights(
weights: np.ndarray,
weights_zp: int,
weights_layout: str,
ifm_bitdepth: int,
block_depth: int,
dilation: Tuple[int, int],
accel_config: vapi.NpuAccelerator,
is_depthwise: Optional[bool] = False,
) -> bytearray:
"""The NPU requires the weights to be compressed
to be executed. Therefore, this function calls into
the Vela APIs to compress the weights.
Parameters
----------
weights : numpy.ndarray
The raw weights
weights_zp : int
The zero point of the weights
weights_layout : str
A string literal indicating the layout
Supported values : HWIO, HWOI, OHWI
ifm_bitdepth : int
The bit depth of the ifm the weights are used with
block_depth : int
The depth of the optimal block config for the operator
dilation : tuple
A tuple of 2 elements indicating dilation in h and w
accel_config : ethosu.vela.api.NpuAccelerator
The NPU accelerator config
is_depthwise : bool, Optional
This indicates whether the weights are compressed for depthwise convolution
Returns
-------
compressed_weights : bytearray
Compressed weights
"""
layout_transform_indices = {"HWIO": (3, 0, 1, 2), "HWOI": (2, 0, 1, 3), "OHWI": (0, 1, 2, 3)}
assert weights_layout in layout_transform_indices.keys()
assert isinstance(weights_zp, np.int64)
weights = weights.astype(np.int16) - weights_zp
# Vela needs the weights in OHWI layout
weights_ohwi = np.transpose(weights, layout_transform_indices[weights_layout])
shape_ohwi = [
weights.shape[layout_transform_indices[weights_layout][0]],
weights.shape[layout_transform_indices[weights_layout][1]],
weights.shape[layout_transform_indices[weights_layout][2]],
weights.shape[layout_transform_indices[weights_layout][3]],
]
block_traversal = calculate_block_traversal_mode(is_depthwise, shape_ohwi, ifm_bitdepth)
compressed_weights = vapi.npu_encode_weights(
accelerator=accel_config,
weights_volume=weights_ohwi,
dilation_xy=dilation,
ifm_bitdepth=ifm_bitdepth,
ofm_block_depth=block_depth,
is_depthwise=is_depthwise,
block_traversal=block_traversal,
)
return compressed_weights
def calculate_block_traversal_mode(
is_depthwise: bool, weights_shape_ohwi: List[int], ifm_bitdepth: int
) -> vapi.NpuBlockTraversal:
"""Calculate a block traversal mode given whether the op is depthwise convolution,
shape of weights and bit-depth of the ifm.
"""
if is_depthwise:
return vapi.NpuBlockTraversal.DEPTH_FIRST
# Determine which block traversal strategy has better DPU utilization
kernel_size = weights_shape_ohwi[1] * weights_shape_ohwi[2]
depth_utilization = weights_shape_ohwi[3] / util.round_up(
weights_shape_ohwi[3], 32 if ifm_bitdepth == 8 else 16
)
part_kernel_utilization = (weights_shape_ohwi[3] / util.round_up(weights_shape_ohwi[3], 8)) * (
kernel_size / util.round_up(kernel_size, 4 if ifm_bitdepth == 8 else 2)
)
if part_kernel_utilization >= depth_utilization or weights_shape_ohwi[3] <= 8:
# Part-kernel first is always better for ifm depths <= 8
return vapi.NpuBlockTraversal.PART_KERNEL_FIRST
return vapi.NpuBlockTraversal.DEPTH_FIRST
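# A worked example, assuming hypothetical OHWI weights of shape [32, 3, 3, 16]
# and an 8-bit IFM:
#   kernel_size             = 3 * 3 = 9
#   depth_utilization       = 16 / round_up(16, 32)                         = 0.5
#   part_kernel_utilization = (16 / round_up(16, 8)) * (9 / round_up(9, 4)) = 1.0 * 0.75 = 0.75
# Since part_kernel_utilization >= depth_utilization, PART_KERNEL_FIRST is chosen.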
def pack_biases(
biases: np.ndarray,
ifm_scale: float,
ifm_dtype: np.dtype,
weight_scales: np.ndarray,
ofm_scale: float,
is_activation_tanh_or_sigmoid: bool = False,
) -> np.ndarray:
"""
    The NPU requires each bias value to be packed with
    output scale parameters in an 80-bit format (that is returned
    via the npu_encode_bias API). This function packs such values
    into a binary artifact that the NPU will use during execution.
Parameters
----------
biases : numpy.ndarray
The values of biases
ifm_scale : float
The quantization scale parameter of input feature map
ifm_dtype : numpy.dtype
The data type of input feature map data.
weight_scales : numpy.ndarray
The quantization scale parameter of weight feature map
This could be a tuple if per-channel quantization is present.
ofm_scale : float
The quantization scale parameter of output feature map.
is_activation_tanh_or_sigmoid : bool
Indicates whether the fused activation function is tanh or sigmoid.
Returns
-------
scale_bias : numpy.ndarray
Packed scales/biases as the hardware requires them.
"""
# The BYOC infra should not partition anything else.
supported_ifm_dtypes = (np.uint8, np.int8, np.int16)
assert ifm_dtype in supported_ifm_dtypes
if weight_scales.size == 1:
weight_scales = [weight_scales] * biases.size
hw_bias_scales = _calculate_hw_bias_scales(
ifm_scale, weight_scales, ofm_scale, ifm_dtype, is_activation_tanh_or_sigmoid
)
assert len(hw_bias_scales) == biases.size
biases = biases.astype("int64")
packed_biases = bytearray()
for idx, scale in enumerate(hw_bias_scales):
packed_biases.extend(vapi.npu_encode_bias(biases[idx], *scale))
scale_bias = np.frombuffer(packed_biases, dtype=np.uint8)
    scale_bias = np.reshape(scale_bias, (-1, SCALE_BIAS_LENGTH))
return scale_bias
def _quantize_scale(scale: float) -> Tuple[int, int]:
"""Quantize floating point scale into 32-bit int scale with a 6-bit shift.
This is to be used with 8-bit data.
"""
mantissa, exponent = math.frexp(scale)
mantissa_scaled = mantissa * (1 << 31)
mantissa_scaled = int(util.round_away_zero(mantissa_scaled))
required_shift = 31 - exponent
if required_shift < 0 or required_shift >= (1 << 6):
# Shift outside of valid range, set scale to 0
return 0, 16
return mantissa_scaled, required_shift
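# A worked example: for scale = 0.25, math.frexp gives mantissa 0.5 and exponent -1,
# so mantissa_scaled = round(0.5 * 2**31) = 1073741824 and required_shift = 31 - (-1) = 32,
# i.e. the scale is represented as 1073741824 * 2**-32 == 0.25.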
def _reduced_quantize_scale(scale: float) -> Tuple[int, int]:
"""A reduction of precision is required for 16 bit data."""
mantissa_scaled, required_shift = _quantize_scale(scale)
# This is max a signed 16-bit number could represent
max_reduced_mantissa_scaled = (1 << 15) - 1
    # If the current value is larger than the pre-scaled max_reduced_mantissa_scaled
    # we need to saturate the answer to max_reduced_mantissa_scaled
if mantissa_scaled >= max_reduced_mantissa_scaled << 16:
reduced_mantissa_scaled = max_reduced_mantissa_scaled
else:
reduced_mantissa_scaled = (mantissa_scaled + (1 << 15)) >> 16
reduced_shift = required_shift - 16
if required_shift < 0 or required_shift >= (1 << 6):
# Shift outside of valid range, set scale to 0
return 0, 16
return reduced_mantissa_scaled, reduced_shift
def _calculate_hw_bias_scales(
ifm_scale: float,
weight_scales: List[float],
ofm_scale: float,
ifm_dtype: np.dtype,
is_faf_tanh_sigmoid: bool = False,
) -> List[Tuple[int, int]]:
"""This function will produce a scale that is calculated using scales of ifm,
weights and ofm. It is also important to note that if per-channel / per-value
quantization required they should go into hw bias scales"""
if is_faf_tanh_sigmoid:
ifm_scale = ifm_scale * 0x3000
if ifm_dtype == np.uint8:
bias_scales = [np.double(ifm_scale * ws) / np.double(ofm_scale) for ws in weight_scales]
else:
assert ifm_dtype in (np.int8, np.int16)
ifm_scale_dbl = np.double(ifm_scale)
ofm_scale_dbl = np.double(ofm_scale)
bias_scales = [ifm_scale_dbl * np.double(ws) / ofm_scale_dbl for ws in weight_scales]
if ifm_dtype == np.int16:
hw_bias_scales = [_reduced_quantize_scale(bs) for bs in bias_scales]
else:
assert ifm_dtype in (np.uint8, np.int8)
hw_bias_scales = [_quantize_scale(bs) for bs in bias_scales]
return hw_bias_scales
def get_accelerator_config() -> vapi.NpuAccelerator:
"""Get the configuration of the NPU accelerator.
The configuration string provided as a compiler option is converted into
an NpuAccelerator object. Valid configuration strings:
- 'ethos-u55-256'
- 'ethos-u55-128'
- 'ethos-u55-64'
- 'ethos-u55-32'
"""
npu_accel_str_map = {
"ethos-u55-256": vapi.NpuAccelerator.Ethos_U55_256,
"ethos-u55-128": vapi.NpuAccelerator.Ethos_U55_128,
"ethos-u55-64": vapi.NpuAccelerator.Ethos_U55_64,
"ethos-u55-32": vapi.NpuAccelerator.Ethos_U55_32,
"ethos-u65-256": vapi.NpuAccelerator.Ethos_U65_256,
"ethos-u65-512": vapi.NpuAccelerator.Ethos_U65_512,
}
compiler_attrs = tvm.get_global_func("relay.ext.ethos-u.get_compiler_attrs")()
accel_config_str = compiler_attrs.accelerator_config
assert accel_config_str in npu_accel_str_map.keys(), f"{accel_config_str} is not supported"
return npu_accel_str_map[accel_config_str]
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/uma/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""UMA modules for Relay."""
from .backend import UMABackend
from .api.utils import uma_available
__all__ = ["UMABackend", "uma_available"]
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/uma/api/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""UMA: Universal Modular Accelerator Interface API"""
from .codegen import UMACodegen
from .lower import UMALower
from .partitioner import UMAPartitioner
__all__ = ["UMACodegen", "UMALower", "UMAPartitioner"]
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/uma/api/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for relay transformation passes."""
import tvm._ffi # type: ignore
tvm._ffi._init_api("relay.ext.uma", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/uma/api/codegen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Codegen base class of the Universal Modular Accelerator Interface (UMA)"""
from typing import Callable, Optional
import tvm
class UMACodegen(object):
"""
Codegen base class of the Universal Modular Accelerator Interface (UMA)
"""
def __init__(self, target_name: str) -> None:
self.target_name = target_name
def _register_codegen(
self, fmt: str = "c", includes: Optional[Callable[[], str]] = None, **kwargs
) -> None:
"""Registration codegen in UMA.
Parameters
----------
fmt: str
format of codegen. Currently only "c" is supported.
        includes : Optional[Callable[[], str]]
user-defined function that adds C-#include statement to UMA C-Code.
"""
if fmt == "c":
self._register_c_codegen(includes, **kwargs)
else:
raise RuntimeError(f'Unsupported codegen format "{fmt}"')
def _register_c_codegen(self, includes: Optional[Callable[[], str]] = None) -> None:
"""Registration of UMA helper functions, e.g. includes and replace_call_extern.
Parameters
----------
        includes : Optional[Callable[[], str]]
user-defined function that adds C-#include statement to UMA C-Code.
"""
if includes is not None:
tvm._ffi.register_func(
f"relay.ext.uma.codegen_c_includes_{self.target_name}",
includes,
override=True,
)
def register(self) -> None:
pass
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/uma/api/lower.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Lowering base class of the Universal Modular Accelerator Interface (UMA)"""
from typing import List, Tuple, Callable, Optional
import tvm
from tvm import relay, te
from tvm.relay.op.op import register_strategy
from . import _ffi_api
from .utils import PassPhase
OperatorStrategies = List[
Tuple[
str,
Callable[
[tvm.ir.Attrs, tvm.ir.Array, tvm.ir.TensorType, tvm.target.Target],
tvm.relay.op.op.OpStrategy,
],
Optional[int],
]
]
class UMALower:
"""Lowering base class of the Universal Modular Accelerator Interface (UMA)."""
def __init__(self, target_name: str) -> None:
self.target_name = target_name
self._operator_strategies: OperatorStrategies = []
self._tir_passes: List[Tuple[PassPhase, tvm.tir.transform.PrimFuncPass]] = []
def _lower_relay_to_tir(self, relay_prim_func: relay.Function) -> tvm.tir.PrimFunc:
"""Lower a Relay primitive function to a S-TIR primitive function.
Parameters
----------
        relay_prim_func : tvm.relay.Function
The Relay function to lower.
Returns
-------
out : tvm.tir.PrimFunc
The lowered schedulable TensorIR primitive function.
"""
def _get_tensors(te_cached_func):
return list(te_cached_func.inputs) + list(te_cached_func.outputs)
lower_to_te = tvm._ffi.get_global_func("relay.backend.LowerToTE")
te_cached_func = lower_to_te(relay_prim_func)
x = _get_tensors(te_cached_func)
tir_prim_func = te.create_prim_func(x)
tir_prim_func = tir_prim_func.with_attr(
"global_symbol", relay_prim_func.attrs["global_symbol"]
)
compiler_attr = relay_prim_func.attrs["Compiler"]
target = tvm.target.Target.current()
if target.kind.name != compiler_attr:
target = tvm.target.Target(compiler_attr)
tir_prim_func = tir_prim_func.with_attr("target", target)
tir_prim_func = tir_prim_func.with_attr("relay_attrs", relay_prim_func.attrs)
return tir_prim_func
def _lower_stir_to_nstir(self, prim_func: tvm.tir.PrimFunc) -> tvm.tir.PrimFunc:
"""Lower a S-TIR primitive function to a NS-TIR primitive function.
Parameters
----------
prim_func : tvm.tir.PrimFunc
The primitive function to lower.
Returns
-------
out : tvm.tir.PrimFunc
The lowered non-schedulable TensorIR primitive function.
"""
curr_ctxt = tvm.transform.PassContext().current()
assert "tir.add_lower_pass" not in curr_ctxt.config
pass_map = {
PassPhase.TIR_PHASE_0: 0,
PassPhase.TIR_PHASE_1: 1,
PassPhase.TIR_PHASE_2: 2,
PassPhase.TIR_PHASE_3: 3,
}
lower_passes = [(pass_map[k], v) for k, v in self._tir_passes]
with tvm.transform.PassContext(
opt_level=curr_ctxt.opt_level,
required_pass=curr_ctxt.required_pass,
disabled_pass=curr_ctxt.disabled_pass,
instruments=curr_ctxt.instruments,
config={**dict(curr_ctxt.config), "tir.add_lower_pass": lower_passes},
):
mod = tvm.lower(tvm.ir.IRModule.from_expr(prim_func))
prim_func = mod[prim_func.attrs["global_symbol"]]
return prim_func
def relay_to_tir(self, mod: tvm.ir.IRModule) -> tvm.ir.IRModule:
"""
This is the hook for python-based lowering of a Relay module which lowers NPU
external functions to TIR.
Parameters
----------
mod : tvm.ir.IRModule
This is the Relay module.
Returns
-------
mod : tvm.ir.IRModule
The Relay module with scheduled NPU external functions.
"""
mod = _ffi_api.OutlineCompilerFunctions(self.target_name)(mod)
for gvar, func in mod.functions.items():
if "Compiler" in func.attrs and func.attrs["Compiler"] == self.target_name:
func = self._lower_relay_to_tir(func)
func = self._lower_stir_to_nstir(func)
mod.update_func(gvar, func)
return mod
def register(self) -> None:
"""Register all relevant relay-to-tir functions."""
tvm._ffi.register_func(f"relay.ext.uma.{self.target_name}.relay_to_tir", self.relay_to_tir)
for op, strategy, plevel in self._operator_strategies:
register_strategy(op, strategy, plevel)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/uma/api/partitioner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Partitioner base class of the Universal Modular Accelerator Interface (UMA)"""
from typing import Callable, Dict, List, Tuple, Optional
import tvm
from tvm import relay
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.op.contrib.register import register_pattern_table
from .utils import PassPhase
PatternTable = List[Tuple[str, tvm.relay.dataflow_pattern.DFPattern, Callable]]
class UMAPartitioner:
"""Partitioner base class of the Universal Modular Accelerator Interface (UMA)."""
def __init__(self, target_name: str, merge_compiler_regions: bool = True) -> None:
self.target_name = target_name
self.merge_compiler_regions = merge_compiler_regions
self._relay_passes: List[Tuple[PassPhase, tvm.transform.Pass]] = []
self._patterns: PatternTable = []
def add_pattern(
self,
name: str,
pattern: tvm.relay.dataflow_pattern.DFPattern,
predicate: Optional[Callable] = None,
) -> None:
"""Add pattern to UMA partitioner
Parameters
----------
name : str
relay name of pattern
pattern: tvm.relay.dataflow_pattern.DFPattern
pattern description as DFPattern
predicate: Optional[Callable]
Optional predicate
"""
name = self.target_name + "." + name
if predicate:
self._patterns.append((name, pattern, predicate))
else:
self._patterns.append((name, pattern))
def _pattern_table(self) -> PatternTable:
return self._patterns
def register(self) -> None:
"""Register all relevant relay-to-relay functions."""
register_pattern_table(self.target_name, self._pattern_table)
def partition(
self, mod: tvm.IRModule, params: Optional[Dict[str, tvm.runtime.NDArray]] = None
) -> tvm.IRModule:
"""Partition the relay graph in parts supported and unsupported by the
target hardware accelerator.
Parameters
----------
mod : tvm.IRModule
The relay module to be partitioned.
        params: Optional[Dict[str, tvm.runtime.NDArray]]
            Constant parameters to bind to the "main" function before partitioning.
Returns
-------
out : tvm.IRModule
The partitioned relay module.
"""
if params:
mod["main"] = bind_params_by_name(mod["main"], params)
pass_sequence = []
pass_sequence.extend(
[p[1] for p in self._relay_passes if p[0] == PassPhase.PRE_PARTITIONING]
)
pass_sequence.append(relay.transform.MergeComposite(self._pattern_table()))
pass_sequence.append(relay.transform.AnnotateTarget(self.target_name))
if self.merge_compiler_regions:
pass_sequence.append(relay.transform.MergeCompilerRegions())
pass_sequence.append(relay.transform.PartitionGraph())
pass_sequence.extend(
[p[1] for p in self._relay_passes if p[0] == PassPhase.POST_PARTITIONING_0]
)
sequential_passes = tvm.transform.Sequential(pass_sequence)
mod = sequential_passes(mod)
# Defunctionalize the partitioned functions to allow lowering
for gvar, func in mod.functions.items():
mod.update_func(gvar, relay.transform.Defunctionalization(func, mod))
post_partition_passes_1 = tvm.transform.Sequential(
[p[1] for p in self._relay_passes if p[0] == PassPhase.POST_PARTITIONING_1]
)
mod = post_partition_passes_1(mod)
return mod
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/uma/api/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utility methods for the Universal Modular Accelerator Interface (UMA)"""
from enum import Enum, auto
import uuid
import tvm
import tvm.tir
from tvm.contrib import utils, clang
def uma_available() -> bool:
registration_func = tvm.get_global_func(
"relay.backend.contrib.uma.RegisterTarget", allow_missing=True
)
return registration_func is not None
class PassPhase(Enum):
"""
UMA pass phases:
PRE_PARTITIONING: prior to UMA partitioning
POST_PARTITIONING_0: after UMA partitioning, before Defunctionalization
POST_PARTITIONING_1: after UMA partitioning and after Defunctionalization
TIR_PHASE_0: Generates the raw IR and loop levels.
TIR_PHASE_1: Flattens the array storage.
TIR_PHASE_2: Transforms loops, like unroll, vectorization and thread-binding.
TIR_PHASE_3: Does some cleanup work.
    Reference to TIR phases: src/driver/driver_api.cc
"""
PRE_PARTITIONING = auto()
POST_PARTITIONING_0 = auto()
POST_PARTITIONING_1 = auto()
TIR_PHASE_0 = auto()
TIR_PHASE_1 = auto()
TIR_PHASE_2 = auto()
TIR_PHASE_3 = auto()
def _c_to_llvm(c_code: str) -> str:
unique_filename = str(uuid.uuid4())
temp = utils.tempdir()
ll_path = temp.relpath(f"{unique_filename}.ll")
ll_code = clang.create_llvm([c_code], output=ll_path)
return ll_code
def add_llvm_to_block(
sch: tvm.tir.Schedule, block_name: str, c_code_str: str = ""
) -> tvm.tir.Schedule:
block = sch.get_block(block_name)
loops = sch.get_loops(block)
assert len(loops) > 0
sch.annotate(loops[0], "pragma_import_llvm", _c_to_llvm(c_code_str))
return sch
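# A minimal usage sketch (the block name and C snippet below are illustrative):
# given a TIR schedule `sch` that contains a block named "my_block", compile a C
# kernel to LLVM IR and attach it to the block's outermost loop so it gets linked
# into the lowered module:
#
#     c_code = "int my_kernel(int x) { return x; }"
#     sch = add_llvm_to_block(sch, "my_block", c_code)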
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/contrib/uma/backend.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Backend base class of the Universal Modular Accelerator Interface (UMA)"""
from abc import ABC, abstractmethod
from typing import Union, Dict, Callable, Optional, Any
import tvm
from tvm.relay.backend.contrib.uma.api.codegen import UMACodegen
from tvm.relay.backend.contrib.uma.api.lower import UMALower
from tvm.relay.backend.contrib.uma.api.partitioner import UMAPartitioner
from tvm.relay.backend.contrib.uma.api.utils import PassPhase
class UMABackend(ABC):
"""Backend base class of the Universal Modular Accelerator Interface (UMA)"""
def __init__(self, merge_compiler_regions: bool = True) -> None:
self._target_attrs: Dict = {}
self._target_preprocessor: Callable[[str], Dict[str, Any]] = None
self._relay_to_relay = UMAPartitioner(self.target_name, merge_compiler_regions)
self._relay_to_tir = UMALower(self.target_name)
self._tir_to_runtime = UMACodegen(self.target_name)
@property
@abstractmethod
def target_name(self) -> str:
"""Name of the hardware target.
Returns
-------
out : str
The hardware target name.
"""
...
# Target configuration
def _register_target_attr(
self,
name: str,
default: Optional[Union[str, int, bool]] = "",
) -> None:
"""Register a target attribute name that can be used during target instantiation.
Parameters
----------
name: str
The name of the target attribute.
default: Optional[Union[str, int, bool]]
A default value for the attribute.
If none is provided, the attribute will be treated as a string.
Example
-------
Here is an example of how two attribute options are registered.
.. code-block:: python
self._register_target_attr("attrA", default=0)
self._register_target_attr("attrB", default=False)
"""
self._target_attrs[name] = default
# Relay to Relay function registration
def _register_relay_pass(self, phase: PassPhase, relay_pass: tvm.transform.Pass) -> None:
"""Registers a relay pass at the given phase in the lowering process.
Parameters
----------
phase: PassPhase
The phase at which the pass is registered.
relay_pass: tvm.transform.Pass
The relay pass to be registered.
Example
-------
Here is an example of how two relay passes are registered.
Passes of the same phase are executed in the order they are registered.
.. code-block:: python
self._register_relay_pass(PassPhase.PRE_PARTITIONING, MyPassA)
            self._register_relay_pass(PassPhase.POST_PARTITIONING_0, MyPassB)
Where a relay pass can look like this:
.. code-block:: python
@tvm.ir.transform.module_pass(opt_level=0)
class MyPassA:
def transform_module(self, mod, ctx):
# My pass functionality...
return mod
"""
self._relay_to_relay._relay_passes.append((phase, relay_pass))
def _register_pattern(
self,
name: str,
pattern: tvm.relay.dataflow_pattern.DFPattern,
predicate: Optional[Callable] = None,
) -> None:
"""Registers a dataflow pattern that is used to partition the relay graph.
Parameters
----------
name: str
The name of the pattern
pattern: tvm.relay.dataflow_pattern.DFPattern
Relay DFPattern
predicate: Optional[Callable]
Optional predicate for Relay DFPattern
Example
-------
Here is an example of how two dataflow patterns are registered.
        During partitioning, patterns are searched in order of registration.
.. code-block:: python
self._register_pattern("conv1d", conv1d_pattern)
self._register_pattern("conv2d", conv2d_pattern)
Where a dataflow pattern can look like this:
.. code-block:: python
conv1d_pattern = is_op("nn.conv1d")(wildcard(), wildcard())
optional_bias = lambda x: is_op("nn.bias_add")(x, wildcard())
optional_relu = lambda x: is_op("nn.relu")(x)
conv1d_pattern = conv1d_pattern.optional(optional_bias).optional(optional_relu)
"""
self._relay_to_relay.add_pattern(name, pattern, predicate)
# Relay to TIR function registration
def _register_operator_strategy(
self,
op: str,
strategy: Callable[
[tvm.ir.Attrs, tvm.ir.Array, tvm.ir.TensorType, tvm.target.Target],
tvm.relay.op.op.OpStrategy,
],
plevel: Optional[int] = 11,
) -> None:
"""Registers an operator strategy that is used to partition the relay graph.
Parameters
----------
op: str
The name of the operator for which this strategy will be registered.
strategy: Callable[[tvm.ir.Attrs, tvm.ir.Array, tvm.ir.TensorType, tvm.target.Target],
tvm.relay.op.op.OpStrategy]
The strategy function.
plevel: Optional[int] = 11
            The priority level of the strategy. Higher plevel equals higher prioritization.
The TVM default for topi strategies is 10 so by default new UMA strategies are
always used.
Example
-------
Here is an example of how two operator strategies are registered.
.. code-block:: python
self._register_operator_strategy("nn.conv1d", custom_conv1d_strategy)
self._register_operator_strategy("nn.conv2d", custom_conv2d_strategy)
Where a strategy function can look like this:
.. code-block:: python
@relay.op.strategy.override_native_generic_func("custom_conv1d_strategy")
def custom_conv1d_strategy(attrs, inputs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv1d(custom_conv1d_compute),
wrap_topi_schedule(custom_conv1d_schedule),
name="custom_conv1d.generic",
return strategy
"""
self._relay_to_tir._operator_strategies.append((op, strategy, plevel))
def _register_tir_pass(
self, phase: PassPhase, tir_pass: tvm.tir.transform.PrimFuncPass
) -> None:
"""Registers a TIR pass at the given phase in the lowering process.
Parameters
----------
phase: PassPhase
The phase at which the pass is registered.
tir_pass: tvm.tir.transform.PrimFuncPass
The TIR pass to be registered.
Example
-------
Here is an example of how two TIR passes are registered.
Passes of the same phase are executed in the order they are registered.
.. code-block:: python
self._register_tir_pass(PassPhase.TIR_PHASE_0, MyPassA)
self._register_tir_pass(PassPhase.TIR_PHASE_1, MyPassB)
Where a TIR pass can look like this:
.. code-block:: python
@tvm.tir.transform.prim_func_pass(opt_level=0)
class MyPassA:
def transform_function(self, func, mod, ctx):
# My pass functionality...
return func
"""
self._relay_to_tir._tir_passes.append((phase, tir_pass))
# TIR to runtime function registration
def _register_codegen(self, fmt: str = "c", **kwargs) -> None:
"""Registers a codegen which is used in place of the default C-codegen.
Parameters
----------
fmt: str
The codegen format. For now, only C-codegen is supported by UMA.
**kwargs
Keyword arguments for the chosen codegen.
Example
-------
Here is an example of how the custom C-codegen is registered and configured.
Passes of the same phase are executed in the order they are registered.
.. code-block:: python
self._register_codegen(
fmt="c", includes=gen_includes
)
The C-codegen currently provides one hook which allows the user to insert code through
the python API.
- `includes` hooks into the include stream and allows insertion of custom includes.
The code generation functions can look like this:
.. code-block:: python
def gen_includes() -> str:
includes = "#include <my_custom_header.h>\n"
return includes
"""
self._tir_to_runtime._register_codegen(fmt, **kwargs)
# Backend functions
def register(self) -> None:
"""
Registering UMABackend:
registering target attributes, relay_to_relay, relay_to_tir and tir_to_runtime
"""
registration_func = tvm.get_global_func("relay.backend.contrib.uma.RegisterTarget")
        for name, attr in self._target_attrs.items():
if attr is None:
raise ValueError("Target attribute None is not supported.")
if registration_func(self.target_name, self._target_attrs):
self._relay_to_relay.register()
self._relay_to_tir.register()
self._tir_to_runtime.register()
def partition(
self, mod: tvm.IRModule, params: Optional[Dict[str, tvm.runtime.NDArray]] = None
) -> tvm.IRModule:
return self._relay_to_relay.partition(mod, params)
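# A minimal sketch of a concrete backend built on this base class. The target name
# and the pattern below are illustrative (they are not defined by this module); the
# flow simply exercises the registration hooks documented above.
#
#     from tvm.relay.dataflow_pattern import is_op, wildcard
#
#     class MyAcceleratorBackend(UMABackend):
#         @property
#         def target_name(self) -> str:
#             return "my_accelerator"
#
#         def __init__(self):
#             super().__init__()
#             # Offload every nn.conv2d to the accelerator.
#             self._register_pattern("conv2d", is_op("nn.conv2d")(wildcard(), wildcard()))
#
#     backend = MyAcceleratorBackend()
#     backend.register()            # registers target, patterns, passes and codegen
#     mod = backend.partition(mod)  # partitions a relay IRModule `mod`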
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/executor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=len-as-condition,no-else-return,invalid-name
"""Executor configuration"""
import tvm
from tvm.runtime import Object
from . import _backend
@tvm._ffi.register_object
class Executor(Object):
"""Executor configuration"""
flag_registry_name = "executor"
def __init__(self, name, options=None) -> None:
if options is None:
options = {}
self.__init_handle_by_constructor__(_backend.CreateExecutor, name, options)
self._attrs = _backend.GetExecutorAttrs(self)
def __contains__(self, name):
return name in self._attrs
def __getitem__(self, name):
return self._attrs[name]
def __eq__(self, other):
return str(other) == str(self) and dict(other._attrs) == dict(self._attrs)
@staticmethod
def list_registered():
"""Returns a list of possible executors"""
return list(_backend.ListExecutors())
@staticmethod
def list_registered_options(executor):
"""Returns the dict of available option names and types"""
return dict(_backend.ListExecutorOptions(str(executor)))
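# For illustration, typical instantiations of this configuration object (the AOT
# option names below are examples of registered executor options):
#
#     graph_executor = Executor("graph")
#     aot_executor = Executor("aot", {"interface-api": "c", "unpacked-api": True})
#     Executor.list_registered()               # e.g. ["graph", "aot"]
#     Executor.list_registered_options("aot")  # dict of option name -> type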
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/executor_factory.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Executor factory modules."""
from abc import abstractmethod
import warnings
from ..._ffi.base import string_types
from ..._ffi.registry import get_global_func
from ...runtime import ndarray
class ExecutorFactoryModule:
"""Common interface for executor factory modules
This class describes the common API of different
factory modules
"""
@abstractmethod
def get_executor_config(self):
"""Return the internal configuration the executor uses to execute the network"""
raise NotImplementedError
@abstractmethod
def get_params(self):
"""Return the compiled parameters."""
raise NotImplementedError
@abstractmethod
def get_lib(self):
"""Return the generated library"""
raise NotImplementedError
def __getitem__(self, item):
return self.module.__getitem__(item)
def __iter__(self):
warnings.warn(
"legacy graph executor behavior of producing json / lib / params will be "
"removed in the next release."
" Please see documents of tvm.contrib.graph_executor.GraphModule for the "
" new recommended usage.",
DeprecationWarning,
2,
)
return self
def __next__(self):
if self.iter_cnt > 2:
raise StopIteration
objs = [self.get_executor_config(), self.lib, self.params]
obj = objs[self.iter_cnt]
self.iter_cnt += 1
return obj
class AOTExecutorFactoryModule(ExecutorFactoryModule):
"""AOT executor factory module.
Attributes
----------
ir_mod : :py:class:`~tvm.IRModule`
The IR module to build.
lowered_ir_mods : dict[Target, IRModule]
The IR modules lowered per Target.
target : tvm.Target
The Target used to build this module.
executor : tvm.relay.backend.Executor
Internal representation of the Executor
runtime : tvm.relay.backend.Runtime
Internal representation of the Runtime
libmod : tvm.Module
The module of the corresponding function
libmod_name: str
The name of module
params : dict of str to NDArray
The parameters of module
function_metadata : Map of String to FunctionInfo
This holds a map function names to their information
devices : List[str]
List of devices used in the module
"""
def __init__(
self,
ir_mod,
lowered_ir_mods,
target,
executor,
runtime,
libmod,
libmod_name,
params,
function_metadata,
executor_codegen_metadata,
devices,
):
fcreate = get_global_func("tvm.aot_executor_factory.create")
args = []
for k, v in params.items():
args.append(k)
args.append(ndarray.array(v))
self.module = fcreate(libmod, libmod_name, *args)
self.ir_mod = ir_mod
self.lowered_ir_mods = lowered_ir_mods
self.target = target
self.executor = executor
self.runtime = runtime
self.lib = libmod
self.libmod_name = libmod_name
self.params = params
self.iter_cnt = 0
self.function_metadata = function_metadata
self.executor_codegen_metadata = executor_codegen_metadata
self.devices = devices
def get_devices(self):
return self.devices
def get_params(self):
return self.params
def get_executor_config(self):
return None
def get_lib(self):
return self.lib
def export_library(self, file_name, fcompile=None, addons=None, **kwargs):
return self.module.export_library(file_name, fcompile, addons, **kwargs)
class GraphExecutorFactoryModule(ExecutorFactoryModule):
"""Graph executor factory module.
This is a module of graph executor factory
Attributes
----------
ir_mod : :py:class:`~tvm.IRModule`
The IR module to build.
target : tvm.Target
The Target used to build this module.
executor : tvm.relay.backend.Executor
Internal representation of the Executor
    graph_json_str : str
        The JSON graph to be deployed, output by the graph compiler in JSON format.
        The graph can contain operators (tvm_op) that point to the name of a
        PackedFunc in the libmod.
libmod : tvm.Module
The module of the corresponding function
libmod_name: str
The name of module
params : dict of str to NDArray
The parameters of module
function_metadata : Map of String to FunctionInfo
This holds a map function names to their information
"""
def __init__(
self,
ir_mod,
target,
executor,
graph_json_str,
libmod,
libmod_name,
params,
function_metadata,
):
assert isinstance(graph_json_str, string_types)
fcreate = get_global_func("tvm.graph_executor_factory.create")
args = []
for k, v in params.items():
args.append(k)
args.append(ndarray.array(v))
self.ir_mod = ir_mod
self.target = target
self.executor = executor
self.module = fcreate(graph_json_str, libmod, libmod_name, *args)
self.graph_json = graph_json_str
self.lib = libmod
self.libmod_name = libmod_name
self.params = params
self.iter_cnt = 0
self.function_metadata = function_metadata
def export_library(self, file_name, fcompile=None, addons=None, **kwargs):
return self.module.export_library(file_name, fcompile, addons, **kwargs)
def get_devices(self):
return []
def get_params(self):
return self.params
def get_graph_json(self):
return self.graph_json
def get_executor_config(self):
return self.graph_json
def get_lib(self):
return self.lib
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/graph_executor_codegen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
A compiler from a Relay expression to TVM's graph executor.
The compiler is built from a few pieces.
First we define a compiler from a single Relay expression to the
graph language. We require the expression to be a function.
The function's parameters correspond to the placeholder/inputs
and model parameters found in the computation graph representation.
The body of the function represents the computation graph.
The compiler's output is a program in the graph language, which is composed of
Node, NodeRef, InputNode, OpNode. This "little language" represents programs in
TVM's graph format.
To connect to the graph executor, we use a printer that converts our graph format
into TVM's JSON format. The resulting string can be loaded by
contrib.graph_executor or any other TVM runtime compatible systems.
"""
from tvm.runtime.ndarray import empty
from tvm.relay import _build_module
from tvm.target import Target
from .utils import mangle_module_name
class GraphExecutorCodegen(object):
"""The compiler from Relay to the TVM runtime system."""
def __init__(self, mod, target):
self._mod = _build_module._GraphExecutorCodegen()
self._init = self._mod["init"]
self._codegen = self._mod["codegen"]
self._get_graph_json = self._mod["get_graph_json"]
self._list_params_name = self._mod["list_params_name"]
self._get_param_by_name = self._mod["get_param_by_name"]
self._get_irmodule = self._mod["get_irmodule"]
self._setup(mod, target)
def _setup(self, mod, target):
raw_targets = Target.canon_multi_target_and_host(target)
self._init(mod, raw_targets)
def codegen(self, ir_module, func):
"""Compile a single function into a graph.
Parameters
----------
ir_module: tvm.ir.Module
The module to compile
func: tvm.relay.Expr
The function to compile.
Returns
-------
graph_json : str
The graph json that can be consumed by runtime.
mod : IRModule or Dict[Target, IRModule]
The lowered functions.
params : Dict[str, tvm.nd.NDArray]
Additional constant parameters.
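        Example
        -------
        .. code-block:: python
            # A minimal sketch; assumes `mod` is a type-checked Relay IRModule and
            # `target` is a tvm.target.Target such as Target("llvm").
            codegen = GraphExecutorCodegen(mod, target)
            graph_json, lowered_funcs, params = codegen.codegen(mod, mod["main"])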
"""
default_mod_name = mangle_module_name("default")
self._codegen(ir_module, func, default_mod_name)
graph_json = self._get_graph_json()
lowered_func = self._get_irmodule()
param_names = self._list_params_name()
params = {}
for key in param_names:
arr = self._get_param_by_name(key)
param = empty(arr.shape, dtype=arr.dtype, device=arr.device)
arr.copyto(param)
params[key] = param
return graph_json, lowered_func, params
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/interpreter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return
"""The Python interface to the Relay reference interpreter."""
from __future__ import absolute_import
import numpy as np
import tvm._ffi
from tvm.runtime import container, Object
from . import _backend
from .. import _make, analysis
from ... import nd
from ..expr import Tuple, RefCreate, Call, Constant, GlobalVar, const
from ..function import Function
from ..scope_builder import ScopeBuilder
@tvm._ffi.register_object("relay.ConstructorValue")
class ConstructorValue(Object):
def __init__(self, tag, fields, constructor):
self.__init_handle_by_constructor__(_make.ConstructorValue, tag, fields, constructor)
@tvm._ffi.register_object("relay.RefValue")
class RefValue(Object):
def __init__(self, value):
self.__init_handle_by_constructor__(_make.RefValue, value)
def _arg_to_ast(mod, arg):
if isinstance(arg, nd.NDArray):
return Constant(arg.copyto(nd.cpu(0)))
elif isinstance(arg, container.ADT):
return Tuple([_arg_to_ast(mod, field) for field in arg])
elif isinstance(arg, tuple):
return Tuple([_arg_to_ast(mod, field) for field in arg])
elif isinstance(arg, RefValue):
return RefCreate(_arg_to_ast(mod, arg.value))
elif isinstance(arg, ConstructorValue):
return Call(mod.get_constructor(arg.tag), [_arg_to_ast(mod, field) for field in arg.fields])
elif isinstance(arg, np.ndarray):
return Constant(nd.array(arg))
elif isinstance(arg, Constant):
return arg
else:
return const(arg)
class Executor(object):
"""An abstract interface for executing Relay programs."""
def _convert_args(self, expr, args, kwargs):
"""
Convert the combination of arguments and keyword arguments
into a sequence of arguments that may be passed to
a Relay evaluator.
We first provide all positional arguments, and then attempt
to fill in the remaining arguments using the keyword arguments. We
map the keyword arguments to the corresponding parameters, if there
is an ambiguity between positional and keyword arguments this
procedure will raise an error.
Parameters
----------
expr: relay.Expr
The expression to evaluate
args: List[tvm.nd.NDArray]
The arguments to pass to the evaluator.
        kwargs: Dict[str, tvm.nd.NDArray]
The keyword arguments to pass to the evaluator.
        Returns
        -------
args: List[tvm.nd.NDArray]
The new arguments with all keyword arguments placed in the correct slot.
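        Example
        -------
        .. code-block:: python
            # Sketch only, with hypothetical names: for a function fn(%x, %y),
            # positional arguments fill the leading parameters and keyword
            # arguments fill the remaining ones.
            converted = executor._convert_args(func, (x_val,), {"y": y_val})
            # converted == (x_val, y_val)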
"""
assert expr is not None
if not kwargs:
return args
if kwargs and not isinstance(expr, Function):
raise Exception(
"can only supply keyword parameters for a " "relay.Function, found {0}".format(expr)
)
params = expr.params
param_names = [p.name_hint for p in params]
num_of_args = len(args)
cargs = list(args)[:]
for i, name in enumerate(param_names):
if i < num_of_args:
if kwargs.get(name):
raise Exception(
"duplicate argument supplied in "
"both positional args (at position: {0}), "
"and keyword argument (with name: {1})".format(i, name)
)
else:
cargs.append(kwargs[name])
if len(cargs) != len(params):
raise Exception(
"insufficient arguments, expected "
"{0}, provided {1}".format(len(cargs), len(params))
)
return tuple(cargs)
def _make_executor(self, expr=None):
"""
Construct a Python function that implements the evaluation
of expression.
Parameters
----------
expr: Optional[relay.Expr]
The Relay expression to execute.
Returns
-------
executor: function,
A Python function which implements the behavior of `expr`.
"""
raise NotImplementedError()
def evaluate(self, expr=None, binds=None):
"""
Evaluate a Relay expression on the executor.
Parameters
----------
expr: Optional[tvm.relay.Expr]
The expression to evaluate.
binds: Optional[Map[tvm.relay.Var, tvm.relay.Expr]]
Additional binding of free variable.
Returns
-------
val : Union[function, Object]
The evaluation result.
"""
if binds:
scope_builder = ScopeBuilder()
for key, value in binds.items():
scope_builder.let(key, _arg_to_ast(self.mod, value))
scope_builder.ret(expr)
expr = scope_builder.get()
if not expr:
return self._make_executor()
if isinstance(expr, Function):
assert not analysis.free_vars(expr)
if isinstance(expr, (Function, GlobalVar)):
return self._make_executor(expr)
# normal expression evaluated by running a function.
# TODO(mbs): This should really be type rather than syntax driven.
func = Function([], expr)
return self._make_executor(func)()
class Interpreter(Executor):
"""
Simple interpreter interface.
Parameters
----------
mod : tvm.IRModule
The module to support the execution.
device : Device
The runtime device to run the code on.
target : tvm.Target
The target option to build the function. Only homogeneous execution is supported.
    CAUTION: Despite what the API suggests, the module is prepared upon each call to evaluate
rather than once in create_executor.
That is:
.. code-block:: python
executor = relay.create_executor(kind="debug", mod=module)
a = executor.evaluate(expr)(args1)
b = executor.evaluate(expr)(args2)
will prepare all the bindings in module twice. For efficiency, try to hoist
calls to evaluate as high as possible, preferably immediately after create_executor:
.. code-block:: python
func = relay.create_executor(kind="debug", mod=module).evaluate(expr)
a = func(args1)
b = func(args2)
"""
def __init__(self, mod, device, target):
self.mod = mod
self.device = device
self.target = target
def _make_executor(self, expr=None):
if expr is None or isinstance(expr, GlobalVar):
assert self.mod is not None
if expr is None:
# A missing expr denotes 'main' in the given module.
expr = self.mod.get_global_var("main")
# Evaluate expr to a packed function we can efficiently re-apply
# to Relay arguments.
func = _backend.EvalFunction(self.mod, expr, self.device, self.target)
def _apply_args(*args, **kwargs):
if isinstance(expr, GlobalVar):
# When expanding args, look inside the actual global definition so kwargs
# can be matched.
args = self._convert_args(self.mod[expr.name_hint], args, kwargs)
else:
args = self._convert_args(expr, args, kwargs)
# Reflect python arguments up into Relay.
relay_args = []
for arg in args:
relay_args.append(_arg_to_ast(self.mod, arg))
# Apply func to Relay args
return func(relay_args)
return _apply_args
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/name_transforms.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Name transformation functions for use in code generation
"""
from typing import List, Union
from tvm import TVMError
from . import _backend
def to_c_function_style(original_name: str):
"""Transform a name to the C function style assuming it is
appropriately constructed using the prefixing functions
Parameters
----------
original_name : str
Original name to transform
"""
return _backend.ToCFunctionStyle(original_name)
def to_c_variable_style(original_name: str):
"""Transform a name to the C variable style assuming it is
appropriately constructed using the prefixing functions
Parameters
----------
original_name : str
Original name to transform
"""
return _backend.ToCVariableStyle(original_name)
def to_c_constant_style(original_name: str):
"""Transform a name to the C constant style assuming it is
appropriately constructed using the prefixing functions
Parameters
----------
original_name : str
Original name to transform
"""
return _backend.ToCConstantStyle(original_name)
def _preprocess_names(names: Union[List[str], str]):
"""Preprocesses name strings into format for C++ functions
Parameters
----------
names : Union[List[str], str]
List of names to combine to form a combined name or the name itself
"""
if isinstance(names, str):
if names == "":
raise TVMError("Name is empty")
return [names]
return names
def prefix_name(names: Union[List[str], str]):
"""Apply TVM-specific prefix to a function name
Parameters
----------
names : Union[List[str], str]
List of names to combine to form a combined name or the name itself
"""
return _backend.PrefixName(_preprocess_names(names))
def prefix_generated_name(names: Union[List[str], str]):
"""Apply generated TVM-specific prefix to a function name
Parameters
----------
names : Union[List[str], str]
List of names to combine to form a combined name or the name itself
"""
return _backend.PrefixGeneratedName(_preprocess_names(names))
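# Example (a minimal sketch): these helpers are usually composed, e.g.
#   to_c_function_style(prefix_generated_name(["my_model", "run"]))
# yields a C-style function name carrying the generated-TVM prefix; the exact
# string is determined by the C++ name transforms exposed through `_backend`.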
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/runtime.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=len-as-condition,no-else-return,invalid-name
"""Runtime configuration"""
import tvm
from tvm.runtime import Object
from . import _backend
@tvm._ffi.register_object
class Runtime(Object):
"""Runtime configuration"""
flag_registry_name = "runtime"
def __init__(self, name, options=None) -> None:
if options is None:
options = {}
self.__init_handle_by_constructor__(_backend.CreateRuntime, name, options)
self._attrs = _backend.GetRuntimeAttrs(self)
def __contains__(self, name):
return name in self._attrs
def __getitem__(self, name):
self._attrs = _backend.GetRuntimeAttrs(self)
return self._attrs[name]
def __eq__(self, other):
return str(other) == str(self) and dict(other._attrs) == dict(self._attrs)
@staticmethod
def list_registered():
"""Returns a list of possible runtimes"""
return list(_backend.ListRuntimes())
@staticmethod
def list_registered_options(runtime):
"""Returns the dict of available option names and types"""
return dict(_backend.ListRuntimeOptions(str(runtime)))
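# Example (a minimal sketch): construct a configured runtime and inspect it.
#   rt = Runtime("crt", {"system-lib": True})
#   assert "system-lib" in rt
#   print(Runtime.list_registered())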
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/te_compiler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=len-as-condition,no-else-return,invalid-name
"""TE compiler engine (replacing legacy compile_engine)."""
from __future__ import absolute_import
import logging
import numpy as np
import tvm
from tvm import autotvm, te
from tvm.auto_scheduler import is_auto_scheduler_enabled
from tvm.meta_schedule import is_meta_schedule_enabled
from tvm.runtime import Object
from tvm.support import libinfo
from tvm.target import Target
from .. import function as _function
from .. import ty as _ty
from ..backend.utils import mangle_module_name
from . import _backend
logger = logging.getLogger("te_compiler")
autotvm_logger = logging.getLogger("autotvm")
_first_warning = True
@tvm._ffi.register_object("relay.LoweredOutput")
class LoweredOutput(Object):
"""Lowered output"""
def __init__(self, outputs, implement):
self.__init_handle_by_constructor__(_backend._make_LoweredOutput, outputs, implement)
@tvm._ffi.register_object("relay.CCacheKey")
class CCacheKey(Object):
"""Key in the TE Compiler.
Parameters
----------
source_func : tvm.relay.Function
The source function.
target : tvm.Target
The target we want to run the function on.
"""
def __init__(self, source_func, target):
self.__init_handle_by_constructor__(_backend._make_CCacheKey, source_func, target)
@tvm._ffi.register_object("relay.CCacheValue")
class CCacheValue(Object):
"""Value in the TE Compiler, including usage statistics."""
def _get_cache_key(source_func, target):
if isinstance(source_func, _function.Function):
if isinstance(target, str):
target = Target(target)
if not target:
raise ValueError("Need target when source_func is a Function")
return CCacheKey(source_func, target)
if not isinstance(source_func, CCacheKey):
raise TypeError("Expect source_func to be CCacheKey")
return source_func
def get_valid_implementations(op, attrs, inputs, out_type, target):
"""Get all valid implementations from the op strategy.
    Note that this function doesn't support ops with symbolic input shapes.
Parameters
----------
op : tvm.ir.Op
Relay operator.
attrs : object
The op attribute.
inputs : List[tvm.te.Tensor]
Input tensors to the op.
out_type : relay.Type
The output type.
target : tvm.target.Target
The target to compile the op.
Returns
-------
ret : List[relay.op.OpImplementation]
The list of all valid op implementations.
"""
fstrategy = op.get_attr("FTVMStrategy")
assert fstrategy is not None, (
"%s doesn't have an FTVMStrategy registered. You can register "
"one in python with `tvm.relay.op.register_strategy`." % op.name
)
with target:
strategy = fstrategy(attrs, inputs, out_type, target)
analyzer = tvm.arith.Analyzer()
ret = []
for spec in strategy.specializations:
if spec.condition:
# check if all the clauses in the specialized condition are true
flag = True
for clause in spec.condition.clauses:
clause = analyzer.canonical_simplify(clause)
if isinstance(clause, tvm.tir.IntImm) and clause.value:
continue
flag = False
break
if flag:
for impl in spec.implementations:
ret.append(impl)
else:
for impl in spec.implementations:
ret.append(impl)
return ret
def select_implementation(op, attrs, inputs, out_type, target, use_autotvm=True):
"""Select the best implementation from the op strategy.
If use_autotvm is True, it'll first try to find the best implementation
based on AutoTVM profile results. If no AutoTVM profile result is found,
it'll choose the implementation with highest plevel.
If use_autotvm is False, it'll directly choose the implementation with
highest plevel.
    Note that this function doesn't support ops with symbolic input shapes.
Parameters
----------
op : tvm.ir.Op
Relay operator.
attrs : object
The op attribute.
inputs : List[tvm.te.Tensor]
Input tensors to the op.
out_type : relay.Type
The output type.
target : tvm.target.Target
The target to compile the op.
use_autotvm : bool
        Whether to query AutoTVM to pick the best.
Returns
-------
ret : tuple(relay.op.OpImplementation, List[tvm.te.Tensor])
The best op implementation and the corresponding output tensors.
"""
all_impls = get_valid_implementations(op, attrs, inputs, out_type, target)
if len(all_impls) == 0:
raise RuntimeError(f"No valid {op} implementations for {target}")
best_plevel_impl = max(all_impls, key=lambda x: x.plevel)
# Disable autotvm if auto_scheduler is enabled.
# (i.e., always return the implementation with the highest priority for auto-scheduler).
if is_auto_scheduler_enabled() or is_meta_schedule_enabled():
use_autotvm = False
# If not use autotvm, always return the implementation with the highest priority
if not use_autotvm:
logger.info(
"Using %s for %s based on highest priority (%d)",
best_plevel_impl.name,
op.name,
best_plevel_impl.plevel,
)
outs = best_plevel_impl.compute(attrs, inputs, out_type)
return best_plevel_impl, outs
# Otherwise, try autotvm templates
outputs = {}
workloads = {}
best_autotvm_impl = None
best_cfg = None
dispatch_ctx = autotvm.task.DispatchContext.current
old_silent = autotvm.GLOBAL_SCOPE.silent
autotvm.GLOBAL_SCOPE.silent = True
for impl in all_impls:
outs = impl.compute(attrs, inputs, out_type)
outputs[impl] = outs
workload = autotvm.task.get_workload(outs)
workloads[impl] = workload
if workload is None:
# Not an AutoTVM tunable implementation
continue
cfg = dispatch_ctx.query(target, workload)
if cfg.is_fallback:
# Skip fallback config
continue
logger.info("Implementation %s for %s has cost %.2e", impl.name, op.name, cfg.cost)
if best_cfg is None or best_cfg.cost > cfg.cost:
best_autotvm_impl = impl
best_cfg = cfg
autotvm.GLOBAL_SCOPE.silent = old_silent
if best_autotvm_impl:
# The best autotvm implementation definitely doesn't use fallback config
logger.info(
"Using %s for %s based on lowest cost (%.2e)",
best_autotvm_impl.name,
op.name,
best_cfg.cost,
)
return best_autotvm_impl, outputs[best_autotvm_impl]
# Use the implementation with highest plevel
if workloads[best_plevel_impl] is not None:
msg = (
"Cannot find tuning records for:\n target=%s\n key=%s\n"
"TVM will apply a default schedule which may negatively impact performance."
% (target, workloads[best_plevel_impl])
)
if (
not autotvm.env.GLOBAL_SCOPE.silent
and msg not in autotvm.task.DispatchContext.warning_messages
):
autotvm.task.DispatchContext.warning_messages.add(msg)
global _first_warning
if _first_warning:
_first_warning = False
info_msg = (
"One or more operators have not been tuned. Please tune your model "
"for better performance. Use DEBUG logging level to see more details."
)
autotvm_logger.warning(info_msg)
autotvm_logger.debug(msg)
logger.info(
"Using %s for %s based on highest priority (%s)",
best_plevel_impl.name,
op.name,
best_plevel_impl.plevel,
)
return best_plevel_impl, outputs[best_plevel_impl]
def get_shape(shape):
"""Convert the shape to correct dtype and vars."""
ret = []
for dim in shape:
if isinstance(dim, tvm.tir.IntImm):
if libinfo()["INDEX_DEFAULT_I64"] == "ON":
ret.append(dim)
else:
val = int(dim)
assert val <= np.iinfo(np.int32).max
ret.append(tvm.tir.IntImm("int32", val))
elif isinstance(dim, tvm.tir.Any):
ret.append(te.size_var("any_dim", "int32"))
else:
ret.append(dim)
return ret
@tvm._ffi.register_func("relay.backend.lower_call")
def lower_call(call, inputs, target, otype=None):
"""Lower the call expression to op implementation and tensor outputs."""
assert isinstance(call.op, tvm.ir.Op)
op = call.op
if otype is not None:
ret_type = otype
else:
# Prepare the call_node->checked_type(). For the call node inputs, we ensure that
# the shape is Int32. Following code ensures the same for the output as well.
# TODO(@icemelon9): Support recursive tuple
ret_type = call.checked_type
if isinstance(ret_type, _ty.TensorType):
ret_type = _ty.TensorType(get_shape(ret_type.shape), ret_type.dtype)
elif isinstance(ret_type, _ty.TupleType):
new_fields = []
for field in ret_type.fields:
if isinstance(field, _ty.TensorType):
new_fields.append(_ty.TensorType(get_shape(field.shape), field.dtype))
else:
new_fields.append(field)
ret_type = _ty.TupleType(new_fields)
is_dyn = _ty.is_dynamic(call.checked_type)
for arg in call.args:
is_dyn = is_dyn or _ty.is_dynamic(arg.checked_type)
# check if in the AutoTVM tracing mode, and disable if op is not in wanted list
env = autotvm.task.TaskExtractEnv.current
reenable_tracing = False
if env is not None and env.tracing:
if env.wanted_relay_ops is not None and op not in env.wanted_relay_ops:
env.tracing = False
reenable_tracing = True
if not is_dyn:
best_impl, outputs = select_implementation(op, call.attrs, inputs, ret_type, target)
else:
# TODO(@icemelon9): Allow tvm to generate multiple kernels for dynamic shapes.
best_impl, outputs = select_implementation(
op, call.attrs, inputs, ret_type, target, use_autotvm=False
)
# re-enable AutoTVM tracing
if reenable_tracing:
env.tracing = True
return LoweredOutput(outputs, best_impl)
@tvm._ffi.register_object("relay.TECompiler")
class TECompiler(Object):
"""TECompiler to get lowered code."""
def __init__(self):
raise RuntimeError("Cannot construct a TECompiler")
def lower(self, source_func, target=None, mod_name="default"):
"""Lower a source_func to a CachedFunc.
Parameters
----------
source_func : Union[tvm.relay.Function, CCacheKey]
The source relay function.
target : tvm.Target
The target platform.
Returns
-------
cached_func: CachedFunc
The result of lowering.
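        Example
        -------
        .. code-block:: python
            # A minimal sketch; assumes `prim_func` is a fused, primitive
            # relay.Function as produced by the standard lowering pipeline.
            compiler = get()
            cached_func = compiler.lower(prim_func, "llvm")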
"""
# pylint: disable=broad-except, import-outside-toplevel
try:
mod_name = mangle_module_name(mod_name)
key = _get_cache_key(source_func, target)
return _backend._TECompilerLower(self, key, mod_name)
except Exception:
import traceback
msg = traceback.format_exc()
msg += "Error during compile func\n"
msg += "--------------------------\n"
msg += source_func.astext(show_meta_data=False)
msg += "--------------------------\n"
raise RuntimeError(msg)
def jit(self, source_func, target=None):
"""JIT a source_func to a tvm.runtime.PackedFunc.
Parameters
----------
source_func : Union[tvm.relay.Function, CCacheKey]
The source relay function.
target : tvm.Target
The target platform.
Returns
-------
jited_func: tvm.runtime.PackedFunc
The result of jited function.
"""
key = _get_cache_key(source_func, target)
return _backend._TECompilerJIT(self, key)
def clear(self):
"""clear the existing cached functions"""
_backend._TECompilerClear(self)
def items(self):
"""List items in the cache.
Returns
-------
item_list : List[Tuple[CCacheKey, CCacheValue]]
The list of items.
"""
res = _backend._TECompilerListItems(self)
assert len(res) % 2 == 0
return [(res[2 * i], res[2 * i + 1]) for i in range(len(res) // 2)]
def get():
"""Get the global TE Compiler.
Returns
-------
engine : tvm.relay.backend.TECompiler
The TE Compiler.
"""
return _backend._TECompilerGlobal()
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utility backend functions."""
from enum import Enum
class CallType(Enum):
Packed = 0
CPacked = 1
Unpacked = 2
def _is_valid_modname(mod_name):
"""Determine if mod_name is a valid string to use inside function names"""
if mod_name:
try:
mod_name.encode("ascii")
return True
except UnicodeEncodeError:
return False
return True
def mangle_module_name(mod_name):
if not _is_valid_modname(mod_name):
raise ValueError(mod_name + " contains invalid characters")
if mod_name:
return "tvmgen_" + mod_name
return "tvmgen"
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/backend/vm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable, invalid-name, redefined-builtin
"""
The Relay Virtual Machine.
Implements a Python interface to compiling and executing on the Relay VM.
"""
import numpy as np
import tvm.runtime.ndarray as _nd
import tvm.runtime.vm as vm_rt
from tvm import autotvm
from tvm.relay import expr as _expr
from tvm.relay.backend.interpreter import Executor
from tvm.target import Target
from . import _vm
def compile(mod, target=None, target_host=None, params=None):
"""Compile the module to VM executable. A helper function for VMCompiler.
Parameters
----------
mod : tvm.IRModule
The Relay module to build.
target : any multi-target like object, see Target.canon_multi_target
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
target_host : None, or any target-like object, see Target.canon_target
Host compilation target, if target is device.
When TVM compiles device specific program such as CUDA,
we also need host(CPU) side code to interact with the driver
to setup the dimensions and parameters correctly.
target_host is used to specify the host side codegen target.
By default, llvm is used if it is enabled,
        otherwise a stackvm interpreter is used.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
Returns
-------
exec : tvm.runtime.vm.Executable
The VM executable that contains both library code and bytecode.
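    Example
    -------
    .. code-block:: python
        # A minimal sketch: compile an identity function for the host CPU.
        import numpy as np
        import tvm
        from tvm import relay
        from tvm.relay.backend import vm as relay_vm
        from tvm.runtime.vm import VirtualMachine
        x = relay.var("x", shape=(2, 2), dtype="float32")
        mod = tvm.IRModule.from_expr(relay.Function([x], x))
        exe = relay_vm.compile(mod, target="llvm")
        vm_instance = VirtualMachine(exe, tvm.cpu())
        out = vm_instance.run(np.ones((2, 2), dtype="float32"))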
"""
compiler = VMCompiler()
if params:
compiler.set_params(params)
compiler.lower(mod, target, target_host)
compiler.codegen()
return compiler.get_exec()
class VMCompiler(object):
"""Compiler that compiles Relay module to VM executable."""
def __init__(self):
self.mod = _vm._VMCompiler()
self._lower = self.mod["lower"]
self._codegen = self.mod["codegen"]
self._get_exec = self.mod["get_executable"]
self._set_params_func = self.mod["set_params"]
self._get_params_func = self.mod["get_params"]
self._optimize = self.mod["optimize"]
def set_params(self, params):
"""Set constant parameters for the model.
Parameters
----------
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
"""
inputs = {}
for name, param in params.items():
if isinstance(param, np.ndarray):
param = _nd.array(param)
inputs[name] = _expr.const(param)
self._set_params_func(inputs)
def get_params(self):
"""Return the updated weights."""
params = self._get_params_func()
ret = {}
for key, value in params.items():
ret[key] = value.data
return ret
def lower(self, mod, target=None, target_host=None):
"""Lower the module to VM bytecode.
Parameters
----------
mod : tvm.IRModule
The Relay module to build.
target : any multi-target like object, see Target.canon_multi_target
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
target_host : any target-like object, see Target.canon_target
Host compilation target, if target is device.
"""
raw_targets = Target.canon_multi_target_and_host(target, target_host)
tophub_context = self._tophub_context(raw_targets)
with tophub_context:
self._lower(mod, raw_targets)
def codegen(self):
"""Generate the kernel library."""
self._codegen()
def optimize(self, mod, target=None, target_host=None, params=None):
"""Helper method that optimizes a Relay module via VM.
Parameters
----------
mod : tvm.IRModule
target : any multi-target like object, see Target.canon_multi_target
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
target_host : any target-like object, see Target.canon_target
Host compilation target, if target is device.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
Returns
-------
mod : tvm.IRModule
The optimized relay module.
params : dict
The parameters of the final module.
"""
raw_targets = Target.canon_multi_target_and_host(target, target_host)
if params:
self.set_params(params)
return self._optimize(mod, raw_targets), self.get_params()
def get_exec(self):
"""Get the VM executable.
Returns
-------
exec : tvm.runtime.vm.Executable
The VM executable that contains both library code and bytecode.
"""
return vm_rt.Executable(self._get_exec())
def _tophub_context(self, raw_targets):
"""Get the autotvm context."""
# If current dispatch context is fallback context (the default root context),
# then load pre-tuned parameters from TopHub
if isinstance(autotvm.DispatchContext.current, autotvm.FallbackContext):
tophub_context = autotvm.tophub.context(raw_targets)
else:
tophub_context = autotvm.utils.EmptyContext()
return tophub_context
class VMExecutor(Executor):
"""
An implementation of the executor interface for
the Relay VM.
    Useful interface for experimentation and debugging;
    the VM can also be used directly through the API
    supported by `tvm.runtime.vm`.
Parameters
----------
mod : :py:class:`~tvm.IRModule`
The module to support the execution.
device : :py:class:`~tvm.runtime.Device`
The runtime device to run the code on.
target : any multi-target like object, see Target.canon_multi_target
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
"""
def __init__(self, mod, device, target):
if mod is None:
raise RuntimeError("Must provide module to get VM executor.")
self.mod = mod
self.device = device
self.target = target
self.executable = None
self.vm = None
def _make_executor(self, expr=None):
if expr:
self.mod["main"] = expr
self.executable = compile(self.mod, self.target)
self.vm = vm_rt.VirtualMachine(self.executable, self.device)
def _vm_wrapper(*args, **kwargs):
args = self._convert_args(self.mod["main"], args, kwargs)
return self.vm.run(*args)
return _vm_wrapper
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/base.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, unused-import
"""The base node types for the Relay language."""
import os
import tvm._ffi
from tvm.runtime import Object
from tvm.ir import SourceName, Span, Node as RelayNode
__STD_PATH__ = os.path.join(os.path.dirname(os.path.realpath(__file__)), "std")
@tvm._ffi.register_func("tvm.relay.std_path")
def _std_path():
return __STD_PATH__
@tvm._ffi.register_object("relay.Id")
class Id(Object):
"""Unique identifier(name) used in Var.
Guaranteed to be stable across all passes.
"""
def __init__(self):
raise RuntimeError("Cannot directly construct Id")
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/build_module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Construct the necessary state for the TVM graph executor
from a Relay expression.
"""
import warnings
import numpy as np
from tvm.ir import IRModule
from tvm.target import Target
from .. import autotvm
from .. import nd as _nd
from .. import register_func
from ..contrib import graph_executor as _graph_executor
from ..contrib import utils as contrib_utils
from ..runtime import load_module
from ..runtime.executor import aot_executor as _aot_executor
from ..target import Target
from . import _build_module
from . import expr as _expr
from . import function as _function
from . import ty as _ty
from .backend import Executor, Runtime
from .backend import executor_factory as _executor_factory
from .backend import interpreter as _interpreter
from .backend.utils import mangle_module_name
from .backend.vm import VMExecutor
from .transform import InferType
def _convert_param_map(params):
inputs = {}
for name, param in params.items():
if isinstance(param, np.ndarray):
param = _nd.array(param)
inputs[name] = _expr.const(param)
return inputs
class BuildModule(object):
"""Build an IR module to run on TVM graph executor. This class is used
to expose the `RelayBuildModule` APIs implemented in C++.
"""
def __init__(self):
self.mod = _build_module._BuildModule()
self._get_graph_json = self.mod["get_graph_json"]
self._get_module = self.mod["get_module"]
self._build = self.mod["build"]
self._optimize = self.mod["optimize"]
self._set_params_func = self.mod["set_params"]
self._get_params_func = self.mod["get_params"]
self._get_function_metadata = self.mod["get_function_metadata"]
self._get_executor_codegen_metadata = self.mod["get_executor_codegen_metadata"]
self._get_devices = self.mod["get_devices"]
self._get_irmodule = self.mod["get_irmodule"]
def build(
self,
mod,
target=None,
target_host=None,
executor=Executor("graph"),
runtime=Runtime("cpp"),
workspace_memory_pools=None,
constant_memory_pools=None,
params=None,
mod_name=None,
):
"""
Parameters
----------
mod : :py:class:`~tvm.IRModule`
The IRModule to build.
target : any multi-target like object, see Target.canon_multi_target
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
target_host : None, or any target-like object, see Target.canon_target
Host compilation target, if target is device.
When TVM compiles device specific program such as CUDA,
we also need host(CPU) side code to interact with the driver
to setup the dimensions and parameters correctly.
target_host is used to specify the host side codegen target.
By default, llvm is used if it is enabled,
otherwise a stackvm interpreter is used.
executor : Optional[Executor]
The executor configuration with which to build the model.
Defaults to "graph" if no executor specified.
runtime : Optional[Runtime]
Runtime configuration to use when building the model.
Defaults to "cpp" if no runtime specified.
workspace_memory_pools : Optional[WorkspaceMemoryPools]
The object that contains an Array of WorkspacePoolInfo objects
that hold properties of read-write workspace pools that could be
used by the inference.
constant_memory_pools : Optional[ConstantMemoryPools]
The object that contains an Array of ConstantPoolInfo objects
that hold properties of read-only memory pools that could be
used by the inference.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
mod_name: Optional[str]
The module name we will build
Returns
-------
graph_json : str
The json string that can be accepted by graph executor.
mod : tvm.Module
The module containing necessary libraries.
params : dict
The parameters of the final graph.
"""
# pylint: disable=import-outside-toplevel
from tvm.auto_scheduler import is_auto_scheduler_enabled
from tvm.meta_schedule import is_meta_schedule_enabled
# pylint: enable=import-outside-toplevel
# Setup the params.
if params:
self._set_params(params)
# Build the IR module. If auto_scheduler is not enabled,
# then use the TOPI-defined schedule.
# Turn off AutoTVM config not found warnings if auto_scheduler is enabled.
old_autotvm_silent = autotvm.GLOBAL_SCOPE.silent
autotvm.GLOBAL_SCOPE.silent = (
is_auto_scheduler_enabled() or is_meta_schedule_enabled() or old_autotvm_silent
)
mod_name = mangle_module_name(mod_name)
self._build(
mod,
target,
target_host,
executor,
runtime,
workspace_memory_pools,
constant_memory_pools,
mod_name,
)
autotvm.GLOBAL_SCOPE.silent = old_autotvm_silent
# Get artifacts
mod = self.get_module()
params = self.get_params()
executor_config = self.get_graph_json() if executor.name == "graph" else None
return executor_config, mod, params
def optimize(self, mod, target=None, target_host=None, params=None):
"""
Parameters
----------
mod : :py:class:`~tvm.IRModule`
The IR module to build.
target : any multi-target like object, see Target.canon_multi_target.
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
target_host : None, or any target-like object, see Target.canon_target
Host compilation target, if target is device.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
Returns
-------
mod : :py:class:`~tvm.IRModule`
The optimized relay module.
params : dict
The parameters of the final graph.
"""
raw_targets = Target.canon_multi_target_and_host(target, target_host)
# Setup the params.
if params:
self._set_params(params)
mod = self._optimize(mod, raw_targets)
# Get artifacts
params = self.get_params()
return mod, params
def _set_params(self, params):
self._set_params_func(_convert_param_map(params))
def get_graph_json(self):
"""Return the json file of the built program."""
return self._get_graph_json()
def get_module(self):
"""Return the built module."""
return self._get_module()
def get_function_metadata(self):
"""Return the compiled function metadata.
Currently, the metadata contains workspace size required by
each PrimFunc"""
return self._get_function_metadata()
def get_executor_codegen_metadata(self):
"""Return the metadata produced after executor
codegen
"""
return self._get_executor_codegen_metadata()
def get_devices(self):
"""Returns a list of devices configured in this module"""
return self._get_devices()
def get_params(self):
"""Return the updated weights."""
params = self._get_params_func()
ret = {}
for key, value in params.items():
ret[key] = value.data
return ret
def get_irmodule(self):
"""Returns the TargetIRModule's post-lowering"""
return self._get_irmodule()
@register_func("tvm.relay.module_export_library")
def _module_export(module, file_name): # fcompile, addons, kwargs?
return module.export_library(file_name)
@register_func("tvm.relay.build")
def _build_module_no_factory_impl(mod, target, target_host, params, mod_name):
return build(
mod, target=target, target_host=target_host, params=params, mod_name=mod_name
).module
def _build_module_no_factory(mod, target=None, target_host=None, params=None, mod_name="default"):
"""A wrapper around build which discards the Python GraphFactoryRuntime.
This wrapper is suitable to be used from other programming languages as
the runtime::Module can be freely passed between language boundaries.
"""
return _build_module_no_factory_impl(mod, target, target_host, params, mod_name)
def build_with_bldmod(
ir_mod,
target=None,
target_host=None,
executor=Executor("graph"),
runtime=Runtime("cpp"),
workspace_memory_pools=None,
constant_memory_pools=None,
params=None,
mod_name="default",
):
# fmt: off
# pylint: disable=line-too-long
"""Helper function that builds a Relay function to run on TVM graph executor.
Parameters
----------
ir_mod : :py:class:`~tvm.IRModule`
The IR module to build. Using relay.Function is deprecated.
target : None, or any multi-target like object, see Target.canon_multi_target
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
Defaults to the current target in the environment if None.
target_host : None, or any target like object, see Target.canon_target
Host compilation target, if target is device.
executor : Optional[Executor]
The executor configuration with which to build the model.
Defaults to "graph" if no executor specified.
runtime : Optional[Runtime]
Runtime configuration to use when building the model.
Defaults to "cpp" if no runtime specified.
workspace_memory_pools : Optional[WorkspaceMemoryPools]
The object that contains an Array of WorkspacePoolInfo objects
that hold properties of read-write workspace pools that could be
used by the inference.
constant_memory_pools : Optional[ConstantMemoryPools]
The object that contains an Array of ConstantPoolInfo objects
that hold properties of read-only pools that could be
used by the inference.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
mod_name: Optional[str]
The module name we will build
Returns
-------
factory_module : tvm.relay.backend.executor_factory.ExecutorFactoryModule
The runtime factory for the TVM graph executor.
"""
# pylint: enable=line-too-long
# fmt: on
if not isinstance(ir_mod, (IRModule, _function.Function)):
raise ValueError("Type of input parameter mod must be tvm.IRModule")
if isinstance(ir_mod, _function.Function):
if params:
ir_mod = bind_params_by_name(ir_mod, params)
ir_mod = IRModule.from_expr(ir_mod)
warnings.warn(
"Please use input parameter mod (tvm.IRModule) "
"instead of deprecated parameter mod (tvm.relay.function.Function)",
DeprecationWarning,
)
raw_targets = Target.canon_multi_target_and_host(Target.target_or_current(target), target_host)
assert len(raw_targets) > 0
target_host = raw_targets[0].host
# If current dispatch context is fallback context (the default root context),
# then load pre-tuned parameters from TopHub
if isinstance(autotvm.DispatchContext.current, autotvm.FallbackContext):
tophub_context = autotvm.tophub.context(list(raw_targets))
else:
tophub_context = autotvm.utils.EmptyContext()
with tophub_context:
bld_mod = BuildModule()
graph_json, runtime_mod, params = bld_mod.build(
mod=ir_mod,
target=raw_targets,
params=params,
executor=executor,
runtime=runtime,
workspace_memory_pools=workspace_memory_pools,
constant_memory_pools=constant_memory_pools,
mod_name=mod_name,
)
func_metadata = bld_mod.get_function_metadata()
devices = bld_mod.get_devices()
lowered_ir_mods = bld_mod.get_irmodule()
executor_codegen_metadata = bld_mod.get_executor_codegen_metadata()
if executor.name == "aot":
executor_factory = _executor_factory.AOTExecutorFactoryModule(
ir_mod,
lowered_ir_mods,
raw_targets,
executor,
runtime,
runtime_mod,
mod_name,
params,
func_metadata,
executor_codegen_metadata,
devices,
)
elif executor.name == "graph":
executor_factory = _executor_factory.GraphExecutorFactoryModule(
ir_mod,
raw_targets,
executor,
graph_json,
runtime_mod,
mod_name,
params,
func_metadata,
)
else:
assert False, "Executor " + executor + " not supported"
return executor_factory, bld_mod
def build(
ir_mod,
target=None,
target_host=None,
executor=Executor("graph"),
runtime=Runtime("cpp"),
workspace_memory_pools=None,
constant_memory_pools=None,
params=None,
mod_name="default",
):
# fmt: off
# pylint: disable=line-too-long
"""Helper function that builds a Relay function to run on TVM graph executor.
Parameters
----------
ir_mod : :py:class:`~tvm.IRModule`
The IR module to build. Using relay.Function is deprecated.
target : None, or any multi-target like object, see Target.canon_multi_target
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
Defaults to the current target in the environment if None.
target_host : None, or any target like object, see Target.canon_target
Host compilation target, if target is device.
executor : Optional[Executor]
The executor configuration with which to build the model.
Defaults to "graph" if no executor specified.
runtime : Optional[Runtime]
Runtime configuration to use when building the model.
Defaults to "cpp" if no runtime specified.
workspace_memory_pools : Optional[WorkspaceMemoryPools]
The object that contains an Array of WorkspacePoolInfo objects
that hold properties of read-write workspace pools that could be
used by the inference.
constant_memory_pools : Optional[ConstantMemoryPools]
The object that contains an Array of ConstantPoolInfo objects
that hold properties of read-only pools that could be
used by the inference.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
mod_name: Optional[str]
The module name we will build
Returns
-------
factory_module : tvm.relay.backend.executor_factory.ExecutorFactoryModule
The runtime factory for the TVM graph executor.
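    Example
    -------
    .. code-block:: python
        # A minimal sketch: build a ReLU graph for the host CPU.
        import tvm
        from tvm import relay
        x = relay.var("x", shape=(1, 4), dtype="float32")
        mod = tvm.IRModule.from_expr(relay.Function([x], relay.nn.relu(x)))
        factory = relay.build(mod, target="llvm", mod_name="default")
        lib = factory.get_lib()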
"""
# pylint: enable=line-too-long
# fmt: on
if not isinstance(ir_mod, (IRModule, _function.Function)):
raise ValueError("Type of input parameter mod must be tvm.IRModule")
if isinstance(ir_mod, _function.Function):
if params:
ir_mod = bind_params_by_name(ir_mod, params)
ir_mod = IRModule.from_expr(ir_mod)
warnings.warn(
"Please use input parameter mod (tvm.IRModule) "
"instead of deprecated parameter mod (tvm.relay.function.Function)",
DeprecationWarning,
)
raw_targets = Target.canon_multi_target_and_host(Target.target_or_current(target), target_host)
assert len(raw_targets) > 0
target_host = raw_targets[0].host
# If current dispatch context is fallback context (the default root context),
# then load pre-tuned parameters from TopHub
if isinstance(autotvm.DispatchContext.current, autotvm.FallbackContext):
tophub_context = autotvm.tophub.context(list(raw_targets))
else:
tophub_context = autotvm.utils.EmptyContext()
with tophub_context:
bld_mod = BuildModule()
graph_json, runtime_mod, params = bld_mod.build(
mod=ir_mod,
target=raw_targets,
params=params,
executor=executor,
runtime=runtime,
workspace_memory_pools=workspace_memory_pools,
constant_memory_pools=constant_memory_pools,
mod_name=mod_name,
)
func_metadata = bld_mod.get_function_metadata()
devices = bld_mod.get_devices()
lowered_ir_mods = bld_mod.get_irmodule()
executor_codegen_metadata = bld_mod.get_executor_codegen_metadata()
if executor.name == "aot":
executor_factory = _executor_factory.AOTExecutorFactoryModule(
ir_mod,
lowered_ir_mods,
raw_targets,
executor,
runtime,
runtime_mod,
mod_name,
params,
func_metadata,
executor_codegen_metadata,
devices,
)
elif executor.name == "graph":
executor_factory = _executor_factory.GraphExecutorFactoryModule(
ir_mod,
raw_targets,
executor,
graph_json,
runtime_mod,
mod_name,
params,
func_metadata,
)
else:
assert False, "Executor " + executor + " not supported"
return executor_factory
def optimize(mod, target=None, params=None):
"""Helper function that optimizes a Relay module.
Parameters
----------
mod : :py:class:`~tvm.IRModule`
The module to build. Using relay.Function is deprecated.
target : None, or any multi-target like object, see Target.canon_multi_target
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
Defaults to the current target in the environment if None.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
Returns
-------
mod : :py:class:`~tvm.IRModule`
The optimized relay module.
params : dict
The parameters of the final graph.
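    Example
    -------
    .. code-block:: python
        # A minimal sketch: run the standard optimization pipeline on a module.
        import tvm
        from tvm import relay
        x = relay.var("x", shape=(4,), dtype="float32")
        mod = tvm.IRModule.from_expr(relay.Function([x], relay.add(x, x)))
        opt_mod, opt_params = relay.optimize(mod, target="llvm")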
"""
if not isinstance(mod, (IRModule, _function.Function)):
raise ValueError("Type of input parameter mod must be tvm.IRModule")
if isinstance(mod, _function.Function):
if params:
mod = bind_params_by_name(mod, params)
mod = IRModule.from_expr(mod)
warnings.warn(
"Please use input parameter mod (tvm.IRModule) "
"instead of deprecated parameter func (tvm.relay.function.Function)",
DeprecationWarning,
)
raw_targets = Target.canon_multi_target_and_host(Target.target_or_current(target))
# If current dispatch context is fallback context (the default root context),
# then load pre-tuned parameters from TopHub
if isinstance(autotvm.DispatchContext.current, autotvm.FallbackContext):
tophub_context = autotvm.tophub.context(raw_targets)
else:
tophub_context = autotvm.utils.EmptyContext()
with tophub_context:
bld_mod = BuildModule()
mod, params = bld_mod.optimize(mod, target=raw_targets, params=params)
return mod, params
def bind_params_by_name(func, params):
"""Bind params to function by name.
This could be useful when assembling custom Relay optimization
passes that involve constant folding.
Parameters
----------
func : relay.Function
The function to bind parameters to.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
Returns
-------
func : relay.Function
The function with parameters bound
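    Example
    -------
    .. code-block:: python
        # A minimal sketch: bind the weight "w" to a constant so it can be folded.
        import numpy as np
        from tvm import relay
        x = relay.var("x", shape=(2,), dtype="float32")
        w = relay.var("w", shape=(2,), dtype="float32")
        func = relay.Function([x, w], relay.add(x, w))
        bound = relay.build_module.bind_params_by_name(
            func, {"w": np.zeros((2,), dtype="float32")}
        )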
"""
inputs = _convert_param_map(params)
return _build_module.BindParamsByName(func, inputs)
class GraphExecutor(_interpreter.Executor):
"""Wrapper around Executor interface.
    This executor is used for debugging and testing purposes.
Parameters
----------
mod : :py:class:`~tvm.IRModule`
The module to support the execution.
device : :py:class:`Device`
The runtime device to run the code on.
target : any multi-target like object, see Target.canon_multi_target
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
"""
def __init__(self, mod, device, target):
assert mod is not None
self.mod = mod
self.device = device
self.target = target
def _make_executor(self, expr=None):
if expr:
self.mod["main"] = expr
self.mod = InferType()(self.mod)
ret_type = self.mod["main"].checked_type.ret_type
if _ty.is_dynamic(ret_type):
raise ValueError(
"Graph Executor only supports static graphs, got output type", ret_type
)
mod = build(self.mod, target=self.target)
gmodule = _graph_executor.GraphModule(mod["default"](self.device))
def _unflatten(flat_iter, cur_type):
if isinstance(cur_type, _ty.TensorType):
return next(flat_iter)
if isinstance(cur_type, _ty.TupleType):
fields = []
for field_type in cur_type.fields:
field = _unflatten(flat_iter, field_type)
fields.append(field)
return fields
raise ValueError("Return type", ret_type, "contains unsupported type", cur_type)
def _graph_wrapper(*args, **kwargs):
args = self._convert_args(self.mod["main"], args, kwargs)
# Create map of inputs.
for i, arg in enumerate(args):
gmodule.set_input(i, arg)
# Run the module, and fetch the output.
gmodule.run()
flattened = []
for i in range(gmodule.get_num_outputs()):
flattened.append(gmodule.get_output(i).copyto(_nd.cpu(0)))
unflattened = _unflatten(iter(flattened), ret_type)
return unflattened
return _graph_wrapper
class AotExecutor(_interpreter.Executor):
"""Implements the Executor interface for AOT.
Parameters
----------
mod : :py:class:`~tvm.IRModule`
The module to support the execution.
device : :py:class:`Device`
The runtime device to run the code on.
target : any multi-target like object, see Target.canon_multi_target
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
"""
def __init__(self, mod, device, target):
assert mod is not None
self.mod = mod
self.device = device
self.target = target
def _make_executor(self, expr=None):
if expr:
self.mod["main"] = expr
self.mod = InferType()(self.mod)
ret_type = self.mod["main"].checked_type.ret_type
if _ty.is_dynamic(ret_type):
raise ValueError("AOT Executor only supports static graphs, got output type", ret_type)
mod = build(self.mod, target=self.target)
        # NOTE: Since AOT requires the "c" backend, we must export and re-import the
        # module to compile the generated code.
temp_so_dir = contrib_utils.TempDirectory()
temp_so = temp_so_dir / "temp.so"
mod.export_library(temp_so, cc="gcc", options=["-std=c11"])
mod = load_module(temp_so)
aot_mod = mod["default"](self.device)
gmodule = _aot_executor.AotModule(aot_mod)
def _unflatten(flat_iter, cur_type):
if isinstance(cur_type, _ty.TensorType):
return next(flat_iter)
if isinstance(cur_type, _ty.TupleType):
fields = []
for field_type in cur_type.fields:
field = _unflatten(flat_iter, field_type)
fields.append(field)
return fields
raise ValueError("Return type", ret_type, "contains unsupported type", cur_type)
def _aot_wrapper(*args, **kwargs):
args = self._convert_args(self.mod["main"], args, kwargs)
# Create map of inputs.
for i, arg in enumerate(args):
gmodule.set_input(i, arg)
# Run the module, and fetch the output.
gmodule.run()
flattened = []
for i in range(gmodule.get_num_outputs()):
flattened.append(gmodule.get_output(i).copyto(_nd.cpu(0)))
unflattened = _unflatten(iter(flattened), ret_type)
return unflattened
return _aot_wrapper
# TODO(mbs): Collapse the create_executor/evaluate phases together since a) most callers don't
# reuse the executor for multiple expressions and b) any preparation necessary for the expression
# evaluation needs to (currently) be done along with preparation for the module.
def create_executor(kind="debug", mod=None, device=None, target="llvm", params=None):
"""Factory function to create an executor.
Example
-------
.. code-block:: python
import tvm.relay
import numpy as np
x = tvm.relay.var("x", tvm.relay.TensorType([1], dtype="float32"))
expr = tvm.relay.add(x, tvm.relay.Constant(tvm.nd.array(np.array([1], dtype="float32"))))
tvm.relay.create_executor(
kind="vm", mod=tvm.IRModule.from_expr(tvm.relay.Function([x], expr))
).evaluate()(np.array([2], dtype="float32"))
# returns `array([3.], dtype=float32)`
Parameters
----------
kind : str
        The type of executor. Available options are `debug` for the interpreter, `graph` for the
graph executor, `aot` for the aot executor, and `vm` for the virtual machine.
mod : :py:class:`~tvm.IRModule`
The Relay module containing collection of functions
device : :py:class:`Device`
The device to execute the code.
target : any multi-target like object, see Target.canon_multi_target
For homogeneous compilation, the unique build target.
For heterogeneous compilation, a dictionary or list of possible build targets.
CAUTION: Though this API allows multiple targets, it does not allow multiple devices, so
        heterogeneous compilation is not yet supported.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time.
Returns
-------
executor : :py:class:`~tvm.relay.backend.interpreter.Executor`
"""
raw_targets = Target.canon_multi_target(target)
if mod is None:
mod = IRModule()
if device is not None:
assert device.device_type == raw_targets[0].get_target_device_type()
else:
# Derive the default device from the first target.
device = _nd.device(raw_targets[0].get_target_device_type(), 0)
if params is not None:
mod = IRModule.from_expr(bind_params_by_name(mod["main"], params))
assert "executor" not in raw_targets[0].attrs or raw_targets[0].attrs["executor"] == kind
if kind == "debug":
assert len(raw_targets) == 1, "The interpreter currently only supports a single target"
return _interpreter.Interpreter(mod, device, raw_targets[0])
if kind == "graph":
return GraphExecutor(mod, device, raw_targets)
if kind == "vm":
return VMExecutor(mod, device, raw_targets)
if kind == "aot":
return AotExecutor(mod, device, raw_targets)
raise RuntimeError("unknown execution strategy: {0}".format(kind))
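# A hedged usage sketch (illustrative only, not part of the original module): the
# same add-one computation as the docstring example above, but run through the
# graph executor with a bound parameter. Names and shapes below are examples.
def _example_graph_executor():  # pragma: no cover
    import numpy as np
    import tvm

    x = tvm.relay.var("x", shape=(1,), dtype="float32")
    c = tvm.relay.var("c", shape=(1,), dtype="float32")
    mod = tvm.IRModule.from_expr(tvm.relay.Function([x, c], tvm.relay.add(x, c)))
    params = {"c": tvm.nd.array(np.array([1.0], dtype="float32"))}
    # "c" is bound as a constant parameter before compilation.
    func = create_executor(kind="graph", mod=mod, params=params).evaluate()
    return func(np.array([2.0], dtype="float32"))  # array([3.], dtype=float32)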
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/collage/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""relay.collage exports"""
from .collage import (
MEASURE_NUMBER,
MEASURE_REPEAT,
WARMUP_MIN_REPEAT_MS,
CostEstimator,
MockCostEstimator,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/collage/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for the Collage partitioner."""
import tvm._ffi
tvm._ffi._init_api("relay.collage", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/collage/collage.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Mostly helper methods which interface the main C++ Collage implementation with Python.
See relay.transform.CollagePartition for the main Collage entrypoint."""
import logging
import os
import math
import tempfile
import numpy as np
import tvm
from tvm._ffi.registry import register_func, register_object
from tvm.runtime import Object
from . import _ffi_api
# Parameters to use when estimating latency (of both partitions and overall models).
MEASURE_NUMBER = 20
MEASURE_REPEAT = 5
WARMUP_MIN_REPEAT_MS = 250
@register_object("relay.collage.CostEstimator")
class CostEstimator(Object):
"""CostEstimator class"""
def __init__(self):
self.__init_handle_by_constructor__(_ffi_api.CostEstimator)
@register_object("relay.collage.MockCostEstimator")
class MockCostEstimator(Object):
"""MockEstimator class"""
def __init__(self, target_costs, max_estimates=0):
self.__init_handle_by_constructor__(_ffi_api.MockCostEstimator, target_costs, max_estimates)
def arg_for(arg_type, device):
"""Returns a test argument of Relay arg_type on device"""
assert isinstance(arg_type, tvm.ir.TensorType)
return tvm.nd.array(
np.random.uniform(-1.0, 1.0, size=arg_type.concrete_shape).astype(arg_type.dtype),
device=device,
)
def vm_estimate_seconds(device, the_vm, func_name, args):
"""Returns the estimated latency, in seconds, of running func_name with args on the_vm."""
# Warmup
the_vm.benchmark(
device, repeat=1, number=1, min_repeat_ms=WARMUP_MIN_REPEAT_MS, func_name=func_name, **args
)
# One more time, with feeling
return the_vm.benchmark(
device,
repeat=MEASURE_REPEAT,
number=MEASURE_NUMBER,
min_repeat_ms=0,
func_name=func_name,
**args,
)
@register_func("tvm.relay.collage.estimate_seconds")
def estimate_seconds(mod, target):
"""Returns the mean execution time of "main" in mod on target with params. The module
may contain "Primitive" functions, possibly with "Compiler" attributes."""
device = tvm.device(target.get_target_device_type())
try:
# Build the module.
logging.info("Compiling module to estimate")
exe = tvm.relay.vm.compile(mod, target)
except RuntimeError as err:
# A build failure indicates the partition is not supported.
# eg trying to build an nn.batch_norm on GPU, which has no schedule since we assume it
# is only ever used with a tuple projection which is rewritten away.
logging.info("Assigning module infinite cost since unable to build: %s", err)
return math.inf
# Finalize compilation
tmp_dir = tempfile.mkdtemp()
code, lib = exe.save()
lib_path = os.path.join(tmp_dir, "library.so")
# TODO(mbs): Avoid nvcc dependency?
lib.export_library(lib_path, workspace_dir=tmp_dir, cc="nvcc")
lib = tvm.runtime.load_module(lib_path)
exe = tvm.runtime.vm.Executable.load_exec(code, lib)
# Benchmark the module.
the_vm = tvm.runtime.vm.VirtualMachine(exe, device)
func_name = "main"
main_args = {v.name_hint: arg_for(v.checked_type, device) for v in mod[func_name].params}
logging.info("Benchmarking module to estimate")
profile = vm_estimate_seconds(device, the_vm, func_name, main_args)
logging.info("profile: %s", profile)
return profile.median # seconds
def make_labelled_dfpattern_partition_rule_wrapper(compiler, pattern_tuple):
"""Returns a DFPatternPartitionRule representing one (label, pattern, predicate) entry from
the pattern table for external codegen compiler"""
if len(pattern_tuple) == 2:
rule_name, dataflow_pattern = pattern_tuple
return _ffi_api.MakeLabelledDFPatternPartitionRule(compiler, rule_name, dataflow_pattern)
else:
rule_name, dataflow_pattern, predicate = pattern_tuple
return _ffi_api.MakeLabelledDFPatternPartitionRuleWithPredicate(
compiler, rule_name, dataflow_pattern, predicate
)
@register_func("tvm.relay.collage.make_byoc_partition_rule")
def make_byoc_partition_rule(compiler):
"""Returns the PartitionRule for external codegen compiler"""
pattern_table = tvm.relay.op.contrib.get_pattern_table(compiler)
assert (
pattern_table is not None
), f"No pattern table entry was found for BYOC compiler {compiler}"
logging.info(
"Converting %s rules for %s for use in pattern style BYOC lowering/codegen",
len(pattern_table),
compiler,
)
sub_rules = [
make_labelled_dfpattern_partition_rule_wrapper(compiler, pattern_tuple)
for pattern_tuple in pattern_table
]
return _ffi_api.MakePatternBYOCPartitionRule(compiler, sub_rules)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/data_dep_optimization/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, not-context-manager
"""Optimizations involves changing of paramters"""
from . import bsr_dense
from . import simplify_fc_transpose
from . import bsr_conv2d
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/data_dep_optimization/bsr_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, not-context-manager
"""Automatic convert model from dense to block sparse"""
from tvm import relay
from tvm.relay.analysis.sparse_conv2d import process_params
from .utils import _run_opt_pass
def convert(func, params, blocksize, sparsity_threshold, layout="NHWC", kernel_size=1):
"""Convert a conv2d func and according parameters to block sparse
Parameters
----------
func : relay.Expr
Expr will be optimized to sparse operation
    params : Dict[String, tvm.nd.array]
Parameters of the Expr
blocksize : Tuple(int, int)
Blocksize for BSR matrix
sparsity_threshold : float
Minimal sparsity requirement for converting.
If weight sparsity is lower than this threshold,
the dense operation will be kept.
    layout : str
        layout of network
    kernel_size : int
        kernel size of the conv2d, for filtering
Returns
-------
new_func: relay.Expr
Mutated Expr with sparse operations
    params: Dict[String, tvm.nd.array]
New params with BSR matrix for mutated Expr
"""
weight_info = process_params(func, params, blocksize, sparsity_threshold, layout, kernel_size)
new_func = _run_opt_pass(
func,
relay.transform.Conv2dToSparse(
weight_info.weight_name, weight_info.weight_shape, layout, kernel_size
),
)
return new_func, params
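# A hedged usage sketch (illustrative only, not part of the original module). It
# builds a tiny 1x1 NHWC conv2d and runs it through `convert`; the shapes, names
# and threshold below are made up for the example.
def _example_bsr_conv2d_convert():  # pragma: no cover
    import numpy as np
    import tvm

    x = relay.var("x", shape=(1, 56, 56, 64), dtype="float32")
    w = relay.var("w", shape=(1, 1, 64, 64), dtype="float32")
    y = relay.nn.conv2d(
        x, w, kernel_size=(1, 1), channels=64, data_layout="NHWC", kernel_layout="HWIO"
    )
    func = relay.Function([x, w], y)
    # A mostly-zero weight so the sparsity threshold can be met.
    w_np = np.zeros((1, 1, 64, 64), dtype="float32")
    w_np[..., :4] = np.random.rand(1, 1, 64, 4)
    params = {"w": tvm.nd.array(w_np)}
    return convert(func, params, blocksize=(16, 1), sparsity_threshold=0.8, layout="NHWC")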
def convert2(func, params, blocksize, sparsity_threshold, layout, kernel_size):
"""Convert a freezed conv2d func to block sparse
Parameters
----------
func : relay.Expr
        Expr will be optimized to sparse operation, with params frozen
    params : Dict[String, tvm.nd.array]
Parameters of the Expr (not used in this pass)
blocksize : Tuple(int, int)
Blocksize for BSR matrix
sparsity_threshold : float
Minimal sparsity requirement for converting.
If weight sparsity is lower than this threshold,
the dense operation will be kept.
layout : str
layout of network
kernel_size : int
kernel size of the conv2d, for filtering
Returns
-------
new_func: relay.Expr
Mutated Expr with sparse operations
    params: Dict[String, tvm.nd.array]
New params with BSR matrix for mutated Expr (not modified)
"""
new_func = _run_opt_pass(
func, relay.transform.Conv2dToSparse2(layout, kernel_size, blocksize, sparsity_threshold)
)
return new_func, params
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/data_dep_optimization/bsr_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, not-context-manager
"""Automatic convert model from dense to block sparse"""
from tvm import relay
from tvm.relay.analysis.sparse_dense import process_params
from .utils import _run_opt_pass
def convert(func, params, blocksize, sparsity_threshold):
"""Convert a dense func and according parameters to block sparse
Parameters
----------
func : relay.Expr
Expr will be optimized to sparse operation
    params : Dict[String, tvm.nd.array]
Parameters of the Expr
blocksize : Tuple(int, int)
Blocksize for BSR matrix
sparsity_threshold : float
Minimal sparsity requirement for converting.
If weight sparsity is lower than this threshold,
the dense operation will be kept.
Returns
-------
new_func: relay.Expr
Mutated Expr with sparse operations
    params: Dict[String, tvm.nd.array]
New params with BSR matrix for mutated Expr
"""
weight_info = process_params(func, params, blocksize, sparsity_threshold)
new_func = _run_opt_pass(
func, relay.transform.DenseToSparse(weight_info.weight_name, weight_info.weight_shape)
)
return new_func, params
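# A hedged usage sketch (illustrative only, not part of the original module):
# convert the weight of a single nn.dense to BSR format. Shapes, names and the
# threshold below are made up for the example.
def _example_bsr_dense_convert():  # pragma: no cover
    import numpy as np
    import tvm

    x = relay.var("x", shape=(8, 128), dtype="float32")
    w = relay.var("w", shape=(256, 128), dtype="float32")
    func = relay.Function([x, w], relay.nn.dense(x, w))
    # A mostly-zero weight so the sparsity threshold can be met.
    w_np = np.zeros((256, 128), dtype="float32")
    w_np[:, :8] = np.random.rand(256, 8)
    params = {"w": tvm.nd.array(w_np)}
    return convert(func, params, blocksize=(16, 1), sparsity_threshold=0.8)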
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/data_dep_optimization/simplify_fc_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, not-context-manager
"""Automatic optimize fc tranpose"""
import numpy as np
import tvm
from tvm import relay
from tvm.relay.analysis import search_fc_transpose
from .utils import _run_opt_pass
def convert(func, params):
"""convert all ```y = nn.dense(x, transpose(w, [1, 0]))``` to
```y = nn.dense(x, wt)```
Parameters
----------
func : relay.Expr
Expr will be optimized
params : Dict[String, tvm.nd.array]
Parameters of Expr
Returns
-------
new_func : relay.Expr
Mutated Expr from ```y = nn.dense(x, transpose(w, [1, 0]))``` to
```y = nn.dense(x, wt)```
params: Dict[String, tvm.nd.array]
Parameters of mutated Expr, with weights pre-transposed
"""
weight_info = search_fc_transpose(func)
for item in weight_info:
name = str(item)
w_np = params[name].numpy()
new_w = np.transpose(w_np, axes=[1, 0])
params[name + ".T"] = tvm.nd.array(new_w)
del params[name]
new_func = _run_opt_pass(
func,
relay.transform.SimplifyFCTranspose(
weight_info,
),
)
return new_func, params
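# A hedged usage sketch (illustrative only, not part of the original module):
# a dense layer written as nn.dense(x, transpose(w)) that `convert` rewrites to
# use a pre-transposed weight "w.T". Shapes and names below are examples.
def _example_simplify_fc_transpose():  # pragma: no cover
    x = relay.var("x", shape=(8, 64), dtype="float32")
    w = relay.var("w", shape=(64, 32), dtype="float32")
    y = relay.nn.dense(x, relay.transpose(w, axes=[1, 0]))
    func = relay.Function([x, w], y)
    params = {"w": tvm.nd.array(np.random.rand(64, 32).astype("float32"))}
    new_func, new_params = convert(func, params)
    return new_func, new_params  # new_params now holds "w.T" instead of "w"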
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/data_dep_optimization/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, not-context-manager
"""Utils functions for optimizations"""
import tvm
def _run_opt_pass(expr, opt_pass):
"""Helper function to run pass
Parameters
----------
expr : relay.Expr
Expr will be optimized
opt_pass : relay.Pass
Optimization pass
Returns
-------
ret: relay.Expr
Optimized Expr by running opt_pass
"""
assert isinstance(opt_pass, tvm.transform.Pass)
mod = tvm.IRModule.from_expr(expr)
mod = opt_pass(mod)
return mod["main"]
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/dataflow_pattern/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The Relay Pattern Language and tooling."""
# pylint: disable=no-member
from typing import Callable, Dict, List, Optional
import tvm._ffi
from tvm.relay.expr import RelayExpr as Expr
from ... import _ffi as tvm_ffi
from ... import ir as _ir
from ...ir import make_node
from ...ir.base import Node
from ...runtime import Object
from ..op import get
from . import _ffi as ffi
def register_df_node(type_key=None):
"""Register a Relay node type.
Parameters
----------
type_key : str or cls
The type key of the node.
"""
if not isinstance(type_key, str):
return tvm._ffi.register_object("relay.dataflow_pattern." + type_key.__name__)(type_key)
return tvm._ffi.register_object(type_key)
class DFPattern(Node):
"""Base class of all Patterns."""
def __call__(self, *args):
args = list(args)
if len(args) == 1 and args[0] is None:
args = None
return CallPattern(self, args)
def __or__(self, other):
return AltPattern(self, other)
def __add__(self, other):
return is_op("add")(self, other)
def __sub__(self, other):
return is_op("subtract")(self, other)
def __mul__(self, other):
return is_op("multiply")(self, other)
def __truediv__(self, other):
return is_op("divide")(self, other)
def has_attr(self, attrs: Dict[str, Object]):
"""
Add an attribute constraint to this pattern
Parameters
----------
attrs: Dict[str, Object]
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting AttrPattern
"""
attrs = make_node("DictAttrs", **attrs)
return AttrPattern(self, attrs)
def has_type(self, ttype: tvm.ir.type.Type):
"""
Add a type constraint to this pattern
Parameters
----------
ttype: tvm.ir.type.Type
The type to match
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting TypePattern
"""
return has_type(ttype, self)
def has_dtype(self, dtype: str):
"""
Add a type constraint to this pattern
Parameters
----------
dtype: str
The dtype to match
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting DataTypePattern
"""
return has_dtype(dtype, self)
def has_shape(self, shape: List[tvm.ir.PrimExpr]):
"""
Add a type constraint to this pattern
Parameters
----------
shape: List[tvm.ir.PrimExpr]
The shape to match
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting ShapePattern
"""
return has_shape(shape, self)
def match(self, expr: Expr) -> bool:
"""
Match this pattern to an expression
Parameters
----------
expr : tvm.relay.Expr
The expression to match.
Returns
-------
result: bool
Whether or not the expression matches the pattern
"""
return match(self, expr)
def partition(
self,
expr: Expr,
attrs: Optional[Dict[str, Object]] = None,
check: Callable[[Expr], bool] = lambda x: True,
) -> Expr:
"""
Partition the expression into functions defined by this pattern
Parameters
----------
expr : tvm.relay.Expr
The expression to match.
attrs : Optional[Dict[str, Object]]
            A dictionary of Attribute name/values to add to the partitioned function
check : Callable[[Expr], bool]
A function to perform more complicated checks on the matched expression.
Returns true if partitioning should proceed, false otherwise.
Returns
-------
result : tvm.relay.Expr
The Expression with matched subgraphs replaced by function calls to that subgraph
"""
return partition(self, expr, attrs, check)
def dominates(self, parent: "DFPattern", path: "DFPattern" = None):
"""
Create a dominator for this pattern.
Parameters
----------
parent: tvm.relay.dataflow_pattern.DFPattern
The parent pattern this pattern dominates.
path: tvm.relay.dataflow_pattern.DFPattern
The fuzzy path pattern.
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting DominatorPattern.
"""
if path is None:
path = wildcard()
return DominatorPattern(parent, path, self)
def optional(self, option_constructor: Callable[["DFPattern"], "DFPattern"]):
"""
        Create an optional user of this pattern.
Parameters
----------
option_constructor: function
A function that takes a single Pattern parameter and returns
a constructed pattern matching the option
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting Pattern
"""
return self | option_constructor(self)
def is_var(name: str = "") -> "DFPattern":
"""
    Syntactic sugar for creating an optionally named VarPattern.
Parameters
----------
name: str
The name of the input pattern to match.
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting pattern.
"""
return VarPattern(name)
def is_constant() -> "DFPattern":
"""
    Syntactic sugar for creating a ConstantPattern.
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting pattern.
"""
return ConstantPattern()
def is_expr(expr: Expr) -> "DFPattern":
"""
    Syntactic sugar for creating an ExprPattern.
Parameters
----------
expr: Expr
The Relay expression to match.
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting pattern.
"""
return ExprPattern(expr)
def is_op(op_name: str) -> "DFPattern":
"""
    Syntactic sugar for creating an operator ExprPattern.
Parameters
----------
op_name: String
The name of the relay op
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting ExprPattern
"""
op = get(op_name)
return ExprPattern(op)
def is_tuple(fields: tvm.ir.container.Array) -> "DFPattern":
"""
    Syntactic sugar for creating a TuplePattern.
Parameters
----------
fields : Array[tvm.relay.dataflow_pattern.DFPattern]
The fields in the tuple.
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting pattern.
"""
return TuplePattern(fields)
def is_tuple_get_item(tuple_value: "DFPattern", index: Optional[int] = None) -> "DFPattern":
"""
    Syntactic sugar for creating a TupleGetItemPattern.
Parameters
----------
tuple_value: tvm.relay.dataflow_pattern.DFPattern
The input tuple expression.
index: Optional[int]
The index to match; Default (None) to match a TupleGetItem with any index.
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting pattern.
"""
return TupleGetItemPattern(tuple_value, index)
def is_if(cond, true_branch, false_branch):
"""
    Syntactic sugar for creating an IfPattern.
Parameters
----------
cond: tvm.relay.dataflow_pattern.DFPattern
The pattern describing the condition of If.
true_branch: tvm.relay.dataflow_pattern.DFPattern
The pattern describing the true branch of If.
false_branch: tvm.relay.dataflow_pattern.DFPattern
The pattern describing the false branch of If.
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting pattern.
"""
return IfPattern(cond, true_branch, false_branch)
def is_let(var, value, body):
"""
    Syntactic sugar for creating a LetPattern.
Parameters
----------
var: tvm.relay.dataflow_pattern.DFPattern
The pattern describing the variable of Let.
value: tvm.relay.dataflow_pattern.DFPattern
The pattern describing the value of Let.
body: tvm.relay.dataflow_pattern.DFPattern
The pattern describing the body where the binding is in effect.
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting pattern.
"""
return LetPattern(var, value, body)
def wildcard() -> "DFPattern":
"""
    Syntactic sugar for creating a WildcardPattern.
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting pattern.
"""
return WildcardPattern()
def has_type(ttype: tvm.ir.type.Type, pattern: "DFPattern" = None) -> "DFPattern":
"""
    Syntactic sugar for creating a TypePattern
Parameters
----------
ttype: tvm.ir.type.Type
The type to match
pattern: tvm.relay.dataflow_pattern.DFPattern
The pattern that needs type annotation
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting TypePattern
"""
if pattern is None:
pattern = wildcard()
return TypePattern(pattern, ttype)
def has_dtype(dtype: str, pattern: "DFPattern" = None) -> "DFPattern":
"""
    Syntactic sugar for creating a DataTypePattern
Parameters
----------
dtype: str
The dtype to match
pattern: tvm.relay.dataflow_pattern.DFPattern
The pattern that needs type annotation
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting DataTypePattern
"""
if pattern is None:
pattern = wildcard()
return DataTypePattern(pattern, dtype)
def has_shape(shape: List[tvm.ir.PrimExpr], pattern: "DFPattern" = None) -> "DFPattern":
"""
    Syntactic sugar for creating a ShapePattern
Parameters
----------
shape: List[tvm.ir.PrimExpr]
The shape to match
pattern: tvm.relay.dataflow_pattern.DFPattern
The pattern that needs type annotation
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting ShapePattern
"""
if pattern is None:
pattern = wildcard()
return ShapePattern(pattern, shape)
def has_attr(attrs, pattern=None) -> "DFPattern":
"""
    Syntactic sugar for creating an AttrPattern
Parameters
----------
attrs: Dict[str, Object]
The attributes to match
pattern: Optional[tvm.relay.dataflow_pattern.DFPattern]
The input pattern.
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting AttrPattern
"""
if pattern is None:
pattern = wildcard()
return pattern.has_attr(attrs)
def dominates(parent: "DFPattern", path: "DFPattern", child: "DFPattern") -> "DFPattern":
"""
    Syntactic sugar for creating a Dominator pattern
Parameters
----------
parent: tvm.relay.dataflow_pattern.DFPattern
The parent pattern.
path: tvm.relay.dataflow_pattern.DFPattern
The fuzzy path pattern.
child: tvm.relay.dataflow_pattern.DFPattern
The child pattern.
Returns
-------
result: tvm.relay.dataflow_pattern.DFPattern
The resulting DominatorPattern.
"""
return DominatorPattern(parent, path, child)
def match(pattern: "DFPattern", expr: Expr) -> bool:
"""
Match a pattern to an expression
Parameters
----------
pattern: tvm.relay.dataflow_pattern.DFPattern
The input pattern.
expr : tvm.relay.Expr
The expression to match.
"""
return ffi.match(pattern, expr)
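# A hedged illustration (not part of the original module): build a pattern with
# the sugar defined above and match it against a Relay expression. Names below
# are examples only.
def _example_match_add():  # pragma: no cover
    from tvm import relay

    pat = is_op("add")(wildcard(), wildcard())
    x = relay.var("x", shape=(2,), dtype="float32")
    return match(pat, relay.add(x, x))  # True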
@register_df_node
class ExprPattern(DFPattern):
"""A pattern which matches a constant expression.
Parameters
----------
expr : tvm.relay.Expr
The expression to match.
"""
def __init__(self, expr: Expr):
self.__init_handle_by_constructor__(ffi.ExprPattern, expr)
@register_df_node
class VarPattern(DFPattern):
"""A local variable in Relay.
Local variable can be used to declare input
arguments to a function, or intermediate variables.
Parameters
----------
name_hint: str
The name of the variable. Optional, if not provided,
the pattern will match any VarNode.
"""
def __init__(self, name_hint: str = ""):
self.__init_handle_by_constructor__(ffi.VarPattern, name_hint)
@register_df_node
class ConstantPattern(DFPattern):
"""A pattern matching a Relay Constant."""
def __init__(self):
self.__init_handle_by_constructor__(ffi.ConstantPattern)
@register_df_node
class CallPattern(DFPattern):
"""A pattern matching a function call node in Relay.
Parameters
----------
op: relay.dataflow_pattern.DFPattern
The operation to be called.
args: List[relay.dataflow_pattern.DFPattern]
The arguments to the call or None to match any arguments.
"""
def __init__(
self,
op: "DFPattern",
args: List["DFPattern"],
):
self.__init_handle_by_constructor__(ffi.CallPattern, op, args)
@register_df_node
class FunctionPattern(DFPattern):
"""A pattern matching a function node in Relay.
Parameters
----------
params: List[relay.dataflow_pattern.DFPattern]
The parameters to the Function or None to match any parameters.
body: relay.dataflow_pattern.DFPattern
        The body of the Function
"""
def __init__(
self,
params: List["DFPattern"],
body: "DFPattern",
):
self.__init_handle_by_constructor__(ffi.FunctionPattern, params, body)
@register_df_node
class IfPattern(DFPattern):
"""A patern matching a Relay If.
Parameters
----------
cond: tvm.relay.dataflow_pattern.DFPattern
The pattern describing the condition of If.
true_branch: tvm.relay.dataflow_pattern.DFPattern
The pattern describing the true branch of If.
false_branch: tvm.relay.dataflow_pattern.DFPattern
The pattern describing the false branch of If.
"""
def __init__(self, cond: "DFPattern", true_branch: "DFPattern", false_branch: "DFPattern"):
self.__init_handle_by_constructor__(ffi.IfPattern, cond, true_branch, false_branch)
@register_df_node
class LetPattern(DFPattern):
"""A patern matching a Relay Let.
Parameters
----------
var: tvm.relay.dataflow_pattern.DFPattern
The pattern describing the variable of Let.
value: tvm.relay.dataflow_pattern.DFPattern
The pattern describing the value of Let.
body: tvm.relay.dataflow_pattern.DFPattern
The pattern describing the body where the binding is in effect.
"""
def __init__(self, var: "DFPattern", value: "DFPattern", body: "DFPattern"):
self.__init_handle_by_constructor__(ffi.LetPattern, var, value, body)
@register_df_node
class TuplePattern(DFPattern):
"""A patern matching a Relay Tuple.
Parameters
----------
fields : Array[tvm.relay.dataflow_pattern.DFPattern]
The fields in the tuple.
"""
def __init__(self, fields: tvm.ir.container.Array):
self.__init_handle_by_constructor__(ffi.TuplePattern, fields)
def __getitem__(self, index: int):
if index >= len(self):
raise IndexError("TuplePattern index out of range")
return self.fields[index]
def __len__(self):
return len(self.fields)
def astype(self, _):
raise TypeError("astype cannot be used on TuplePattern")
@register_df_node
class TupleGetItemPattern(DFPattern):
"""Get index-th item from a TuplePattern.
Parameters
----------
tuple_value: tvm.relay.dataflow_pattern.DFPattern
The input tuple expression.
index: Optional[int]
The index to match; Default (None) to match a TupleGetItem with any index.
"""
def __init__(self, tuple_value: "DFPattern", index: Optional[int] = None):
match_index = index if index is not None else -1
self.__init_handle_by_constructor__(ffi.TupleGetItemPattern, tuple_value, match_index)
@register_df_node
class AltPattern(DFPattern):
"""Create a Pattern that can match one of two conditions
Parameters
----------
left: tvm.relay.dataflow_pattern.DFPattern
One possible matching pattern.
right: tvm.relay.dataflow_pattern.DFPattern
One possible matching pattern.
"""
def __init__(self, left: "DFPattern", right: "DFPattern"):
self.__init_handle_by_constructor__(ffi.AltPattern, left, right)
@register_df_node
class WildcardPattern(DFPattern):
"""A pattern which matches anything."""
def __init__(self):
self.__init_handle_by_constructor__(ffi.WildcardPattern)
@register_df_node
class TypePattern(DFPattern):
"""A pattern that matches another pattern with a certain type annotation.
Parameters
----------
pattern: tvm.relay.dataflow_pattern.DFPattern
The input pattern that needs type annotation.
ttype: tvm.ir.type.Type
The type to match.
"""
def __init__(self, pattern: "DFPattern", ttype: tvm.ir.type.Type):
self.__init_handle_by_constructor__(ffi.TypePattern, pattern, ttype)
@register_df_node
class DataTypePattern(DFPattern):
"""A pattern that matches another pattern with certain data type
Parameters
----------
pattern: tvm.relay.dataflow_pattern.DFPattern
The input pattern that needs type annotation.
dtype: str
The dtype to match.
"""
def __init__(self, pattern: "DFPattern", dtype: str):
self.__init_handle_by_constructor__(ffi.DataTypePattern, pattern, dtype)
@register_df_node
class ShapePattern(DFPattern):
"""A pattern that matches another pattern with a certain tensor shape
Parameters
----------
pattern: tvm.relay.dataflow_pattern.DFPattern
The input pattern that needs type annotation.
shape: List[tvm.ir.PrimExpr]
The shape to match.
"""
def __init__(self, pattern: "DFPattern", shape: List[tvm.ir.PrimExpr]):
self.__init_handle_by_constructor__(ffi.ShapePattern, pattern, shape)
@register_df_node
class AttrPattern(DFPattern):
"""Get match an expression with a certain attributes.
Currently only supports Op Attributes, not call Attributes.
Parameters
----------
pattern: tvm.relay.dataflow_pattern.DFPattern
The input pattern.
attrs: tvm.ir.attrs.Attrs
The attributes to match.
"""
def __init__(self, pattern: "DFPattern", attrs: tvm.ir.attrs.Attrs):
self.__init_handle_by_constructor__(ffi.AttrPattern, pattern, attrs)
@register_df_node
class DominatorPattern(DFPattern):
"""Match a domination graph.
Parameters
----------
parent: tvm.relay.dataflow_pattern.DFPattern
The parent, i.e., the single node which produces something,
later aggregated by the child.
path: tvm.relay.dataflow_pattern.DFPattern
The fuzzy path pattern between parent and child,
typically matches elementwise ops.
child: tvm.relay.dataflow_pattern.DFPattern
The last node in the domination which is the end user
for all nodes in the path and the parent.
"""
def __init__(self, parent: "DFPattern", path: "DFPattern", child: "DFPattern"):
self.__init_handle_by_constructor__(ffi.DominatorPattern, parent, path, child)
class DFPatternCallback:
"""A Callback for Pattern Rewriting.
When rewrite is called on this DFPatternCallback, the backend will find matches for the
pattern, call the callback function, and replace the matched expression with whatever
the callback returns.
    Users are expected to inherit from this class and provide a "self.pattern" to match
Parameters
----------
require_type: bool
Whether InferType is required to be run before the callback.
rewrite_once: bool
If True, run the callback only once.
"""
def __init__(self, require_type=False, rewrite_once=False):
self.pattern = None
self.require_type = require_type
self.rewrite_once = rewrite_once
def rewrite(self, expr: Expr) -> Expr:
"""
Rewrite expression with this callback
Parameters
----------
expr : tvm.relay.Expr
The expression to rewrite.
Returns
-------
result : tvm.relay.Expr
The Expression with matched subgraphs rewritten by the callbacks.
"""
return rewrite(self, expr)
def callback(self, pre: Expr, post: Expr, node_map: tvm.ir.container.Map) -> Expr:
"""
Callback function to use when we found a match to the pattern
Parameters
----------
pre : tvm.relay.Expr
The matching expression from the original graph.
post : tvm.relay.Expr
The matching expression with rewritten inputs
node_map : tvm.ir.container.Map[DFPattern, List[Expr]]
The map between patterns and matched expressions
Returns
-------
result : tvm.relay.Expr
The Expression with matched subgraph rewritten by the callback
"""
raise "Unimplemented"
class _DFPatternCallback(Object):
"""C++ implemenation"""
def __init__(self, pattern, callback, require_type, rewrite_once):
self.__init_handle_by_constructor__(
ffi.DFPatternCallback, pattern, callback, require_type, rewrite_once
)
def rewrite(callbacks, expr: Expr, mod: Optional[_ir.IRModule] = None) -> Expr:
"""
Rewrite expression with the given callbacks.
Parameters
----------
callbacks: tvm.relay.dataflow_pattern.DFPatternCallback
The input callback or list of callbacks.
expr : tvm.relay.Expr
The expression to rewrite.
mod : Optional[tvm.ir.IRModule]
The module that associates with the expression.
Returns
-------
result : tvm.relay.Expr
The Expression with matched subgraphs rewritten by the callbacks.
"""
if mod is None:
mod = _ir.IRModule()
callbacks = [callbacks] if isinstance(callbacks, DFPatternCallback) else callbacks
tmp = []
for callback in callbacks:
assert callback.pattern is not None
tmp.append(
_DFPatternCallback(
callback.pattern, callback.callback, callback.require_type, callback.rewrite_once
)
)
return ffi.rewrite(tmp, expr, mod)
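# A hedged illustration (not part of the original module): a minimal callback
# that rewrites `add(x, x)` into `multiply(x, 2)`. The class and variable names
# below are examples only.
def _example_rewrite_add_to_mul():  # pragma: no cover
    from tvm import relay

    class AddToMul(DFPatternCallback):
        def __init__(self):
            super().__init__()
            self.x = wildcard()
            self.pattern = is_op("add")(self.x, self.x)

        def callback(self, pre, post, node_map):
            x = node_map[self.x][0]
            return relay.multiply(x, relay.const(2.0))

    inp = relay.var("inp", shape=(2, 2), dtype="float32")
    return rewrite(AddToMul(), relay.add(inp, inp))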
def partition(
pattern: "DFPattern",
expr: Expr,
attrs: Optional[Dict[str, Object]] = None,
check: Callable[[Expr], bool] = lambda x: True,
) -> Expr:
"""
    Partition the expression into a series of functions that match the pattern
Parameters
----------
pattern: tvm.relay.dataflow_pattern.DFPattern
The pattern to match
expr : tvm.relay.Expr
The expression to split into functions
attrs : Optional[Dict[str, Object]]
A dict of attributes to apply to the partitioned function
check : Callable[[Expr], bool]
A function to perform more complicated checks on the matched expression.
Returns true if partitioning should proceed, false otherwise.
Returns
-------
result : tvm.relay.Expr
The Expression with matched subgraphs replaced by function calls to that subgraph
"""
return ffi.partition(pattern, expr, attrs, check)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/dataflow_pattern/_ffi.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""DataFlow Pattern Language FFI bindings."""
import tvm._ffi
tvm._ffi._init_api("relay.dataflow_pattern", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/debug.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, redefined-builtin, invalid-name
"""The Relay IR namespace containing the IR definition and compiler."""
import tvm._ffi
# pylint: disable=unused-argument, import-outside-toplevel
def _debugger_init(expr, stack):
import pdb
pdb.set_trace()
@tvm._ffi.register_func("relay.debug")
def _debug(*args):
import pdb
pdb.set_trace()
# pylint: disable=unused-argument
@tvm._ffi.register_func("relay.debug_interp")
def _debug_interp(*args):
_, _, _, ist = args
print("Relay Debugger")
print(" You can manipulate the expression under evaluation with the name `expr`.")
print(" You can manipulate the call stack with the name `stack`.")
print("--------------")
print("--------------")
_debugger_init(ist.current_expr, ist.stack)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/expr.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, invalid-name, unused-import
"""The expression nodes of Relay."""
from __future__ import absolute_import
from numbers import Number as _Number
import numpy as _np
import tvm._ffi
from tvm._ffi import base as _base
from tvm.runtime import NDArray, ndarray as _nd
from tvm.ir import RelayExpr, GlobalVar, Node
from .base import RelayNode
from . import _ffi_api
from . import ty as _ty
# alias relay expr as Expr.
Expr = RelayExpr
# will be registered afterwards
_op_make = None
class ExprWithOp(RelayExpr):
"""Basetype of all relay expressions that defines op overloading."""
def astype(self, dtype):
"""Cast the content type of the current data to dtype.
Parameters
----------
dtype : str
The target data type.
Note
----
This function only works for TensorType Exprs.
Returns
-------
result : tvm.relay.Expr
The result expression.
"""
return _ffi_api.cast(self, dtype)
def __neg__(self):
return _op_make.negative(self)
def __lt__(self, other):
if isinstance(other, Expr):
return _op_make.less(self, other)
elif isinstance(other, _Number):
raise TypeError('convert "%s" with `const` first' % str(other))
else:
raise TypeError("type %s not supported" % str(type(other)))
def __gt__(self, other):
if isinstance(other, Expr):
return _op_make.greater(self, other)
elif isinstance(other, _Number):
raise TypeError('convert "%s" with `const` first' % str(other))
else:
raise TypeError("type %s not supported" % str(type(other)))
def __ge__(self, other):
if isinstance(other, Expr):
return _op_make.greater_equal(self, other)
elif isinstance(other, _Number):
raise TypeError('convert "%s" with `const` first' % str(other))
else:
raise TypeError("type %s not supported" % str(type(other)))
def __le__(self, other):
if isinstance(other, Expr):
return _op_make.less_equal(self, other)
elif isinstance(other, _Number):
raise TypeError('convert "%s" with `const` first' % str(other))
else:
raise TypeError("type %s not supported" % str(type(other)))
def __add__(self, other):
if isinstance(other, Expr):
return _op_make.add(self, other)
elif isinstance(other, _Number):
raise TypeError('convert "%s" with `const` first' % str(other))
else:
raise TypeError("type %s not supported" % str(type(other)))
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, Expr):
return _op_make.subtract(self, other)
elif isinstance(other, _Number):
raise TypeError('convert "%s" with `const` first' % str(other))
else:
raise TypeError("type %s not supported" % str(type(other)))
def __rsub__(self, other):
if isinstance(other, _Number):
raise TypeError('convert "%s" with `const` first' % str(other))
raise TypeError("type %s not supported" % str(type(other)))
def __mul__(self, other):
if isinstance(other, Expr):
return _op_make.multiply(self, other)
elif isinstance(other, _Number):
raise TypeError('convert "%s" with `const` first' % str(other))
else:
raise TypeError("type %s not supported" % str(type(other)))
def __rmul__(self, other):
return self.__mul__(other)
def __div__(self, other):
if isinstance(other, Expr):
return _op_make.divide(self, other)
elif isinstance(other, _Number):
raise TypeError('convert "%s" with `const` first' % str(other))
else:
raise TypeError("type %s not supported" % str(type(other)))
def __rdiv__(self, other):
if isinstance(other, _Number):
raise TypeError('convert "%s" with `const` first' % str(other))
raise TypeError("type %s not supported" % str(type(other)))
def __truediv__(self, other):
return self.__div__(other)
def __rtruediv__(self, other):
return self.__rdiv__(other)
def __call__(self, *args):
"""Call the variable (if it represents a function).
Parameters
----------
args: List[relay.Expr]
The arguments to the call.
Returns
-------
call: Call
A call taking the variable as a function.
"""
return Call(self, args)
@tvm._ffi.register_object("relay.Constant")
class Constant(ExprWithOp):
"""A constant expression in Relay.
Parameters
----------
data : tvm.nd.NDArray
The data content of the constant expression.
"""
def __init__(self, data):
self.__init_handle_by_constructor__(_ffi_api.Constant, data)
@tvm._ffi.register_object("relay.Tuple")
class Tuple(ExprWithOp):
"""Tuple expression that groups several fields together.
Parameters
----------
fields : List[tvm.relay.Expr]
The fields in the tuple.
span: Optional[tvm.relay.Span]
Span that points to original source code
"""
def __init__(self, fields, span=None):
self.__init_handle_by_constructor__(_ffi_api.Tuple, fields, span)
def __getitem__(self, index):
if index >= len(self):
raise IndexError("Tuple index out of range")
return self.fields[index]
def __len__(self):
return len(self.fields)
def astype(self, _):
raise TypeError("astype cannot be used on tuple")
@tvm._ffi.register_object("relay.Var")
class Var(ExprWithOp):
"""A local variable in Relay.
Local variable can be used to declare input
arguments to a function, or intermediate variables.
Parameters
----------
name_hint: str
The name of the variable.
This name only acts as a hint, and is not used
for equality.
type_annotation: tvm.relay.Type, optional
The type annotation on the variable.
"""
def __init__(self, name_hint, type_annotation=None):
self.__init_handle_by_constructor__(_ffi_api.Var, name_hint, type_annotation)
@property
def name_hint(self):
"""Get name hint of the current var."""
name = str(self.vid.name_hint)
return name
@tvm._ffi.register_object("relay.Call")
class Call(ExprWithOp):
"""Function call node in Relay.
    Call node corresponds to the operator application node
in computational graph terminology.
Parameters
----------
op: tvm.ir.Op or any tvm.relay.Expr with function type.
The operation to be called.
args: List[tvm.relay.Expr]
The arguments to the call.
attrs: Optional[tvm.Attrs]
Attributes to the call, can be None
type_args: Optional[List[tvm.relay.Type]]
The additional type arguments, this is only
used in advanced usecase of template functions.
span: Optional[tvm.relay.Span]
Span that points to original source code
"""
def __init__(self, op, args, attrs=None, type_args=None, span=None):
if not type_args:
type_args = []
self.__init_handle_by_constructor__(_ffi_api.Call, op, args, attrs, type_args, span)
@tvm._ffi.register_object("relay.Let")
class Let(ExprWithOp):
"""Let variable binding expression.
Parameters
----------
variable: tvm.relay.Var
The local variable to be bound.
value: tvm.relay.Expr
The value to be bound.
body: tvm.relay.Expr
The body of the let binding.
"""
def __init__(self, variable, value, body):
self.__init_handle_by_constructor__(_ffi_api.Let, variable, value, body)
@tvm._ffi.register_object("relay.If")
class If(ExprWithOp):
"""A conditional expression in Relay.
Parameters
----------
cond: tvm.relay.Expr
The condition.
true_branch: tvm.relay.Expr
The expression evaluated when condition is true.
false_branch: tvm.relay.Expr
The expression evaluated when condition is false.
"""
def __init__(self, cond, true_branch, false_branch):
self.__init_handle_by_constructor__(_ffi_api.If, cond, true_branch, false_branch)
@tvm._ffi.register_object("relay.TupleGetItem")
class TupleGetItem(ExprWithOp):
"""Get index-th item from a tuple.
Parameters
----------
tuple_value: tvm.relay.Expr
The input tuple expression.
index: int
The index.
"""
def __init__(self, tuple_value, index):
self.__init_handle_by_constructor__(_ffi_api.TupleGetItem, tuple_value, index)
@tvm._ffi.register_object("relay.RefCreate")
class RefCreate(ExprWithOp):
"""Create a new reference from initial value.
Parameters
----------
value: tvm.relay.Expr
The initial value.
"""
def __init__(self, value):
self.__init_handle_by_constructor__(_ffi_api.RefCreate, value)
@tvm._ffi.register_object("relay.RefRead")
class RefRead(ExprWithOp):
"""Get the value inside the reference.
Parameters
----------
ref: tvm.relay.Expr
The reference.
"""
def __init__(self, ref):
self.__init_handle_by_constructor__(_ffi_api.RefRead, ref)
@tvm._ffi.register_object("relay.RefWrite")
class RefWrite(ExprWithOp):
"""
Update the value inside the reference.
The whole expression will evaluate to an empty tuple.
Parameters
----------
ref: tvm.relay.Expr
The reference.
value: tvm.relay.Expr
The new value.
"""
def __init__(self, ref, value):
self.__init_handle_by_constructor__(_ffi_api.RefWrite, ref, value)
class TempExpr(ExprWithOp):
"""Baseclass of all TempExpr.
TempExprs are pass specific expression that can be
useful to define intermediate result in the
rewriting pass such as layout or type transformation.
"""
def realize(self):
"""Convert the expression to a normal(non-temp) Expr.
Returns
-------
The corresponding normal expression.
"""
return _ffi_api.TempExprRealize(self)
class TupleWrapper(object):
"""TupleWrapper.
This class is a Python wrapper for a Relay tuple of known size.
It allows for accessing the fields of the Relay tuple as though
it were a Python tuple.
Parameters
----------
tuple_value: tvm.relay.Expr
The input tuple
size: int
The size of the tuple.
"""
def __init__(self, tuple_value, size):
self.tuple_value = tuple_value
self.size = size
def astuple(self):
"""Returns the underlying Relay tuple if this wrapper is passed
as an argument to an FFI function."""
return self.tuple_value
def astext(self):
"""Get the text format of the tuple expression.
Returns
-------
text : str
The text format of the tuple expression.
"""
return self.tuple_value.astext()
def __getitem__(self, index):
if index >= len(self):
raise IndexError("Tuple index out of range")
return TupleGetItem(self.tuple_value, index)
def __len__(self):
return self.size
def __repr__(self):
return "TupleWrapper(" + self.tuple_value.__repr__() + ", " + str(self.size) + ")"
def astype(self, _):
raise TypeError("astype cannot be used on tuple")
def var(name_hint, type_annotation=None, shape=None, dtype="float32"):
"""Create a new tvm.relay.Var.
    This is a simple wrapper function that allows specifying
shape and dtype directly.
Parameters
----------
name_hint: str
The name of the variable.
This name only acts as a hint, and is not used
for equality.
type_annotation: Optional[tvm.relay.Type, str]
The type annotation on the variable.
When type_annotation is a str, we will create a scalar variable.
shape: Optional[List[tvm.Expr]]
The shape of the tensor type.
dtype: str, optional
The data type of the tensor.
Examples
--------
.. code-block:: python
# The following 4 lines are equivalent to each other
x = tvm.relay.Var("x", tvm.relay.TensorType([1, 2]))
x = tvm.relay.var("x", tvm.relay.TensorType([1, 2]))
x = tvm.relay.var("x", shape=[1, 2])
x = tvm.relay.var("x", shape=[1, 2], dtype="float32")
# The following 2 lines are equivalent to each other.
y = tvm.relay.var("x", "float32")
y = tvm.relay.var("x", shape=(), dtype="float32")
"""
if type_annotation is not None and shape is not None:
raise ValueError("Can only specify either type_annotation or shape.")
if shape is not None:
type_annotation = _ty.TensorType(shape, dtype)
elif isinstance(type_annotation, str):
type_annotation = _ty.TensorType((), type_annotation)
return Var(name_hint, type_annotation)
def const(value, dtype=None):
"""Create a constant value.
Parameters
----------
value: Union[bool, int, float, numpy.ndarray, tvm.nd.NDArray]
The constant value.
dtype: str, optional
The data type of the resulting constant.
Note
----
When dtype is None, we use the following rule:
- int maps to "int32"
- float maps to "float32"
- bool maps to "bool"
- other using the same default rule as numpy.
"""
if isinstance(value, (_base.numeric_types, (bool, list))):
value = _np.array(value, dtype=dtype)
if not dtype:
# when dtype is None: int maps to "int32", float maps to "float32"
dtype = {_np.dtype("int64"): _np.int32, _np.dtype("float64"): _np.float32}.get(
value.dtype, None
)
if isinstance(value, (_np.ndarray, _np.generic)):
if dtype is not None:
value = value.astype(dtype)
value = _nd.array(value)
if not isinstance(value, _nd.NDArray):
raise ValueError("value has to be scalar or NDArray")
return Constant(value)
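# A hedged illustration (not part of the original module) of the default dtype
# rules described in the Note above.
def _example_const_dtypes():  # pragma: no cover
    assert const(1).data.dtype == "int32"
    assert const(1.0).data.dtype == "float32"
    assert const(True).data.dtype == "bool"
    assert const(1, dtype="int64").data.dtype == "int64"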
def bind(expr, binds):
"""Bind an free variables in expr or function arguments.
We can bind parameters expr if it is a function.
Parameters
----------
expr : tvm.relay.Expr
The input expression.
binds : Map[tvm.relay.Var, tvm.relay.Expr]
The specific bindings.
Returns
-------
result : tvm.relay.Expr
The expression or function after binding.
"""
return _ffi_api.Bind(expr, binds)
@tvm._ffi.register_object("relay.StorageInfo")
class StorageInfo(Node):
"""StorageInfo
The static storage information produced by memory planning.
Contains the storage ids where expressions are stored, the
type of the "virtual devices" the expressions are stored on,
and the sizes of each storage element."""
def __init__(self, sids, dev_types, sizes):
self.__init_handle_by_constructor__(_ffi_api.StorageInfo, sids, dev_types, sizes)
@property
def storage_ids(self):
return _ffi_api.StorageInfoStorageIds(self)
@property
def device_types(self):
return _ffi_api.StorageInfoDeviceTypes(self)
@property
def storage_sizes(self):
return _ffi_api.StorageInfoStorageSizes(self)
@property
def virtual_devices(self):
return _ffi_api.StorageInfoVirtualDevices(self)
@tvm._ffi.register_object("relay.StaticMemoryPlan")
class StaticMemoryPlan(Node):
"""StaticMemoryPlan
The result of static memory planning."""
def __init__(self, expr_to_storage_info):
self.__init_handle_by_constructor__(_ffi_api.StaticMemoryPlan, expr_to_storage_info)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/expr_functor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name
"""The expression functor of Relay."""
from tvm.ir import Op
from .function import Function, FunctionWithFields
from .expr import Call, Let, Var, GlobalVar
from .expr import If, Tuple, TupleGetItem, Constant
from .expr import RefCreate, RefRead, RefWrite
from .adt import Constructor, Match, Clause
class ExprFunctor:
"""
An abstract visitor defined over Expr.
Defines the default dispatch over expressions, and
implements memoization.
"""
def __init__(self):
self.memo_map = {}
# pylint: disable=no-else-return
def visit(self, expr):
"""Apply the visitor to an expression."""
if expr in self.memo_map:
return self.memo_map[expr]
if isinstance(expr, Function):
res = self.visit_function(expr)
elif isinstance(expr, Call):
res = self.visit_call(expr)
elif isinstance(expr, Let):
res = self.visit_let(expr)
elif isinstance(expr, Var):
res = self.visit_var(expr)
elif isinstance(expr, GlobalVar):
res = self.visit_global_var(expr)
elif isinstance(expr, If):
res = self.visit_if(expr)
elif isinstance(expr, Tuple):
res = self.visit_tuple(expr)
elif isinstance(expr, TupleGetItem):
res = self.visit_tuple_getitem(expr)
elif isinstance(expr, Constant):
res = self.visit_constant(expr)
elif isinstance(expr, Op):
res = self.visit_op(expr)
elif isinstance(expr, RefCreate):
res = self.visit_ref_create(expr)
elif isinstance(expr, RefRead):
res = self.visit_ref_read(expr)
elif isinstance(expr, RefWrite):
res = self.visit_ref_write(expr)
elif isinstance(expr, Constructor):
res = self.visit_constructor(expr)
elif isinstance(expr, Match):
res = self.visit_match(expr)
else:
raise Exception("warning unhandled case: {0}".format(type(expr)))
self.memo_map[expr] = res
return res
def visit_function(self, _):
raise NotImplementedError()
def visit_let(self, _):
raise NotImplementedError()
def visit_call(self, _):
raise NotImplementedError()
def visit_var(self, _):
raise NotImplementedError()
def visit_type(self, typ):
return typ
def visit_if(self, _):
raise NotImplementedError()
def visit_tuple(self, _):
raise NotImplementedError()
def visit_tuple_getitem(self, _):
raise NotImplementedError()
def visit_global_var(self, _):
raise NotImplementedError()
def visit_op(self, _):
raise NotImplementedError()
def visit_constant(self, _):
raise NotImplementedError()
def visit_ref_create(self, _):
raise NotImplementedError()
def visit_ref_write(self, _):
raise NotImplementedError()
def visit_ref_read(self, _):
raise NotImplementedError()
def visit_constructor(self, _):
raise NotImplementedError()
def visit_match(self, _):
raise NotImplementedError()
class ExprVisitor(ExprFunctor):
"""
A visitor over Expr.
The default behavior recursively traverses the AST.
"""
def visit_tuple(self, tup):
for x in tup.fields:
self.visit(x)
def visit_call(self, call):
self.visit(call.op)
for a in call.args:
self.visit(a)
def visit_var(self, var):
pass
def visit_let(self, let):
self.visit(let.var)
self.visit(let.value)
self.visit(let.body)
def visit_function(self, fn):
for x in fn.params:
self.visit(x)
self.visit(fn.body)
def visit_if(self, i):
self.visit(i.cond)
self.visit(i.true_branch)
self.visit(i.false_branch)
def visit_global_var(self, gv):
pass
def visit_constructor(self, c):
pass
def visit_op(self, op):
pass
def visit_constant(self, const):
pass
def visit_ref_create(self, r):
self.visit(r.value)
def visit_ref_read(self, r):
self.visit(r.ref)
def visit_ref_write(self, r):
self.visit(r.ref)
self.visit(r.value)
def visit_tuple_getitem(self, t):
self.visit(t.tuple_value)
def visit_match(self, m):
self.visit(m.data)
for c in m.clauses:
self.visit(c.rhs)
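# Illustrative sketch (not part of the original module): a minimal ExprVisitor
# subclass that counts Call nodes while reusing the default traversal above.
# Usage (hypothetical): counter = _CallCounter(); counter.visit(expr); counter.count
class _CallCounter(ExprVisitor):
    """Example visitor that counts how many Call nodes an expression contains."""
    def __init__(self):
        super().__init__()
        self.count = 0
    def visit_call(self, call):
        self.count += 1
        # defer to the default implementation so the op and the arguments are visited too
        super().visit_call(call)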
class ExprMutator(ExprFunctor):
"""
A functional visitor over Expr.
The default behavior recursively traverses the AST
and reconstructs the AST.
"""
def visit_function(self, fn):
new_params = [self.visit(x) for x in fn.params]
new_body = self.visit(fn.body)
return FunctionWithFields(
fn,
list(new_params),
new_body,
)
def visit_let(self, let):
new_var = self.visit(let.var)
new_val = self.visit(let.value)
new_body = self.visit(let.body)
return Let(new_var, new_val, new_body)
def visit_call(self, call):
new_fn = self.visit(call.op)
new_args = [self.visit(arg) for arg in call.args]
return Call(new_fn, new_args, call.attrs, call.type_args, call.span)
def visit_var(self, var):
return var
def visit_global_id(self, global_var):
return global_var
def visit_if(self, ite):
return If(self.visit(ite.cond), self.visit(ite.true_branch), self.visit(ite.false_branch))
def visit_tuple(self, tup):
return Tuple([self.visit(field) for field in tup.fields], tup.span)
def visit_tuple_getitem(self, op):
tuple_value = self.visit(op.tuple_value)
if not tuple_value.same_as(op.tuple_value):
return TupleGetItem(tuple_value, op.index)
return op
def visit_global_var(self, gvar):
return gvar
def visit_op(self, op):
return op
def visit_constant(self, const):
return const
def visit_constructor(self, con):
return con
def visit_match(self, m):
return Match(
self.visit(m.data),
[Clause(c.lhs, self.visit(c.rhs)) for c in m.clauses],
complete=m.complete,
)
def visit_ref_create(self, r):
return RefCreate(self.visit(r.value))
def visit_ref_write(self, r):
return RefWrite(self.visit(r.ref), self.visit(r.value))
def visit_ref_read(self, r):
return RefRead(self.visit(r.ref))
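# Illustrative sketch (not part of the original module): a minimal ExprMutator
# subclass that substitutes one Var with another expression; every other node is
# rebuilt (or reused) by the default visit_* methods defined above.
class _VarSubstituter(ExprMutator):
    """Example mutator replacing occurrences of `target` with `replacement`."""
    def __init__(self, target, replacement):
        super().__init__()
        self.target = target
        self.replacement = replacement
    def visit_var(self, var):
        # same_as checks object identity, matching how memo_map keys are compared
        return self.replacement if var.same_as(self.target) else var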
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Frontends for constructing Relay programs.
Contains the model importers currently defined
for Relay.
"""
from .mxnet import from_mxnet
from .mxnet_qnn_op_utils import quantize_conv_bias_mkldnn_from_var
from .keras import from_keras
from .oneflow import from_oneflow
from .onnx import from_onnx
from .tflite import from_tflite
from .coreml import from_coreml
from .caffe2 import from_caffe2
from .tensorflow import from_tensorflow
from .darknet import from_darknet
from .pytorch import from_pytorch
from .caffe import from_caffe
from .paddlepaddle import from_paddle
from .change_datatype import ChangeDatatype
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/caffe.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, too-many-lines, import-outside-toplevel
# pylint: disable=no-else-return, no-else-continue
"""Caffe frontend."""
import numpy as np
import tvm
from tvm.ir import IRModule
from ... import nd as _nd
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from .common import ExprTable
from .common import infer_shape as _infer_shape
__all__ = ["from_caffe"]
class OperatorConverter(object):
"""Operator Converted for converting Caffe ops to Relay ops"""
def __init__(self, init_layer_dict, predict_layer, exp_tab):
self.init_layer_dict = init_layer_dict
self.predict_layer = predict_layer
self.exp_tab = exp_tab
self.new_bn = {}
self.changed_layers = None
self.convert_map = {
"BatchNorm": self.convert_batch_norm,
"Concat": self.convert_concat,
"Convolution": self.convert_conv,
"Crop": self.convert_crop,
"Deconvolution": self.convert_deconv,
"Dropout": self.convert_dropout,
"Eltwise": self.convert_eltwise,
"Embed": self.convert_embed,
"Flatten": self.convert_flatten,
"InnerProduct": self.convert_innerproduct,
"Input": None,
"LRN": self.convert_lrn,
"Permute": self.convert_permute,
"Pooling": self.convert_pooling,
"Power": self.convert_power,
"PReLU": self.convert_prelu,
"ReLU": self.convert_relu,
"Reshape": self.convert_reshape,
"Scale": self.convert_scale,
"Sigmoid": self.convert_sigmoid,
"Slice": self.convert_slice,
"Softmax": self.convert_softmax,
"TanH": self.convert_tanh,
"Reduction": self.convert_reduction,
}
def convert_flatten(self, op):
"""Convert Flatten layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
flatten_params = op.flatten_param.axis
assert flatten_params == 1, "flatten axis should be 1"
out = _op.nn.batch_flatten(in_expr)
return out
def convert_eltwise(self, op):
"""Convert Eltwise layer"""
inputs = op.bottom
        assert len(inputs) >= 2, "There should be at least two input tensors"
        # gathering the initial two input expressions
lhs_expr = self.exp_tab.get_expr(inputs[0])
rhs_expr = self.exp_tab.get_expr(inputs[1])
lhs_shape = _infer_shape(lhs_expr)
rhs_shape = _infer_shape(rhs_expr)
        assert lhs_shape == rhs_shape, "input tensor shapes should be equal"
eltwise_params = op.eltwise_param
eltwise_type_dict = ["PROD", "SUM", "MAX"]
eltwise_type = eltwise_params.operation
coeff = list(eltwise_params.coeff)
if eltwise_type_dict[eltwise_type] == "PROD":
out = _op.multiply(lhs_expr, rhs_expr)
# for rest inputs
for i in range(len(inputs) - 2):
extra_expr = self.exp_tab.get_expr(inputs[i + 2])
assert _infer_shape(out) == _infer_shape(extra_expr)
out = _op.multiply(out, extra_expr)
elif eltwise_type_dict[eltwise_type] == "SUM":
if coeff:
left_coeff_expr = self.exp_tab.new_const(np.asarray(coeff[0], np.float32))
right_coeff_expr = self.exp_tab.new_const(np.asarray(coeff[1], np.float32))
lhs_expr_scale = _op.multiply(lhs_expr, left_coeff_expr)
rhs_expr_scale = _op.multiply(rhs_expr, right_coeff_expr)
out = _op.add(lhs_expr_scale, rhs_expr_scale)
else:
out = _op.add(lhs_expr, rhs_expr)
# for rest inputs
for i in range(len(inputs) - 2):
extra_expr = self.exp_tab.get_expr(inputs[i + 2])
assert _infer_shape(out) == _infer_shape(extra_expr)
if coeff:
coeff_expr = self.exp_tab.new_const(np.asarray(coeff[i + 2], np.float32))
extra_expr_scale = _op.multiply(extra_expr, coeff_expr)
out = _op.add(out, extra_expr_scale)
else:
out = _op.add(out, extra_expr)
elif eltwise_type_dict[eltwise_type] == "MAX":
out = _op.maximum(lhs_expr, rhs_expr)
# for rest inputs
for i in range(len(inputs) - 2):
extra_expr = self.exp_tab.get_expr(inputs[i + 2])
assert _infer_shape(out) == _infer_shape(extra_expr)
out = _op.maximum(out, extra_expr)
else:
raise tvm.error.OpNotImplemented(
"eltwise_type {} is not supported for frontend Caffe.".format(eltwise_type)
)
return out
def _parse_conv_params(self, op):
"""Parse the parameters of Convolution and Deconvolution layer"""
nonzone = lambda val, pos, dflt: val[pos] if pos < len(val) else dflt
conv_params = op.convolution_param
params = dict()
# parse kernel size
if conv_params.kernel_h > 0 or conv_params.kernel_w > 0:
params["kernel_size"] = (conv_params.kernel_h, conv_params.kernel_w)
else:
ksize_h = nonzone(conv_params.kernel_size, 0, 1)
ksize_w = nonzone(conv_params.kernel_size, 1, ksize_h)
params["kernel_size"] = (ksize_h, ksize_w)
# parse padding size
if conv_params.pad_h > 0 or conv_params.pad_w > 0:
params["padding"] = (conv_params.pad_h, conv_params.pad_w)
else:
pad_h = nonzone(conv_params.pad, 0, 0)
pad_w = nonzone(conv_params.pad, 1, pad_h)
params["padding"] = (pad_h, pad_w)
# parse stride size
if conv_params.stride_h > 0 or conv_params.stride_w > 0:
params["strides"] = (conv_params.stride_h, conv_params.stride_w)
else:
stride_h = nonzone(conv_params.stride, 0, 1)
stride_w = nonzone(conv_params.stride, 1, stride_h)
params["strides"] = (stride_h, stride_w)
# parse dilation size
if hasattr(conv_params, "dilation") and len(conv_params.dilation) > 0:
dilation = " ".join(str(d) for d in conv_params.dilation)
dilation = tuple(map(int, dilation.split(" ")))
params["dilation"] = dilation
if len(dilation) == 1:
params["dilation"] = (dilation[0], dilation[0])
params["kernel_layout"] = "OIHW"
params["data_layout"] = "NCHW"
params["groups"] = conv_params.group
params["channels"] = conv_params.num_output
return params
def convert_batch_norm(self, op):
"""Convert BatchNorm layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
n, c, h, w = _infer_shape(in_expr)
if op.name in self.new_bn:
mean, var, eps, gamma, beta = self.new_bn[op.name]
mean_expr = self.exp_tab.new_const(mean, dtype="float32")
var_expr = self.exp_tab.new_const(var, dtype="float32")
gamma_expr = self.exp_tab.new_const(gamma, dtype="float32")
beta_expr = self.exp_tab.new_const(beta, dtype="float32")
out = _op.nn.batch_norm(
in_expr, gamma_expr, beta_expr, mean_expr, var_expr, epsilon=eps, scale=True
)
else:
weight_bias_blobs = self.init_layer_dict[op.name].blobs
mean = np.asarray(weight_bias_blobs[0].data, np.float32)
var = np.asarray(weight_bias_blobs[1].data, np.float32)
if len(weight_bias_blobs) == 2:
mean = np.repeat(mean, h * w).reshape((c, h, w))
mean = np.expand_dims(mean, 0).repeat(n, axis=0)
mean_expr = self.exp_tab.new_const(mean, dtype="float32")
var = np.repeat(var, h * w).reshape((c, h, w))
var = np.expand_dims(var, 0).repeat(n, axis=0)
var_expr = self.exp_tab.new_const(var, dtype="float32")
tmp_out = _op.multiply(in_expr, mean_expr)
out = _op.add(tmp_out, var_expr)
return out
else:
scale = np.asarray(weight_bias_blobs[2].data, np.float32)
if scale:
scale = 1 / scale
mean_expr = self.exp_tab.new_const(mean * scale, dtype="float32")
var_expr = self.exp_tab.new_const(var * scale, dtype="float32")
            # the caffe BatchNorm layer has no learned scale/shift, so use gamma=1 and beta=0
gamma_expr = self.exp_tab.new_const(
np.ones(mean.shape, dtype=np.float32), dtype="float32"
)
beta_expr = self.exp_tab.new_const(
np.zeros(mean.shape, dtype=np.float32), dtype="float32"
)
bn_params = op.batch_norm_param.eps
out = _op.nn.batch_norm(
in_expr, gamma_expr, beta_expr, mean_expr, var_expr, epsilon=bn_params, scale=False
)
return out[0]
def convert_scale(self, op):
"""Convert Scale layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
weight_bias_blobs = self.init_layer_dict[op.name].blobs
params = dict()
params["bias"] = op.scale_param.bias_term
params["axis"] = op.scale_param.axis
gamma = np.asarray(weight_bias_blobs[0].data, np.float32)
gamma_expr = self.exp_tab.new_const(gamma, dtype="float32")
if params["bias"]:
beta = np.asarray(weight_bias_blobs[1].data, np.float32)
beta_expr = self.exp_tab.new_const(beta, dtype="float32")
else:
beta_expr = self.exp_tab.new_const(
np.zeros(gamma.shape, dtype=np.float32), dtype="float32"
)
_, c, _, _ = _infer_shape(in_expr)
gamma_expr = _op.reshape(gamma_expr, newshape=(1, c, 1, 1))
beta_expr = _op.reshape(beta_expr, newshape=(1, c, 1, 1))
out = _op.multiply(in_expr, gamma_expr)
out = _op.add(out, beta_expr)
return out
def convert_concat(self, op):
"""Convert Concat layer"""
inputs = op.bottom
in_expr = (self.exp_tab.get_expr(inputs[i]) for i in range(len(inputs)))
c_params = dict()
c_params["axis"] = op.concat_param.axis
out = _op.concatenate(in_expr, axis=c_params["axis"])
return out
def convert_reshape(self, op):
"""Convert Reshape layer"""
inputs = op.bottom
input_name = inputs[0]
reshape_param = op.reshape_param
dims = list(reshape_param.shape.dim)
in_expr = self.exp_tab.get_expr(input_name)
input_shape = list(_infer_shape(in_expr))
start_axis = int(reshape_param.axis)
if start_axis < 0:
start_axis = len(input_shape) + start_axis + 1
num_axes = int(reshape_param.num_axes)
end_axis = len(input_shape)
if num_axes != -1:
end_axis = start_axis + num_axes
left_shape = input_shape[:start_axis]
if end_axis == len(input_shape):
center_shape = input_shape[start_axis:]
right_shape = []
else:
center_shape = input_shape[start_axis:end_axis]
right_shape = input_shape[end_axis:]
for idx, dim in enumerate(dims):
if dim == 0:
dims[idx] = center_shape[idx]
tmp = np.random.rand(*center_shape)
tmp = np.reshape(tmp, dims)
center_shape = list(tmp.shape)
newshape = left_shape + center_shape + right_shape
out = _op.reshape(in_expr, newshape=newshape)
return out
def convert_softmax(self, op):
"""Convert Softmax layer"""
inputs = op.bottom
        assert len(inputs) == 1, "There should be exactly one input tensor"
input_name = inputs[0]
in_expr = self.exp_tab.get_expr(input_name)
softmax_param = op.softmax_param
parmas = {"axis": softmax_param.axis}
out = _op.nn.softmax(in_expr, **parmas)
return out
def convert_conv(self, op):
"""Convert Convolution layer"""
params = self._parse_conv_params(op)
weight_bias_blobs = self.init_layer_dict[op.name].blobs
conv_params = op.convolution_param
inputs = op.bottom
# process weight and bias blobs
weight, bias = None, None
if len(weight_bias_blobs) > 1:
weight = weight_bias_blobs[0]
bias = weight_bias_blobs[1]
else:
weight = weight_bias_blobs[0]
if weight:
kh, kw = params["kernel_size"]
weight_shape = [conv_params.num_output, -1, kh, kw]
weight_value = np.asarray(weight.data, np.float32)
weight_value = np.reshape(weight_value, weight_shape)
else:
raise Exception("No weight value of layer {} in caffemodel".format(op.name))
weight_expr = self.exp_tab.new_const(weight_value, dtype="float32")
in_expr = self.exp_tab.get_expr(inputs[0])
out = _op.nn.conv2d(data=in_expr, weight=weight_expr, **params)
if bias:
bias_value = np.asarray(bias.data, np.float32)
bias_expr = self.exp_tab.new_const(bias_value, dtype="float32")
out = _op.nn.bias_add(out, bias_expr)
return out
def convert_pooling(self, op):
"""Convert Pooling layer"""
inputs = op.bottom
input_name = inputs[0]
pool_params = op.pooling_param
pool_type_dict = ["MAX", "AVE", "STOCHASTIC"]
params = dict()
# parse pool type: 0: MAX, 1: AVE, 2: STOCHASTIC
pool_type = pool_params.pool
# parse kernel size
if pool_params.kernel_h > 0 or pool_params.kernel_w > 0:
params["pool_size"] = (pool_params.kernel_h, pool_params.kernel_w)
else:
params["pool_size"] = (pool_params.kernel_size, pool_params.kernel_size)
# parse padding size
if pool_params.pad_h > 0 or pool_params.pad_w > 0:
params["padding"] = (pool_params.pad_h, pool_params.pad_w)
else:
params["padding"] = (pool_params.pad, pool_params.pad)
# parse stride size
if pool_params.stride_h > 0 or pool_params.stride_w > 0:
params["strides"] = (pool_params.stride_h, pool_params.stride_w)
else:
params["strides"] = (pool_params.stride, pool_params.stride)
params["ceil_mode"] = True
if hasattr(pool_params, "round_mode"):
params["ceil_mode"] = pool_params.round_mode == "CEIL"
in_expr = self.exp_tab.get_expr(input_name)
if pool_type_dict[pool_type] == "MAX":
if pool_params.global_pooling:
out = _op.nn.global_max_pool2d(in_expr)
else:
if len(op.top) == 1:
out = _op.nn.max_pool2d(in_expr, **params)
elif len(op.top) == 2:
out1 = _op.nn.max_pool2d_with_argmax(in_expr, **params)
out2 = _op.vision.max_pool2d_location(in_expr, **params)
return _expr.Tuple((out1, out2))
elif pool_type_dict[pool_type] == "AVE": # AVE
if pool_params.global_pooling:
out = _op.nn.global_avg_pool2d(in_expr)
else:
params["count_include_pad"] = True
out = _op.nn.avg_pool2d(in_expr, **params)
else:
raise tvm.error.OpNotImplemented(
"Operator {} is not supported for frontend Caffe.".format(
pool_type_dict[pool_type] + " pool"
)
)
return out
def convert_lrn(self, op):
"""Convert LRN layer"""
inputs = op.bottom
input_name = inputs[0]
params = dict()
lrn_params = op.lrn_param
params["size"] = lrn_params.local_size
params["bias"] = lrn_params.k
params["alpha"] = lrn_params.alpha
params["beta"] = lrn_params.beta
in_expr = self.exp_tab.get_expr(input_name)
out = _op.nn.lrn(in_expr, **params)
return out
def convert_innerproduct(self, op):
"""Convert InnerProduct layer"""
inputs = op.bottom
weight_bias_blobs = self.init_layer_dict[op.name].blobs
dense_params = op.inner_product_param
params = dict()
params["num_output"] = dense_params.num_output
params["bias"] = dense_params.bias_term
params["axis"] = dense_params.axis
if params["axis"] != 1:
raise Exception("Only support 2D InnerProduct")
# process weight and bias blobs
weight, bias = None, None
if params["bias"]:
weight = weight_bias_blobs[0]
bias = weight_bias_blobs[1]
else:
weight = weight_bias_blobs[0]
if weight:
weight_value = np.asarray(weight.data, np.float32)
weight_value = np.reshape(weight_value, (params["num_output"], -1))
weight_shape = weight_value.shape
else:
raise Exception("No weight value of layer {} in caffemodel".format(op.name))
weight_expr = self.exp_tab.new_const(weight_value, dtype="float32")
in_expr = self.exp_tab.get_expr(inputs[0])
in_reshape = _op.reshape(data=in_expr, newshape=(-1, weight_shape[-1]))
out = _op.nn.dense(data=in_reshape, weight=weight_expr)
if bias:
bias_value = np.asarray(bias.data, np.float32)
bias_expr = self.exp_tab.new_const(bias_value, dtype="float32")
out = _op.nn.bias_add(out, bias_expr, axis=params["axis"])
return out
def convert_dropout(self, op):
"""Convert Dropout layer"""
inputs = op.bottom
input_name = inputs[0]
params = dict()
dropout_params = op.dropout_param
params["rate"] = dropout_params.dropout_ratio
in_expr = self.exp_tab.get_expr(input_name)
out = _op.nn.dropout(in_expr, **params)
return out
def convert_relu(self, op):
"""Convert ReLU layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
negative_slope = op.relu_param.negative_slope
if negative_slope:
out = _op.nn.leaky_relu(in_expr, negative_slope)
return out
out = _op.nn.relu(in_expr)
return out
def convert_prelu(self, op):
"""Convert PReLU layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
alpha = self.init_layer_dict[op.name].blobs[0].data
alpha = np.asarray(alpha, np.float32)
alpha = self.exp_tab.new_const(alpha, dtype="float32")
axis = 1
out = _op.nn.prelu(in_expr, alpha, axis=axis)
return out
def convert_deconv(self, op):
"""Convert Deconvolution layer"""
params = self._parse_conv_params(op)
weight_bias_blobs = self.init_layer_dict[op.name].blobs
conv_params = op.convolution_param
inputs = op.bottom
# process weight and bias blobs
weight, bias = None, None
if len(weight_bias_blobs) > 1:
weight = weight_bias_blobs[0]
bias = weight_bias_blobs[1]
else:
weight = weight_bias_blobs[0]
if weight:
kh, kw = params["kernel_size"]
weight_shape = [-1, conv_params.num_output, kh, kw]
if not weight.data:
if conv_params.weight_filler:
_filler = conv_params.weight_filler.value
weight_value = np.full(weight.shape.dim, _filler, np.float32)
else:
raise tvm.error.OpAttributeInvalid("At least weight_filler must be given")
else:
weight_value = np.asarray(weight.data, np.float32)
weight_value = np.reshape(weight_value, weight_shape)
            # the caffemodel stores deconvolution weights in IOHW order; relay expects OIHW, so transpose
weight_value = np.transpose(weight_value, [1, 0, 2, 3])
else:
raise tvm.error.OpAttributeRequired(
"No weight value of layer {} in caffemodel".format(op.name)
)
weight_expr = self.exp_tab.new_const(weight_value, dtype="float32")
in_expr = self.exp_tab.get_expr(inputs[0])
groups = params["groups"]
channels = params["channels"]
if bias:
bias_value = np.asarray(bias.data, np.float32)
bias_expr = self.exp_tab.new_const(bias_value, dtype="float32")
if groups > channels:
raise tvm.error.OpAttributeInvalid(
"Groups cannot be larger than the number of input channels"
)
if groups == channels:
inputs_expr = _op.split(in_expr, groups, axis=1)
# changing split axis to 0, according to PR #9336
weights_expr = _op.split(weight_expr, groups, axis=0)
            # avoid creating a Concat layer with too many input tensors (> 16)
q = groups >> 4
r = groups % 16
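            # e.g. a hypothetical groups == 40 gives q == 2 full chunks of 16 plus
            # r == 8 leftover groups, which are handled by the tail block below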
params["groups"] = 1
params["channels"] = 1
out = []
for lc in range(q):
_outputs = []
_inputs = [inputs_expr[i] for i in range(lc << 4, (lc << 4) + 16)]
_weights = [weights_expr[i] for i in range(lc << 4, (lc << 4) + 16)]
for (i, w) in zip(_inputs, _weights):
_out = _op.nn.conv2d_transpose(data=i, weight=w, **params)
if bias:
_out = _op.nn.bias_add(_out, bias_expr)
_outputs.append(_out)
out.append(_op.concatenate(_outputs, axis=1))
if r != 0:
_outputs = []
_inputs = [inputs_expr[i] for i in range(groups - r, groups)]
_weights = [weights_expr[i] for i in range(groups - r, groups)]
for (i, w) in zip(_inputs, _weights):
_out = _op.nn.conv2d_transpose(data=i, weight=w, **params)
if bias:
_out = _op.nn.bias_add(_out, bias_expr)
_outputs.append(_out)
out.append(_op.concatenate(_outputs, axis=1))
out = _op.concatenate(out, axis=1)
elif groups == 1:
out = _op.nn.conv2d_transpose(data=in_expr, weight=weight_expr, **params)
if bias:
out = _op.nn.bias_add(out, bias_expr)
else:
raise tvm.error.OpAttributeInvalid("Unable to handle.")
return out
def convert_slice(self, op):
"""Convert Slice layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
output_num = len(op.top)
slice_params = op.slice_param
axis = int(slice_params.axis)
indices_or_sections = list([int(s) for s in slice_params.slice_point])
if len(indices_or_sections) == 0:
indices_or_sections = output_num
else:
indices_or_sections = sorted(indices_or_sections)
out = _op.split(in_expr, indices_or_sections=indices_or_sections, axis=axis)
return out
def convert_sigmoid(self, op):
"""Convert Sigmoid layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
out = _op.sigmoid(in_expr)
return out
def convert_tanh(self, op):
"""Convert TanH layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
out = _op.tanh(in_expr)
return out
def convert_reduction(self, op):
"""Convert Reduction layer"""
reduction_dic = ["NOP", "SUM", "ASUM", "SUMSQ", "MEAN"]
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
method = op.reduction_param.operation
axis = op.reduction_param.axis
coeff = op.reduction_param.coeff
coeff_expr = self.exp_tab.new_const(np.asarray(coeff, np.float32))
num_axes = len(_infer_shape(in_expr))
# Currently, only reduction along ALL "tail" axes is supported in Caffe;
# reduction of axis M through N, where N < num_axes - 1, is unsupported.
if 0 < axis < (num_axes - 1):
for _axis in reversed(range(axis + 1, num_axes)):
in_expr = _op.sum(in_expr, axis=_axis)
in_expr = _op.squeeze(in_expr)
if reduction_dic[method] == "SUM":
out = _op.sum(in_expr, axis=axis)
elif reduction_dic[method] == "MEAN":
out = _op.mean(in_expr, axis=axis)
elif reduction_dic[method] == "ASUM":
in_expr = _op.abs(in_expr)
out = _op.sum(in_expr, axis=axis)
elif reduction_dic[method] == "SUMSQ":
in_expr = _op.multiply(in_expr, in_expr)
out = _op.sum(in_expr, axis=axis)
else:
raise tvm.error.OpAttributeInvalid(
"reduction method:{} is invalid in Caffe frontend.".format(method)
)
if float(coeff) != 1.0:
out = _op.multiply(out, coeff_expr)
return out
def convert_crop(self, op):
"""Convert Crop layer"""
inputs = op.bottom
assert len(inputs) == 2, "Need two inputs of Crop layer"
in_expr_a = self.exp_tab.get_expr(inputs[0])
in_expr_b = self.exp_tab.get_expr(inputs[1])
# parse crop params
crop_params = op.crop_param
axis = int(getattr(crop_params, "axis", 2))
offset = list(getattr(crop_params, "offset", 0))
# expand offset to (offset1, offset2, ...)
in_a_shape = _infer_shape(in_expr_a)
num_to_crop = len(in_a_shape) - axis
if not offset:
offset = [0] * num_to_crop
if len(offset) == 1:
offset = offset * num_to_crop
elif len(offset) != num_to_crop:
raise Exception("No matching the number between axis and offset!")
slice_end = in_a_shape
slice_start = [0] * len(in_a_shape)
for i in range(num_to_crop):
slice_start[i + axis] = offset[i]
to_crop_axis = list(range(len(in_a_shape)))
to_crop_axis = to_crop_axis[axis:]
# secondly, crop in_expr_a by in_expr_b
in_expr_a_stride = _op.strided_slice(in_expr_a, slice_start, slice_end)
out = _op.slice_like(in_expr_a_stride, in_expr_b, axes=to_crop_axis)
return out
def convert_permute(self, op):
"""Convert Permute layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
# parse permute params
permute_param = op.permute_param
axes = list(getattr(permute_param, "order", 0))
out = _op.transpose(in_expr, axes)
return out
def convert_embed(self, op):
"""Convert Embed layer"""
inputs = op.bottom
embed_param = op.embed_param
num_output = embed_param.num_output
input_dim = embed_param.input_dim
bias_term = embed_param.bias_term
weight_bias_blobs = self.init_layer_dict[op.name].blobs
weight, bias = None, None
if bias_term:
weight = weight_bias_blobs[0]
bias = weight_bias_blobs[1]
assert weight and bias
else:
weight = weight_bias_blobs[0]
assert weight
weight_value = np.asarray(weight.data, np.float32)
weight_value = np.reshape(weight_value, [input_dim, num_output])
weight_expr = self.exp_tab.new_const(weight_value, dtype="float32")
in_expr = self.exp_tab.get_expr(inputs[0])
input_shape = _infer_shape(in_expr)
input_count = 1
for dim in input_shape:
input_count *= dim
index = _op.cast(in_expr, "int32")
out = _op.take(weight_expr, index, axis=0)
if bias_term:
bias_value = np.asarray(bias.data, np.float32)
bias_expr = self.exp_tab.new_const(bias_value, dtype="float32")
out = _op.reshape(out, [input_count, num_output])
out = _op.add(out, bias_expr)
out_shape = list(input_shape)
out_shape.append(num_output)
out = _op.reshape(out, out_shape)
return out
def convert_power(self, op):
"""Convert Power layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
power = _expr.const(op.power_param.power)
scale = _expr.const(op.power_param.scale)
shift = _expr.const(op.power_param.shift)
out = _op.multiply(in_expr, scale)
out = _op.add(out, shift)
out = _op.power(out, power)
return out
def check_unsupported_ops(self):
"""Check unsupported Caffe ops in our converter."""
unsupported_ops_set = set()
include_layer = dict()
for pl in self.predict_layer:
if pl.type not in include_layer:
include_layer[pl.type] = 1
else:
include_layer[pl.type] = include_layer[pl.type] + 1
for pl in self.predict_layer:
op_name = pl.type
if op_name not in self.convert_map:
unsupported_ops_set.add(op_name)
if unsupported_ops_set:
msg = "The following operators are not supported in frontend " "Caffe: {}"
ops = str(list(unsupported_ops_set)).strip("[,]")
raise tvm.error.OpNotImplemented(msg.format(ops))
def fuse_op(self, layers):
"""Fusing the BatchNorm and Scale layer"""
bn, scale = layers["bn"], layers["scale"]
# bn params
bn_weight_bias_blobs = self.init_layer_dict[bn.name].blobs
bn_scale = np.asarray(bn_weight_bias_blobs[2].data, np.float32)
if bn_scale:
bn_scale = 1 / bn_scale
bn_mean = np.asarray(bn_weight_bias_blobs[0].data, np.float32) * bn_scale
bn_var = np.asarray(bn_weight_bias_blobs[1].data, np.float32) * bn_scale
bn_eps = bn.batch_norm_param.eps
# scale params
scale_weight_bias_blobs = self.init_layer_dict[scale.name].blobs
scale_gamma = np.asarray(scale_weight_bias_blobs[0].data, np.float32)
scale_bias = scale.scale_param.bias_term
if scale_bias:
scale_beta = np.asarray(scale_weight_bias_blobs[1].data, np.float32)
else:
scale_beta = np.zeros(scale_gamma.shape, dtype=np.float32)
# new params
self.new_bn[bn.name] = [bn_mean, bn_var, bn_eps, scale_gamma, scale_beta]
return bn
def op_fuse(self):
"""fuse bn and scale"""
new_layers = []
temp_layers = {}
changed_layers = {}
for index, pl in enumerate(self.predict_layer):
op_type = pl.type
if op_type == "Input":
new_layers.append(pl)
continue
elif op_type == "BatchNorm":
if (index != len(self.predict_layer) - 1) and (
self.predict_layer[index + 1].type == "Scale"
):
temp_layers["bn"] = pl
continue
else:
new_layers.append(pl)
temp_layers.clear()
elif op_type == "Scale":
if self.predict_layer[index - 1].type == "BatchNorm":
temp_layers["scale"] = pl
else:
new_layers.append(pl)
temp_layers.clear()
else:
temp_layers.clear()
if len(temp_layers) == 2:
layer = self.fuse_op(temp_layers)
new_layers.append(layer)
changed_layers[temp_layers["scale"].name] = temp_layers["bn"].name
for idx, plt in enumerate(pl.bottom):
if plt in changed_layers:
pl.bottom[idx] = changed_layers[plt]
if op_type not in ["BatchNorm", "Scale"]:
new_layers.append(pl)
self.predict_layer = new_layers
self.changed_layers = changed_layers
def convert_op_to_relay(self):
"""Convert Caffe ops to relay ops"""
for pl in self.predict_layer:
op_type = pl.type
if op_type == "Input":
continue
output_tensors = pl.top
ret = self.convert_map[op_type](pl)
if len(output_tensors) == 1:
self.exp_tab.set_expr(output_tensors[0], ret)
else:
for idx, output_tensor in enumerate(output_tensors):
self.exp_tab.set_expr(output_tensor, ret[idx])
def _rebuild_layers(predict_layer):
"""Rebuild caffe layer. If the caffe net include in-place layers, repalce its top
with its name and update the bottom of other layer that is related to it.
"""
# dict of input name that will be changed to new name
changed_top_dict = dict()
for pl in predict_layer:
if pl.type == "Input":
continue
        # if the current layer has a single input and a single output and they refer
        # to the same blob, the layer operates "in-place"
if len(pl.top) == 1 and len(pl.bottom) == 1:
if pl.top[0] == pl.bottom[0]:
# change current layer's input firstly
if pl.bottom[0] in changed_top_dict:
pl.bottom[0] = changed_top_dict[pl.bottom[0]]
# update "change" dict
changed_top_dict[pl.top[0]] = pl.name
# change current layer's output to its name
pl.top[0] = pl.name
else:
if pl.bottom[0] in changed_top_dict:
pl.bottom[0] = changed_top_dict[pl.bottom[0]]
        # if the layer is not in-place, only rewire its bottoms
else:
for index, plt in enumerate(pl.bottom):
if plt in changed_top_dict:
pl.bottom[index] = changed_top_dict[plt]
def _get_inputs_outputs(predict_layer):
"""Obtain Caffe model's inputs and outpus"""
# model inputs / outputs
model_inputs = list()
model_outputs = list()
    # A blob that appears as any layer's bottom cannot be a model output
not_outputs = set()
for pl in predict_layer:
if pl.type == "Input":
assert len(pl.top) == 1, "The number of Input layer's output is more than 1."
model_inputs.append(pl.top[0])
for i in pl.bottom:
not_outputs.add(i)
for pl in predict_layer:
if len(pl.bottom) > 0:
for t in pl.top:
if t not in not_outputs:
model_outputs.append(t)
return model_inputs, model_outputs
def from_caffe(init_net, predict_net, shape_dict, dtype_dict):
"""Convert from caffe model into compatible relay Function.
Parameters
----------
init_net : caffe_pb2.NetParameter
caffemodel
predict_net : caffe_pb2.NetParameter
caffe prototxt
shape_dict : dict of str to int list/tuple
Input shapes of the model.
dtype_dict : dict of str to str
Input types of the model.
Returns
-------
mod : tvm.IRModule
The relay module for compilation.
params : dict of str to tvm.NDArray
The parameter dict to be used by relay
"""
old_caffe = False
if len(predict_net.input) != 0: # old caffe version
old_caffe = True
model_inputs = list(predict_net.input)
predict_layer = predict_net.layer
    # replace each in-place layer's top with its name and update other layers' bottoms
_rebuild_layers(predict_layer)
# obtain inputs and outputs of Net
if old_caffe:
_, model_outputs = _get_inputs_outputs(predict_layer)
else:
model_inputs, model_outputs = _get_inputs_outputs(predict_layer)
exp_tab = ExprTable()
for in_name in model_inputs:
shape = shape_dict[in_name] if in_name in shape_dict else None
dtype = dtype_dict[in_name] if in_name in dtype_dict else "float32"
exp_tab.set_expr(in_name, _expr.var(in_name, shape=shape, dtype=dtype))
if list(init_net.layer):
init_layer = init_net.layer
else:
init_layer = init_net.layers
init_layer_dict = {il.name: il for il in init_layer}
# op code in model
op_converter = OperatorConverter(init_layer_dict, predict_layer, exp_tab)
op_converter.check_unsupported_ops()
op_converter.op_fuse()
op_converter.convert_op_to_relay()
# params and outputs
params = {k: _nd.array(np.array(v)) for k, v in exp_tab.params.items()}
outputs = list()
for n in model_outputs:
if n in op_converter.changed_layers:
n = op_converter.changed_layers[n]
outputs.append(exp_tab.get_expr(n))
outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
func = _function.Function(analysis.free_vars(outputs), outputs)
mod = IRModule.from_expr(func)
return mod, params
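# Illustrative usage sketch (not part of the original file; the file names and the
# input name "data" are hypothetical). Wrapped in a helper so importing this module
# never touches the filesystem.
def _example_from_caffe_usage():
    """Show the expected call pattern of `from_caffe` for a single-input model."""
    from google.protobuf import text_format  # shipped with the protobuf runtime
    from caffe.proto import caffe_pb2  # provided by pycaffe
    init_net = caffe_pb2.NetParameter()
    predict_net = caffe_pb2.NetParameter()
    with open("model.caffemodel", "rb") as f:
        init_net.ParseFromString(f.read())
    with open("model.prototxt", "r") as f:
        text_format.Merge(f.read(), predict_net)
    shape_dict = {"data": (1, 3, 224, 224)}
    dtype_dict = {"data": "float32"}
    return from_caffe(init_net, predict_net, shape_dict, dtype_dict)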
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/caffe2.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, line-too-long, unused-argument
"""Caffe2 frontend"""
import tvm
from tvm.ir import IRModule
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from ... import nd as _nd
from .common import AttrCvt, Renamer
from .common import get_relay_op, new_var, infer_channels
__all__ = ["from_caffe2"]
def dimension_picker(prefix, suffix=""):
    def _impl(attr):
        kernel = attr["kernel_shape"]
        if len(kernel) == 2:
            return prefix + "2d" + suffix
raise tvm.error.OpAttributeUnImplemented(
"Non-2D kernels are not supported for operator {}2d".format(prefix)
)
return _impl
def revert_caffe2_pad(pads):
"""Caffe2 requires two times the normal padding."""
if len(pads) == 4:
pads = pads[:2]
elif len(pads) == 2:
pass
else:
raise tvm.error.OpAttributeInvalid("Number of pads must equal 2 or 4.")
return pads
def dimension_constraint():
def _dim_check(args):
if len(args["kernel_shape"]) == 2:
return True
return False
return _dim_check, "Only 2d kernel supported."
def _clean_up_pool_args(args):
"""A helper function to clean up common arguments in conv and pooling ops."""
assert isinstance(args, dict)
if "stride_h" in args and "stride_w" in args:
assert "stride" not in args and "strides" not in args
args["strides"] = [args["stride_h"], args["stride_w"]]
args.pop("stride_h")
args.pop("stride_w")
elif "stride" in args:
args["strides"] = [args["stride"], args["stride"]]
args.pop("stride")
# rename 'kernel', 'kernels', to 'kernel_shape'
if "kernel_h" in args and "kernel_w" in args:
assert "kernel" not in args and "kernels" not in args
args["kernel_shape"] = [args["kernel_h"], args["kernel_w"]]
args.pop("kernel_h")
args.pop("kernel_w")
elif "kernel" in args:
args["kernel_shape"] = [args["kernel"], args["kernel"]]
args.pop("kernel")
elif "kernels" in args:
args["kernel_shape"] = args["kernels"]
args.pop("kernels")
if "pad_t" in args and "pad_l" in args and "pad_b" in args and "pad_r" in args:
assert "pad" not in args and "pads" not in args
args["pads"] = [args["pad_t"], args["pad_l"], args["pad_b"], args["pad_r"]]
for pad in ["pad_t", "pad_l", "pad_b", "pad_r"]:
args.pop(pad)
elif "pad" in args:
args["pads"] = [args["pad"], args["pad"]]
args.pop("pad")
if "dilation_h" in args and "dilation_w" in args:
assert "dilation" not in args and "dilations" not in args
args["dilations"] = [args["dilation_h"], args["dilation_w"]]
args.pop("dilation_h")
args.pop("dilation_w")
elif "dilation" in args:
args["dilations"] = [args["dilation"], args["dilation"]]
args.pop("dilation")
return args
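# Illustrative sketch (hypothetical attribute values): the helper above folds the
# per-dimension Caffe2 attributes into list-style keys, e.g.
#   {"kernel": 3, "stride_h": 2, "stride_w": 2, "pad": 1}
#   -> {"kernel_shape": [3, 3], "strides": [2, 2], "pads": [1, 1]}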
class Caffe2OpConverter(object):
"""A helper class for holding Caffe2 op converters."""
@classmethod
def get_converter(cls):
"""Get converter.
:return: converter, which should be `_impl`.
"""
if hasattr(cls, "_impl"):
return getattr(cls, "_impl")
raise tvm.error.OpNotImplemented(
"Operator {} is not supported in frontend Caffe2.".format(cls.__name__)
)
_caffe2_internal_args = [
# nnpack args
"algo",
"convolution_transform_strategy",
"float16_compute",
"shared_buffer",
# training args
"init_params",
"cudnn_exhaustive_search",
"exhaustive_search",
# training args
"adj",
"hwgq",
# args that we don't care
"legacy_pad",
]
class Elemwise(Caffe2OpConverter):
"""A helper class for elemwise op converters."""
name = ""
@classmethod
def _impl(cls, inputs, args, params):
        assert len(inputs) == 2, "Math op takes 2 inputs, {} given".format(len(inputs))
op_name = cls.name
conv_ops = ["conv2d", "conv2d_transpose"]
if args.get("broadcast", 0) and any(x in str(inputs[0]) for x in conv_ops):
# TODO(zhreshold): remove hard coded infershape
axis = int(args.get("axis", 0))
inputs[1] = _op.expand_dims(inputs[1], axis=axis, num_newaxis=2)
return get_relay_op(op_name)(*inputs)
class Add(Elemwise):
"""Operator converter for Add."""
name = "add"
class Mul(Elemwise):
"""Operator converter for Mul."""
name = "multiply"
class Pool(Caffe2OpConverter):
"""A helper class for pool op converters."""
name = ""
@classmethod
def _impl(cls, inputs, args, params):
_clean_up_pool_args(args)
if "global_pooling" in args and args["global_pooling"] == 1:
op_name = dimension_picker("global_" + cls.name)
return get_relay_op(op_name(args))(*inputs)
return AttrCvt(
op_name=dimension_picker(cls.name),
transforms={
"kernel_shape": "pool_size",
"pads": ("padding", (0, 0), revert_caffe2_pad),
"strides": "strides",
},
ignores=["dilations", "order", "legacy_pad", "global_pooling"],
extras={"ceil_mode": False},
custom_check=dimension_constraint(),
)(inputs, args, params)
class AveragePool(Pool):
name = "avg_pool"
class MaxPool(Pool):
name = "max_pool"
class Conv(Caffe2OpConverter):
"""Operator converter for Conv."""
@classmethod
def _impl(cls, inputs, args, params):
# get number of channels
channels = infer_channels(inputs[1])
args["channels"] = channels
_clean_up_pool_args(args)
out = AttrCvt(
op_name=dimension_picker("conv"),
transforms={
"group": ("groups", 1),
"kernel_shape": "kernel_size",
"pads": ("padding", (0, 0), revert_caffe2_pad),
"strides": "strides",
"dilations": ("dilation", (1, 1)),
"order": (
"data_layout",
("NCHW"),
lambda x: x if isinstance(x, str) else x.decode("UTF-8"),
),
},
excludes=[],
ignores=_caffe2_internal_args,
custom_check=dimension_constraint(),
)(inputs[:2], args, params)
use_bias = len(inputs) == 3
if use_bias:
out = _op.nn.bias_add(out, inputs[2])
return out
class ConvTranspose(Caffe2OpConverter):
"""Operator converter for ConvTranspose."""
@classmethod
def _impl(cls, inputs, args, params):
# get number of channels
channels = infer_channels(inputs[1], True)
args["channels"] = channels
_clean_up_pool_args(args)
out = AttrCvt(
op_name=dimension_picker("conv", "_transpose"),
transforms={
"kernel_shape": "kernel_size",
"pads": ("padding", (0, 0), revert_caffe2_pad),
"dilations": ("dilation", (1, 1)),
"order": (
"data_layout",
("NCHW"),
lambda x: x if isinstance(x, str) else x.decode("UTF-8"),
),
},
excludes=[],
ignores=_caffe2_internal_args,
custom_check=dimension_constraint(),
)(inputs[:2], args, params)
use_bias = len(inputs) == 3
if use_bias:
out = _op.nn.bias_add(out, inputs[2])
return out
class Concat(Caffe2OpConverter):
"""Operator converter for Concat."""
@classmethod
def _impl(cls, inputs, args, params):
def _get_axis_from_order_str(order):
order = order if isinstance(order, str) else order.decode("UTF-8")
if order == "NCHW":
return 1
if order == "NHWC":
return 3
raise tvm.error.OpAttributeUnImplemented(
"Order {} is not supported in operator Concat.".format(order)
)
return AttrCvt(
op_name="concatenate",
transforms={
"order": ("axis", (1), _get_axis_from_order_str),
},
excludes=["add_axis"],
)((inputs,), args, params)
class NormalizePlanarYUV(Caffe2OpConverter):
"""Operator converter for NormalizePlanarYUV.
caffe2 definition: https://github.com/pytorch/pytorch/blob/master/caffe2/operators/norm_planar_yuv_op.cc
"""
@classmethod
def _impl(cls, inputs, args, params):
assert len(inputs) == 3
mean = _op.expand_dims(inputs[1], axis=2, num_newaxis=2)
std = _op.expand_dims(inputs[2], axis=2, num_newaxis=2)
return _op.divide(_op.subtract(inputs[0], mean), std)
class ResizeNearest(Caffe2OpConverter):
"""Operator converter for Upsample (nearest mode)."""
@classmethod
def _impl(cls, inputs, args, params):
width_scale = args["width_scale"] if "width_scale" in args else 1
height_scale = args["height_scale"] if "height_scale" in args else 1
assert width_scale == height_scale
return _op.nn.upsampling(
inputs[0], scale_h=int(width_scale), scale_w=int(width_scale), method="NEAREST_NEIGHBOR"
)
class Sum(Caffe2OpConverter):
"""Operator converter for Sum."""
@classmethod
def _impl(cls, inputs, args, params):
# Sum Operator
for in_index in range(len(inputs) - 1):
inputs[in_index + 1] = _op.add(inputs[in_index], inputs[in_index + 1])
return inputs[len(inputs) - 1]
class Softmax(Caffe2OpConverter):
"""Operator converter for Softmax."""
@classmethod
def _impl(cls, inputs, args, params):
# set default value when axis is not set in the model
if "axis" not in args:
args["axis"] = 1
return AttrCvt("softmax", transforms={"axis": ("axis", args["axis"])})(inputs, args, params)
class FC(Caffe2OpConverter):
"""Operator converter for FC."""
@classmethod
def _impl(cls, inputs, args, params):
inputs[0] = _op.nn.batch_flatten(inputs[0])
units = infer_channels(inputs[1])
res = _op.nn.dense(inputs[0], inputs[1], units=units)
use_bias = len(inputs) == 3
if use_bias:
res = _op.nn.bias_add(res, inputs[2])
return res
class SpatialBN(Caffe2OpConverter):
"""Operator converter for SpatialBN."""
@classmethod
def _impl(cls, inputs, args, params):
return AttrCvt(
op_name="batch_norm",
disables=["momentum"],
ignores=["order", "spatial", "is_test", "consumed_inputs", "num_batches"],
)(inputs, args, params)
# compatible operators that do NOT require any conversion.
_identity_list = []
# _convert_map defines maps of name to converter functor(callable)
# for 1 to 1 mapping, use Renamer if nothing but name is different
# use AttrCvt if attributes need to be converted
# for 1 to N mapping(composed), use custom callable functions
# for N to 1 mapping, currently not supported(?)
# Minimal set of ops for squeezenet and resnet50
def _get_convert_map():
return {
# caffe2 common operators
"Add": Add.get_converter(),
"Sum": Sum.get_converter(),
"Mul": Mul.get_converter(),
"Softmax": Softmax.get_converter(),
# nn
"AveragePool": AveragePool.get_converter(),
"MaxPool": MaxPool.get_converter(),
"Conv": Conv.get_converter(),
"ConvTranspose": ConvTranspose.get_converter(),
"Concat": Concat.get_converter(),
"FC": FC.get_converter(),
"SpatialBN": SpatialBN.get_converter(),
"ResizeNearest": ResizeNearest.get_converter(),
"Relu": AttrCvt("relu", {}, ignores=["order"]),
"Sigmoid": Renamer("sigmoid"),
"Dropout": AttrCvt("dropout", {"ratio": "rate"}, ignores=["is_test"]),
# c2 image preprocessing ops
"NormalizePlanarYUV": NormalizePlanarYUV.get_converter(),
}
class Caffe2NetDef(object):
"""A helper class for handling Relay expression copying from pb2.GraphProto.
Definition: https://github.com/pytorch/pytorch/blob/master/caffe2/proto/caffe2.proto
"""
def __init__(self, shape, dtype):
self._nodes = {}
self._params = {}
self._visited_nodes = set()
self._ops = {}
self._shape = shape
self._dtype = dtype
self._mod = IRModule({})
def from_caffe2(self, init_net, predict_net):
"""Construct Relay expression from caffe2 graph.
Parameters
----------
init_net : protobuf object
predict_net : protobuf object
Returns
-------
mod : tvm.IRModule
The module that optimizations will be performed on.
params : dict
A dict of name: tvm.nd.array pairs, used as pretrained weights
"""
# pylint: disable=import-outside-toplevel
from caffe2.python import workspace
workspace.RunNetOnce(init_net)
# Input
input_name = predict_net.op[0].input[0]
# Params
self._params = {}
used_blobs = set()
for c2_op in predict_net.op:
for i in c2_op.input:
used_blobs.add(i)
for blob in workspace.Blobs():
if blob in used_blobs and blob != input_name:
self._params[blob] = _nd.array(workspace.FetchBlob(blob))
# Variables
self._nodes = {}
for blob in predict_net.external_input:
if blob in self._params:
self._nodes[blob] = new_var(
blob, shape=self._params[blob].shape, dtype=self._params[blob].dtype
)
else:
shape = self._shape[blob] if blob in self._shape else ()
if isinstance(self._dtype, dict) and blob in self._dtype:
dtype = str(self._dtype[blob])
elif isinstance(self._dtype, str):
dtype = self._dtype
else:
dtype = "float32"
self._nodes[blob] = new_var(blob, shape=shape, dtype=dtype)
# Ops
for c2_op in predict_net.op:
for blob in c2_op.output:
self._ops[blob] = c2_op
for c2_op in predict_net.op:
self._process_op(c2_op)
# Outputs
out = []
for blob in predict_net.external_output:
out.append(self._nodes[blob])
if len(out) > 1:
outputs = _expr.Tuple(out)
else:
outputs = out[0]
func = _function.Function(analysis.free_vars(outputs), outputs)
self._mod["main"] = func
return self._mod, self._params
def _get_node(self, blob):
"""Get the Symbol of blob and detect cyclic dependency in the graph."""
if blob in self._nodes:
return self._nodes[blob]
assert blob not in self._visited_nodes, "Cyclic dependency in the graph (in {})".format(
blob
)
self._visited_nodes.add(blob)
self._process_op(self._ops[blob])
return self._nodes[blob]
def _process_op(self, c2_op):
op_type = c2_op.type
args = self._parse_arg(c2_op.arg)
inputs = [self._get_node(i) for i in c2_op.input]
tvm_op = self._convert_operator(op_type, inputs, args)
if not isinstance(tvm_op, _expr.TupleWrapper):
self._nodes[c2_op.output[0]] = tvm_op
else:
for k, i in zip(list(c2_op.output), range(len(tvm_op))):
self._nodes[k] = tvm_op[i]
def _parse_arg(self, arg):
"""Convert a list of Argument to a dict, with names as keys."""
args = {}
for a in arg:
for f in ["f", "i", "s"]:
if a.HasField(f):
args[a.name] = getattr(a, f)
for f in ["floats", "ints", "strings"]:
if list(getattr(a, f)):
assert a.name not in args, "Only one type of attr is allowed"
args[a.name] = tuple(getattr(a, f))
for f in ["n"]:
if a.HasField(f):
raise NotImplementedError("Field {} is not supported in relay.".format(f))
for f in ["nets"]:
if list(getattr(a, f)):
raise NotImplementedError("Field {} is not supported in relay.".format(f))
if a.name not in args:
raise ValueError("Cannot parse attribute: \n{}\n.".format(a))
return args
def _convert_operator(self, op_type, inputs, args, identity_list=None, convert_map=None):
"""Convert from Caffe2 operator to Relay operator.
The converter must specify conversions explicitly for incompatible name, and
apply handlers to operator attributes.
Parameters
----------
op_type : str
Operator name, such as Convolution, FullyConnected
inputs : list of tvm.relay.function.Function
            List of input expressions.
args : dict
Dict of operator attributes
identity_list : list
List of operators that don't require conversion
convert_map : dict
            Dict of name : callable, where name is the op's name that
            requires conversion to relay and callable is a function that
            takes args and returns (new_op_type, new_args)
Returns
-------
func : tvm.relay.function.Function
Converted relay function
"""
identity_list = identity_list if identity_list else _identity_list
convert_map = convert_map if convert_map else _get_convert_map()
if op_type in identity_list:
func = get_relay_op(op_type)(*inputs, **args)
elif op_type in convert_map:
# Add a sanitizing step to convert all byte strings in args to strings
func = convert_map[op_type](inputs, args, self._params)
else:
raise tvm.error.OpNotImplemented(
"Operator {} is not supported in frontend Caffe2.".format(op_type)
)
return func
def from_caffe2(init_net, predict_net, shape=None, dtype="float32"):
"""Load caffe2 graph which contains init_net and predict_net into Relay Function.
Parameters
----------
init_net : protobuf object
Caffe2 NetDef containing the weights
predict_net : protobuf object
Caffe2 NetDef containing the graph
shape : dict of str to tuple
The input shape to the graph
dtype : str or dict of str to str
The input types to the graph
Returns
-------
mod : tvm.IRModule
The module that optimizations will be performed on.
params : dict of str to tvm.nd.NDArray
Dict of converted parameters stored in tvm.nd.NDArray format
"""
caffe2 = Caffe2NetDef(shape, dtype)
return caffe2.from_caffe2(init_net, predict_net)
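# Illustrative usage sketch (not part of the original file; the file names and the
# input blob name "data" are hypothetical).
def _example_from_caffe2_usage():
    """Show the expected call pattern of `from_caffe2` for a single-input model."""
    from caffe2.proto import caffe2_pb2  # provided by the caffe2 python package
    init_net = caffe2_pb2.NetDef()
    predict_net = caffe2_pb2.NetDef()
    with open("init_net.pb", "rb") as f:
        init_net.ParseFromString(f.read())
    with open("predict_net.pb", "rb") as f:
        predict_net.ParseFromString(f.read())
    shape = {"data": (1, 3, 224, 224)}
    return from_caffe2(init_net, predict_net, shape=shape, dtype="float32")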
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/change_datatype.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Change Datatype Pass"""
from ..function import Function
from ..expr_functor import ExprMutator
from ..transform.transform import function_pass
from ..expr import var, bind
@function_pass(opt_level=0)
class ChangeDatatype(ExprMutator):
"""Mutator for changing the datatype of Relay programs.
This pass should be useful for users of the Bring Your Own Datatypes
framework.
TODO(@gussmith23 @hypercubestart) Add link to documentation when it exists
Example:
.. code-block:: python
from tvm.relay.testing.inception_v3 import get_workload
mod, params = get_workload()
def change_dtype(mod, params, src, dst):
mod = ChangeDatatype(src, dst)(mod)
params = dict((p, tvm.nd.array(params[p].numpy().astype(dst))) for p in params)
return mod, params
mod, params = change_dtype(mod, params, "float32", "custom[posites2]32")
Parameters
----------
src : String
The source datatype name, e.g. "float" or "posites2" (but not "float32"
or "custom[posites2]32").
dst : String
The destination datatype name, in the same format.
Returns
-------
mod : tvm.IRModule
Module where all nodes of dtype `src` have been changed to have dtype
`dst`.
"""
def __init__(self, src, dst):
self.src = src
self.dst = dst
super().__init__()
def transform_function(self, func, mod, ctx):
return self.visit(func)
def visit_constant(self, const):
if const.data.dtype == self.src:
return const.astype(self.dst)
return const
def visit_function(self, fn):
new_params = []
binds = {}
for param in fn.params:
# Get the parameter's type annotation.
var_type = param.type_annotation
# See if we want to replace dtype.
if var_type.dtype == self.src:
dtype = self.dst
else:
dtype = var_type.dtype
# Generate new variable.
new_param = var(param.name_hint, shape=var_type.shape, dtype=dtype)
new_params.append(new_param)
binds[param] = new_param
new_body = self.visit(fn.body)
# Rewrite the body to use new parameters.
new_body = bind(new_body, binds)
# Construct the updated function and return.
return Function(
new_params,
new_body,
# You could change the return type, if you use None it will re-infer.
None,
type_params=fn.type_params,
attrs=fn.attrs,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/common.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=broad-except
"""Common utilities"""
from __future__ import absolute_import as _abs
import logging
import numpy as np
import tvm
from tvm.ir import IRModule
from tvm.topi.utils import get_const_tuple
from .. import expr as _expr
from .. import function as _function
from .. import transform as _transform
from .. import op as _op
from .. import ty as _ty
from .. import analysis
class DuplicateFilter:
"""A log filter that only prints the same message once."""
def __init__(self):
self.msgs = set()
    def filter(self, record):
        # let the first occurrence of a message through, suppress later repeats
        if record.msg in self.msgs:
            return False
        self.msgs.add(record.msg)
        return True
# pylint: disable=invalid-name
logger = logging.getLogger("Frontend")
logger.addFilter(DuplicateFilter())
# Uncomment below line to print all debug msgs
# logger.setLevel(logging.DEBUG)
class RequiredAttr(object):
"""Dummpy class to represent required attr"""
class StrAttrsDict(object):
"""Helper class to parse attrs stored as Dict[str, str].
Parameters
----------
attrs : Dict[str, str]
The attributes to be used.
"""
def __init__(self, attrs):
self.attrs = attrs
def has_attr(self, key):
"""Checks if a attribute is present in the map.
Parameters
----------
key : str
The attribute key
Returns
-------
bool : True if the key is present in the attributes else false.
"""
return key in self.attrs
def get_float(self, key, default=RequiredAttr()):
"""Get float attribute
Parameters
----------
key : str
The attribute key
default : float
The default value.
Returns
-------
value : The result
"""
if key in self.attrs:
return float(self.attrs[key])
if isinstance(default, RequiredAttr):
raise AttributeError("Required attribute {} not found.".format(key))
return default
def get_int(self, key, default=RequiredAttr()):
"""Get int attribute
Parameters
----------
key : str
The attribute key
        default : int
The default value.
Returns
-------
value : The result
"""
if key in self.attrs:
val = self.attrs[key]
if val == "None":
return None
return int(val)
if isinstance(default, RequiredAttr):
raise AttributeError("Required attribute {} not found.".format(key))
return default
def get_str(self, key, default=RequiredAttr()):
"""Get str attribute
Parameters
----------
key : str
The attribute key
default : str
The default value.
Returns
-------
value : The result
"""
if key in self.attrs:
return self.attrs[key]
if isinstance(default, RequiredAttr):
raise AttributeError("Required attribute {} not found.".format(key))
return default
def get_int_tuple(self, key, default=RequiredAttr()):
"""Get int tuple attribute
Parameters
----------
key : str
The attribute key
default : tuple of int
The default value.
Returns
-------
value : The result
"""
if key in self.attrs:
tshape = self.attrs[key]
return tuple(
int(x) if x.strip("- ").isdigit() else None
for x in tshape.strip("()[]").split(",")
if x
)
if isinstance(default, RequiredAttr):
raise AttributeError("Required attribute {} not found.".format(key))
return default
def get_float_tuple(self, key, default=RequiredAttr()):
"""Get float tuple attribute
Parameters
----------
key : str
The attribute key
default : tuple of float
The default value.
Returns
-------
value : The result
"""
if key in self.attrs:
tshape = self.attrs[key]
return tuple(float(x.strip()) for x in tshape.strip("()[]").split(","))
if isinstance(default, RequiredAttr):
raise AttributeError("Required attribute {} not found.".format(key))
return default
def get_tuple_tuple_int(self, key, default=RequiredAttr()):
"""Get int list attribute
Parameters
----------
key : str
The attribute key
default : tuple of tuple of int
The default value.
Returns
-------
value : The result
"""
if key in self.attrs:
value = self.attrs[key]
seq = []
for tup in value.strip("()").split("),"):
tup = tup.strip("[]()")
els = [int(x.strip("( ")) for x in tup.split(",")]
seq.append(tuple(els))
return tuple(seq)
if isinstance(default, RequiredAttr):
raise AttributeError("Required attribute {} not found.".format(key))
return default
def get_int_list(self, key, default=RequiredAttr()):
"""Get int list attribute
Parameters
----------
key : str
The attribute key
default : tuple of int
The default value.
Returns
-------
value : The result
"""
if key in self.attrs:
tshape = self.attrs[key]
return tuple(int(x.strip()) for x in tshape.strip("[]()").split(","))
if isinstance(default, RequiredAttr):
raise AttributeError("Required attribute {} not found.".format(key))
return default
def get_bool(self, key, default=RequiredAttr()):
"""Get bool tuple attribute
Parameters
----------
key : str
The attribute key
default : bool
The default value.
Returns
-------
value : The result
"""
if key in self.attrs:
val = self.attrs[key]
return val.strip().lower() in ["true", "1", "t", "y", "yes"]
if isinstance(default, RequiredAttr):
raise AttributeError("Required attribute {} not found.".format(key))
return default
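# Example usage (editor's sketch; the attribute names below are made up for
# illustration and do not come from any particular model format):
#   attrs = StrAttrsDict({"kernel": "(3, 3)", "use_bias": "True", "alpha": "0.1"})
#   attrs.get_int_tuple("kernel")   # -> (3, 3)
#   attrs.get_bool("use_bias")      # -> True
#   attrs.get_float("alpha")        # -> 0.1
#   attrs.get_int("groups", 1)      # -> 1, falls back to the default when the key is absent
#   attrs.get_str("layout")         # raises AttributeError: required attribute not found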
def get_relay_op(op_name):
"""Get the callable function from Relay based on operator name.
Parameters
----------
op_name : str
The Relay operator name.
"""
if "." in op_name:
# explicit hierarchical modules
op = _op
try:
for opn in op_name.split("."):
op = getattr(op, opn)
except AttributeError:
op = None
else:
# try search op in various modules
for candidate in (_op, _op.nn, _op.image, _op.vision, _op.contrib):
op = getattr(candidate, op_name, None)
if op is not None:
break
if not op:
raise tvm.error.OpNotImplemented("Unable to map op_name {} to relay".format(op_name))
return op
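# Example usage (editor's sketch): both dotted and bare operator names resolve
# to the same callable, and unknown names raise OpNotImplemented.
#   get_relay_op("nn.conv2d")   # explicit hierarchical lookup -> tvm.relay.op.nn.conv2d
#   get_relay_op("relu")        # searched in _op, _op.nn, ... -> tvm.relay.op.nn.relu
#   get_relay_op("no_such_op")  # raises tvm.error.OpNotImplemented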
class ExprTable(object):
"""Table storing Relay expressions by names."""
def __init__(self):
self.exprs = {}
self.params = {}
self.const_ctr = 1
self.in_padding = False
def new_const(self, value, shape=None, dtype="float32"):
name = "_param_%d" % (self.const_ctr)
if hasattr(value, "shape"):
shape = value.shape
self.const_ctr += 1
self.params[name] = value
self.exprs[name] = _expr.var(name_hint=name, shape=shape, dtype=dtype)
return self.exprs[name]
def get_expr(self, name):
return self.exprs[name]
def set_expr(self, name, expr, force_override=False):
assert isinstance(expr, _expr.Expr)
# If the name already exists we sometimes need to override the value;
# otherwise patterns like x = func(x) would not work.
# One example is CoreML preprocessing, which overrides
# an expression with the same name as its input.
# However, the keras frontend depends on names not being
# overridden, so force_override is added to control this behaviour.
if name not in self.exprs or force_override:
self.exprs[name] = expr
def has_expr(self, name):
return name in self.exprs
def set_padding(self, paddings):
self.paddings = paddings
self.in_padding = True
def clear_padding(self):
self.in_padding = False
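# Example usage (editor's sketch; the names "x" and "y" are illustrative):
#   etab = ExprTable()
#   x = _expr.var("x", shape=(1, 3), dtype="float32")
#   etab.set_expr("x", x)
#   w = etab.new_const(np.ones((3, 3), dtype="float32"))  # registered as "_param_1"
#   y = _op.nn.dense(etab.get_expr("x"), w)
#   etab.set_expr("y", y)
#   etab.has_expr("y")  # -> True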
class AttrCvt(object):
"""Common attribute converter. An AttrConverter instance is a callable:
```
attr_converter = AttrConverter(op_name, transforms={'a':'b', 'c':('d', 1)})
new_op_name, new_attr = attr_converter(attrs)
```
Parameters
----------
op_name : str or callable
If set as str, returned operator name is the str.
If set as callable, returned operator is the str returned by calling:
`op_name = func(attr)`
transforms : dict of `new_name, or (new_name, default_value, transform function)`
If only a new_name is provided, it's like renaming the attribute name.
If a default_value is provided, then the attribute is considered optional.
If transform function is provided, the original attribute value is handled
by transform function.
excludes : list
A list of excluded attributes that should `NOT` appear.
Raises NotImplementedError if one appears.
disables : list
A list of attributes that are disabled in relay. A warning is logged for each.
ignores : list
A list of attributes that are ignored in relay. Logged at debug level.
extras : dict
Additional attributes that are always added to the returned
attribute dict.
custom_check : callable
A (function, message) tuple; the function takes the attributes and returns True/False.
Raises RuntimeError with the message if the check does not return True.
"""
def __init__(
self,
op_name,
transforms=None,
excludes=None,
disables=None,
ignores=None,
extras=None,
custom_check=None,
):
self._op_name = op_name
self._transforms = transforms if transforms else {}
self._excludes = excludes if excludes else []
self._disables = disables if disables else []
self._ignores = ignores if ignores else []
self._extras = extras if extras else {}
self._custom_check = custom_check
def __call__(self, inputs, attrs, *args):
self._ignores.append("_output_shapes")
self._ignores.append("_input_shapes")
self._ignores.append("T")
self._ignores.append("use_cudnn_on_gpu")
self._ignores.append("_node_name")
self._ignores.append("is_training")
self._ignores.append("_target_layout")
# apply custom check
if self._custom_check:
func, msg = self._custom_check
if not func(attrs):
raise RuntimeError("Check failed: {}".format(msg))
# get new op_name
if isinstance(self._op_name, str):
op_name = self._op_name
else:
assert callable(self._op_name), "op_name can either be string or callable"
op_name = self._op_name(attrs)
# ignore 'tvm_custom' always
self._ignores.append("tvm_custom")
# convert attributes
new_attrs = {}
for k in attrs.keys():
if k in self._excludes:
raise NotImplementedError(
"Attribute {} in operator {} is not supported.".format(k, op_name)
)
if k in self._disables:
logger.debug("Attribute %s is disabled in relay.sym.%s", k, op_name)
elif k in self._ignores:
if k != "tvm_custom":
logger.debug("Attribute %s is ignored in relay.sym.%s", k, op_name)
elif k in self._transforms:
new_name, defaults, transform = self._parse_default(self._transforms[k])
if defaults is None:
new_attr = self._required_attr(attrs, k)
else:
new_attr = attrs.get(k, None)
if new_attr is None:
new_attrs[new_name] = defaults
else:
new_attrs[new_name] = transform(new_attr)
else:
# copy
new_attrs[k] = attrs[k]
# add extras
new_attrs.update(self._extras)
return get_relay_op(op_name)(*inputs, **new_attrs)
def _parse_default(self, target):
"""Helper function to parse default values."""
if not isinstance(target, (list, tuple)):
k, v, t = target, None, lambda x: x
elif len(target) == 1:
k, v, t = target[0], None, lambda x: x
elif len(target) == 2:
k, v, t = target[0], target[1], lambda x: x
elif len(target) > 2:
k, v, t = target[0], target[1], target[2]
else:
k = None # should raise
if not isinstance(k, str):
msg = "{} is not a valid target, (name, default) expected.".format(target)
raise ValueError(msg)
return k, v, t
def _parse_bool(self, value):
"""Helper function to parse default boolean values."""
if isinstance(value, str):
return value.strip().lower() in ["true", "1", "t", "y", "yes"]
return bool(value)
def _required_attr(self, attr, key):
"""Wrapper for getting required attributes."""
assert isinstance(attr, dict)
if key not in attr:
raise AttributeError("Required attribute {} not found.".format(key))
return attr[key]
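# Example usage (editor's sketch): a converter that renames a framework's
# attributes onto relay's conv2d attribute names. The source attribute names
# ("kernel", "num_filter", "layout") and the inputs (data, weight) are hypothetical.
#   conv_cvt = AttrCvt(
#       op_name="conv2d",
#       transforms={
#           "kernel": "kernel_size",                         # plain rename
#           "num_filter": ("channels", 1),                   # rename with a default value
#           "pad": ("padding", (0, 0), lambda x: tuple(x)),  # rename + transform function
#       },
#       ignores=["layout"],
#   )
#   out = conv_cvt([data, weight], {"kernel": (3, 3), "num_filter": 16, "layout": "NCHW"})
#   # -> relay call equivalent to _op.nn.conv2d(data, weight, kernel_size=(3, 3), channels=16)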
def get_name(node):
name = ""
if hasattr(node, "name_hint"):
name = node.name_hint
return name
def infer_type(node, mod=None):
"""A method to infer the type of an intermediate node in the relay graph."""
if isinstance(mod, IRModule):
mod["main"] = _function.Function(tvm.relay.analysis.free_vars(node), node)
mod = _transform.InferType()(mod)
entry = mod["main"]
ret = entry.body
else:
new_mod = IRModule.from_expr(node)
if mod is not None:
new_mod.update(mod)
new_mod = _transform.InferType()(new_mod)
entry = new_mod["main"]
ret = entry if isinstance(node, _function.Function) else entry.body
return ret
def fold_constant(node, mod=None):
if mod is None:
mod = IRModule()
return _transform.FoldConstantExpr(node, mod)
def infer_channels(inputs, transpose=False):
"""A hack for getting 'channels' or 'units' since caffe2 does not provide
these attributes. We check the shape of weights provided to get the number.
"""
out_type = infer_type(inputs)
out_shapes = [get_const_tuple(out_type.checked_type.shape)]
channels = out_shapes[0][0] if not transpose else out_shapes[0][1]
return channels
def infer_shape(inputs, mod=None):
"""A method to get the output type of an intermediate node in the graph."""
out_type = infer_type(inputs, mod=mod)
checked_type = out_type.checked_type
if hasattr(checked_type, "shape"):
# Regular operator that outputs tensors
return get_const_tuple(checked_type.shape)
# The return type is not a tensor, for example List
return checked_type
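# Example usage (editor's sketch): shapes and dtypes of intermediate expressions
# can be queried during conversion without building a module by hand.
#   x = _expr.var("x", shape=(1, 3, 224, 224), dtype="float32")
#   y = _op.nn.relu(x)
#   infer_shape(y)                       # -> (1, 3, 224, 224)
#   infer_type(y).checked_type.dtype     # -> "float32"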
def infer_value(input_val, params, mod=None):
"""A hack for getting the value of an expression by evaluating a
portion of the relay graph. This is often needed for functions whose
output shape depends on the value of a tensor.
"""
# Check that all free variables have associated parameters.
assert all(
var.name_hint in params.keys() for var in analysis.free_vars(input_val)
), "All inputs to infer must be available in params."
assert tvm.runtime.enabled("llvm"), "LLVM must be enabled to infer value."
try:
# TODO(kevinthesun): Use VM for all cases.
# pylint: disable=import-outside-toplevel
from tvm.contrib import graph_executor
func = _function.Function(analysis.free_vars(input_val), input_val)
with tvm.transform.PassContext(opt_level=0):
lib = tvm.relay.build(func, target="llvm", params=params)
dev = tvm.cpu(0)
m = graph_executor.GraphModule(lib["default"](dev))
m.run()
return m.get_output(0)
except Exception:
if isinstance(mod, IRModule):
mod["main"] = _function.Function(analysis.free_vars(input_val), input_val)
else:
mod = IRModule.from_expr(input_val)
inputs = []
for param in mod["main"].params:
inputs.append(params[param.name_hint])
result = tvm.relay.create_executor(
"debug", mod=mod, device=tvm.cpu(), target="llvm"
).evaluate()(*inputs)
return result
def infer_value_simulated(input_val, params):
"""Extension to infer_value that can be used when some input
values are missing. This function creates dummy inputs with the same
shape and random values then calls infer_value. This is helpful when
implementing certain onnx operators where we need to evaluate the graph
to determine a static shape.
"""
fake_params = []
# Add a fake copy of all missing params.
for free_param in analysis.free_vars(input_val):
if free_param.name_hint not in params:
fp_dtype = free_param.type_annotation.dtype
fp_shape = [s.value for s in free_param.type_annotation.shape]
fake_params.append(free_param)
params[free_param.name_hint] = tvm.nd.array(np.random.rand(*fp_shape).astype(fp_dtype))
# Now infer the value.
output_value = infer_value(input_val, params)
# Clean fake params out of param dictionary.
for fake_p in fake_params:
params.pop(fake_p.name_hint, None)
return output_value
def try_infer_value(val, on_success=None, on_failure=None, parameters=None):
"""Try running infer_value on the input val, and if successful, return the inferred value or
pass it to on_success callback if provided. Otherwise, run on_failure callback if it is
provided, or return the input val as output. In each case, the second return value
indicates whether infer_value has succeeded or not.
"""
try:
params = parameters if parameters is not None else {}
ret = infer_value(val, params).numpy()
if on_success:
return on_success(ret), True
return ret, True
except Exception:
if on_failure:
return on_failure(), False
return val, False
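# Example usage (editor's sketch): evaluate a shape expression when it is
# statically computable, and fall back gracefully when it is not.
#   shape_expr = _op.add(_expr.const(np.array([2, 3], dtype="int64")),
#                        _expr.const(np.array([1, 1], dtype="int64")))
#   value, ok = try_infer_value(shape_expr)
#   # ok is True and value is array([3, 4]) since nothing in the graph is symbolic;
#   # if evaluation fails, the original expression is returned with ok == False.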
def shape_of(x, dtype="int64", start=None, end=None):
"""Get shape of a tensor."""
ttype = infer_type(x).checked_type
if not _ty.is_dynamic(ttype):
shape = list(ttype.shape)
start = start or 0 # default to first
end = end or len(shape) # default to last
shape_sliced = shape[start:end]
return _expr.const(shape_sliced, dtype)
return _op.shape_of(x, dtype)
def new_var(name_hint, type_annotation=None, shape=None, dtype="float32"):
return _expr.var(name_hint, type_annotation, shape, dtype)
class Renamer(object):
"""A simply renamer for operators.
Parameters
----------
new_name : str
The new name for the operator
"""
def __init__(self, new_name):
self._new_name = new_name
def __call__(self, inputs, attrs, *args):
if "tvm_custom" in attrs:
attrs.pop("tvm_custom")
return get_relay_op(self._new_name)(*inputs, **attrs)
def to_int_list(np_array):
"""Convert a np array to a python int list.
Note: This function converts np.int32 to python's int.
Without this conversion, numpy's automatic upcasting would turn
the shape / parameters into int64 IntImm in relay and
cause problems in relay/TOPI.
"""
return [int(x) for x in np_array]
def unbind(data, axis=0):
"""
Unbind was taken from the PyTorch frontend. The operation removes a tensor dimension
and returns a tuple of all slices along a given dimension, with the specified axis removed.
TODO (vvchernov): a native relay operation is needed to avoid the overhead of
the per-slice squeeze operations.
Parameters
----------
data : relay.Expr
Input tensor
axis : int
Axis along which tensor is split.
Returns
-------
result : List[relay.Expr]
The sequence of computed tensors
"""
shape = infer_shape(data)
if axis >= len(shape):
msg = "Please check input dim, it shouldn't be greater than or equal to rank."
raise AttributeError(msg)
selections = shape[axis]
res_split = _op.split(data, selections, axis)
ret = []
for i in range(selections):
ret.append(_op.squeeze(res_split[i], axis=[axis]))
return _expr.TupleWrapper(_expr.Tuple(ret), selections)
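# Example usage (editor's sketch): slicing a (4, 2, 3) tensor along axis 1
# produces two tensors of shape (4, 3) with the split axis squeezed away.
#   t = _expr.var("t", shape=(4, 2, 3), dtype="float32")
#   slices = unbind(t, axis=1)   # TupleWrapper of 2 expressions, each of shape (4, 3)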
def rnn_cell(
input_seqs, hidden_state, w_inp, w_hid, b_inp=None, b_hid=None, backwards=False, act=_op.tanh
):
"""
Common implementation of RNN cell for all frontends of TVM
Parameters
----------
input_seqs : List[relay.Expr]
The sequence of input tensors
Input tensor should be 2d while issue #8412 is not resolved
Shape = (batch, feature_size)
hidden_state : relay.Expr
Hidden state. shape = (batch_size, hidden_size)
w_inp, w_hid: relay.Expr
weight matrices. w_inp shape = (hidden_size, feature_size), w_hid shape = (hidden_size, hidden_size)
b_inp, b_hid : relay.Expr
bias matrices. The same order of internal parts as for weights. shape = (1 * hidden_size)
backwards : bool
Flag for reverse pass of RNN
act : relay.op
activation function. It is tanh by default.
Returns
-------
result : List[relay.Expr], relay.Expr
The sequence of computed results and the final hidden state
"""
outputs_list = []
for x_t in input_seqs if not backwards else reversed(input_seqs):
xwt = _op.nn.dense(x_t, w_inp)
hwt = _op.nn.dense(hidden_state, w_hid)
if b_inp is not None and b_hid is not None:
xwt += b_inp
hwt += b_hid
hidden_state = act(xwt + hwt)
outputs_list.append(hidden_state) # [seq_num, (batch, hidden_size)]
return outputs_list, hidden_state
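# The cell above implements the standard Elman RNN recurrence (editor's note):
#   h_t = act(x_t @ w_inp^T + b_inp + h_{t-1} @ w_hid^T + b_hid)
# applied once per element of input_seqs (in reverse order when backwards=True),
# collecting every intermediate h_t in outputs_list.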
def gru_cell(
input_seqs,
hidden_state,
w_inp,
w_hid,
b_inp=None,
b_hid=None,
rz_act=_op.sigmoid,
n_act=_op.tanh,
backwards=False,
linear_before_reset=True,
):
"""
Common implementation of GRU cell for all frontends of TVM
TODO(vvchernov): currently it is used by pytorch and ONNX. Extend for other frontends
Parameters
----------
input_seqs : List[relay.Expr]
The sequence of input tensors
Input tensor should be 2d while issue #8412 is not resolved
Shape = (batch, feature_size)
hidden_state : relay.Expr
Hidden state. shape = (batch_size, hidden_size)
w_inp, w_hid : relay.Expr
weight matrices. wi shape = (3 * hidden_size, feature_size)
wh shape = (3 * hidden_size, hidden_size)
NOTE: wi = (w_ir|w_iz|w_in) for reset, update and new gates.
The order is important for correct GRU calculation!
b_inp, b_hid : relay.Expr
bias matrices. The same order of internal parts as for weights. shape = (3 * hidden_size)
rz_act : relay.op
activation function for reset and update gates. it is sigmoid by default
n_act : relay.op
activation function for new gate. it is tanh by default
backwards : bool
Flag for reverse pass of GRU
linear_before_reset : bool
If True, the hidden state is multiplied by w_hid before the reset gate is applied;
otherwise the reset gate is applied to the hidden state first
Returns
-------
result : List[relay.Expr], relay.Expr
The sequence of computed results and the final hidden state
"""
outputs_list = []
for x_t in input_seqs if not backwards else reversed(input_seqs):
xwt = _op.nn.dense(x_t, w_inp)
if linear_before_reset:
hwt = _op.nn.dense(hidden_state, w_hid)
if b_inp is not None and b_hid is not None:
xwt += b_inp
hwt += b_hid
i_r, i_z, i_n = _op.split(xwt, 3, axis=-1)
h_r, h_z, h_n = _op.split(hwt, 3, axis=-1)
r_gate = rz_act(i_r + h_r)
z_gate = rz_act(i_z + h_z)
n_gate = n_act(i_n + r_gate * h_n)
else:
i_r, i_z, i_n = _op.split(xwt, 3, axis=1)
w_hr, w_hz, w_hn = _op.split(w_hid, 3, axis=0)
r_gate = i_r + _op.nn.dense(hidden_state, w_hr)
z_gate = i_z + _op.nn.dense(hidden_state, w_hz)
if b_inp is not None and b_hid is not None:
b_ir, b_iz, b_in = _op.split(b_inp, 3, axis=-1)
b_hr, b_hz, b_hn = _op.split(b_hid, 3, axis=-1)
r_gate += b_ir + b_hr
r_gate = rz_act(r_gate)
z_gate += b_iz + b_hz
i_n += b_in
h_n = _op.nn.dense((r_gate * hidden_state), w_hn) + b_hn
else:
r_gate = rz_act(r_gate)
h_n = _op.nn.dense((r_gate * hidden_state), w_hn)
z_gate = rz_act(z_gate)
n_gate = n_act(i_n + h_n)
hidden_state = (hidden_state - n_gate) * z_gate + n_gate
outputs_list.append(hidden_state) # [seq_num, (batch, hidden_size)]
return outputs_list, hidden_state
def lstm_cell(
input_seqs,
hidden_state,
cell_state,
w_inp,
w_hid,
b_inp=None,
b_hid=None,
proj=None,
p_i=None,
p_f=None,
p_o=None,
f_act=_op.sigmoid,
g_act=_op.tanh,
h_act=_op.tanh,
backwards=False,
):
"""
Common implementation of LSTM cell for all frontends of TVM
TODO (vvchernov): currently it is used by onnx and pytorch. Extend for other frontends
Parameters
----------
input_seqs : List[relay.Expr]
The sequence of input tensors
Input tensor should be 2d while issue #8412 is not resolved
Shape = (batch, feature_size)
hidden_state : relay.Expr
Hidden state. shape = (batch, hidden_size)
cell_state : relay.Expr
Cell state. shape = (batch, hidden_size)
w_inp, w_hid : relay.Expr
weight matrices. wi shape = (4 * hidden_size, feature_size)
wh shape = (4 * hidden_size, hidden_size or proj_size)
NOTE: wi = (w_ii|w_if|w_ig|w_io) for input, forget, cell and output gates.
The order is important for correct LSTM calculation!
b_inp, b_hid : relay.Expr
bias matrices. The same order of internal parts as for weights. shape = (4 * hidden_size)
proj : relay.Expr
projection matrix. shape = (proj_size, hidden_size)
p_i, p_f, p_o : relay.Expr
peephole LSTM matrices. shape = (batch, hidden_size)
f_act, g_act, h_act : relay.op
activation functions
backwards : bool
Flag for reverse pass of LSTM
Returns
-------
result : List[relay.Expr], relay.Expr, relay.Expr
The sequence of computed result, final hidden and cell state
"""
outputs_list = []
for x_t in input_seqs if not backwards else reversed(input_seqs):
# x_t shape = (batch, feature size), step shape = (batch, feature size + hidden_size)
step = _op.concatenate([x_t, hidden_state], axis=1)
cat_w = _op.concatenate([w_inp, w_hid], axis=1)
# Instead of nn.dense(x_t, w_inp) + nn.dense(hidden_state, w_hid)
# nn.dense(step, cat_w) is used
# gates shape = (batch, 4 * hidden_size)
gates = _op.nn.dense(step, cat_w)
# Add biases
if b_inp is not None:
gates += b_inp
if b_hid is not None:
gates += b_hid
# any gate shape = (batch, hidden_size)
inp_gate, fgt_gate, cell_gate, otp_gate = _op.split(gates, 4, axis=-1)
if p_i is not None and p_f is not None:
inp_gate = f_act(inp_gate + p_i * cell_state)
fgt_gate = f_act(fgt_gate + p_f * cell_state)
else:
inp_gate = f_act(inp_gate)
fgt_gate = f_act(fgt_gate)
cell_gate = g_act(cell_gate)
cell_state = fgt_gate * cell_state + inp_gate * cell_gate
if p_o is not None:
otp_gate = f_act(otp_gate + p_o * cell_state)
else:
otp_gate = f_act(otp_gate)
hidden_state = otp_gate * h_act(cell_state)
if proj is not None:
hidden_state = _op.nn.dense(hidden_state, proj)
outputs_list.append(hidden_state) # [seq_num, (batch, hidden_size)]
return outputs_list, hidden_state, cell_state
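# The cell above follows the standard (optionally peephole / projected) LSTM
# equations (editor's note), with f_act, g_act and h_act as configured:
#   i_t = f_act(W_i [x_t, h_{t-1}] + b  (+ p_i * c_{t-1}))
#   f_t = f_act(W_f [x_t, h_{t-1}] + b  (+ p_f * c_{t-1}))
#   g_t = g_act(W_g [x_t, h_{t-1}] + b)
#   c_t = f_t * c_{t-1} + i_t * g_t
#   o_t = f_act(W_o [x_t, h_{t-1}] + b  (+ p_o * c_t))
#   h_t = o_t * h_act(c_t)              (optionally h_t = h_t @ proj^T)
# where [x_t, h_{t-1}] is the concatenation used to fuse the two dense ops.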
def autopad(
data,
strides,
kernel_shape,
dilations=(1, 1),
pad_type="constant",
deconv=False,
mode="SAME_UPPER",
pad_value=0.0,
):
"""
Perform autopadding with dynamic input shapes
"""
# get attributes as constants
strides = _op.const(np.array(strides), dtype="int64")
dilated_kernel_shape = _op.const(
np.array(
[(kernel - 1) * dilation + 1 for kernel, dilation in zip(kernel_shape, dilations)]
),
dtype="int64",
)
# get input shape
ndim = len(infer_shape(data))
shape = _op.strided_slice(shape_of(data, dtype="int64"), [2], [ndim])
# set up integer constants
zero = _op.const(0, dtype="int64")
one = _op.const(1, dtype="int64")
two = _op.const(2, dtype="int64")
# Calculate total padding
mod = _op.mod(shape, strides)
left = _op.maximum(dilated_kernel_shape - strides, zero)
right = _op.maximum(dilated_kernel_shape - mod, zero)
total_pad = _op.where(_op.equal(mod, zero), left, right)
if deconv:
total_pad = _op.const(np.array(kernel_shape), dtype="int64") - one - total_pad
# split total padding into before and after
pad_before = _op.floor_divide(total_pad, two)
pad_after = total_pad - pad_before
# combine
if "LOWER" in mode:
pad = _op.concatenate(
[_op.reshape(pad_after, [-1, 1]), _op.reshape(pad_before, [-1, 1])], axis=1
)
else:
pad = _op.concatenate(
[_op.reshape(pad_before, [-1, 1]), _op.reshape(pad_after, [-1, 1])], axis=1
)
# pad N and C with zeros
pad = _op.concatenate([_op.const(np.zeros([2, 2], dtype="int64"), dtype="int64"), pad], axis=0)
if isinstance(pad_value, (float, int)):
pad_value = _op.const(pad_value)
return _op.nn.pad(data, fold_constant(pad), pad_value, pad_type)
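# Worked example (editor's note) for mode="SAME_UPPER": with an input of
# spatial size 5, kernel 3, stride 2 and dilation 1, the dilated kernel is 3,
# 5 mod 2 == 1, so total_pad = max(3 - 1, 0) = 2, split as pad_before = 1 and
# pad_after = 1; the padded convolution then produces ceil(5 / 2) = 3 outputs.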
def ensure_scalar_shape(x):
"""
Assume that `x` is a tensor with one element (regardless of tensor rank).
Return a version of that tensor with rank 0.
"""
x_shape = infer_shape(x)
x_rank = len(x_shape)
if x_rank == 0:
return x
num_elem = np.prod(x_shape)
assert num_elem == 1, "Cannot squeeze tensor shape {} to scalar form.".format(x_shape)
return _op.squeeze(x)
def try_resolve_var_to_const(x, graph_params):
"""
Try to resolve the value of tensor `x` to a specific value.
If successful, return a Const op with that value.
If unsuccessful, simply return `x`.
"""
if isinstance(x, _expr.Var) and x.name_hint in graph_params:
value = graph_params[x.name_hint].numpy()
dtype = infer_type(x).checked_type.dtype
return _op.const(value, dtype)
return x
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/coreml.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, unused-argument, unused-variable, no-else-return
# pylint: disable=inconsistent-return-statements, import-outside-toplevel
"""CoreML frontend."""
import math
import numpy as np
import tvm
from tvm.ir import IRModule
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from ... import nd as _nd
from ..._ffi import base as _base
from .common import ExprTable
from .common import infer_shape as _infer_shape
__all__ = ["from_coreml"]
def _NeuralNetworkImageScaler(op, inexpr, etab):
# TODO: we need to support more colorspace, such as rgb.
# this changes the symbol
biases = np.array([op.blueBias, op.greenBias, op.redBias]).reshape([3, 1, 1])
bias = etab.new_const(biases)
ret = _op.multiply(inexpr, _expr.const(op.channelScale, dtype="float32"))
ret = _op.add(ret, bias)
return ret
def _NeuralNetworkMeanImage(op, inexpr, etab):
# this changes the symbol
ret = _op.subtract(inexpr, _expr.const(op.meanImage, dtype="float32"))
return ret
def _ConvolutionLayerParams(op, inexpr, etab):
"""Convolution layer params."""
if op.isDeconvolution:
weights = etab.new_const(
np.array(list(op.weights.floatValue)).reshape(
tuple([op.kernelChannels, op.outputChannels] + list(op.kernelSize))
)
)
else:
weights = etab.new_const(
np.array(list(op.weights.floatValue)).reshape(
tuple([op.outputChannels, op.kernelChannels] + list(op.kernelSize))
)
)
dilation = list(op.dilationFactor)
if not dilation:
dilation = [1, 1]
N, C, H, W = _infer_shape(inexpr)
params = {
"channels": op.outputChannels,
"kernel_size": list(op.kernelSize),
"strides": list(op.stride),
"dilation": dilation,
"groups": op.nGroups,
}
if op.WhichOneof("ConvolutionPaddingType") == "valid":
valid = op.valid
if valid.paddingAmounts.borderAmounts:
assert len(valid.paddingAmounts.borderAmounts) == 2
pad_t = valid.paddingAmounts.borderAmounts[0].startEdgeSize
pad_l = valid.paddingAmounts.borderAmounts[1].startEdgeSize
pad_b = valid.paddingAmounts.borderAmounts[0].endEdgeSize
pad_r = valid.paddingAmounts.borderAmounts[1].endEdgeSize
if not all(v == 0 for v in (pad_t, pad_l, pad_b, pad_r)):
params["padding"] = (pad_t, pad_l, pad_b, pad_r)
elif op.WhichOneof("ConvolutionPaddingType") == "same":
assert op.same.asymmetryMode == 0, (
"Only support BOTTOM_RIGHT_HEAVY mode, " "which is used by tf/caffe and so on"
)
kernel = params["kernel_size"]
strides = params["strides"]
pad_t, pad_b = get_pad_value(H, kernel[0], strides[0])
pad_l, pad_r = get_pad_value(W, kernel[1], strides[1])
params["padding"] = (pad_t, pad_l, pad_b, pad_r)
else:
raise NotImplementedError("Valid/Same convolution padding implemented")
if op.isDeconvolution:
ret = _op.nn.conv2d_transpose(data=inexpr, weight=weights, **params)
else:
ret = _op.nn.conv2d(data=inexpr, weight=weights, **params)
if op.hasBias:
biases = etab.new_const(list(op.bias.floatValue))
ret = _op.nn.bias_add(ret, biases)
return ret
def _BatchnormLayerParams(op, inexpr, etab):
"""Get layer of batchnorm parameter"""
# this changes the symbol
if op.instanceNormalization:
raise tvm.error.OpNotImplemented(
'Operator "instance normalization" is not supported in frontend CoreML.'
)
params = {
"gamma": etab.new_const(list(op.gamma.floatValue)),
"beta": etab.new_const(list(op.beta.floatValue)),
"moving_mean": etab.new_const(list(op.mean.floatValue)),
"moving_var": etab.new_const(list(op.variance.floatValue)),
"epsilon": op.epsilon,
}
result, moving_mean, moving_var = _op.nn.batch_norm(data=inexpr, **params)
return result
def _ActivationParams(op, inexpr, etab):
"""Get activation parameters"""
whichActivation = op.WhichOneof("NonlinearityType")
par = getattr(op, whichActivation)
if whichActivation == "linear":
alpha = _expr.const(par.alpha, dtype="float32")
beta = _expr.const(par.beta, dtype="float32")
return _op.add(_op.multiply(inexpr, alpha), beta)
if whichActivation == "ReLU":
return _op.nn.relu(inexpr)
if whichActivation == "leakyReLU":
return _op.nn.leaky_relu(inexpr, alpha=par.alpha)
elif whichActivation == "thresholdedReLU":
alpha_tensor = _op.full_like(inexpr, fill_value=_expr.const(par.alpha, dtype="float32"))
return _op.multiply(inexpr, _op.greater(inexpr, alpha_tensor).astype("float32"))
if whichActivation == "PReLU":
return _op.nn.prelu(inexpr, alpha=_expr.const(par.alpha, dtype="float32"))
if whichActivation == "tanh":
return _op.tanh(inexpr)
if whichActivation == "scaledTanh":
alpha = _expr.const(par.alpha, dtype="float32")
beta = _expr.const(par.beta, dtype="float32")
return _op.multiply(_op.tanh(_op.multiply(inexpr, beta)), alpha)
if whichActivation == "sigmoid":
return _op.sigmoid(inexpr)
if whichActivation == "sigmoidHard":
alpha = _expr.const(par.alpha, dtype="float32")
beta = _expr.const(par.beta, dtype="float32")
transformX = (alpha * inexpr) + beta
return _op.clip(transformX, a_min=0.0, a_max=1.0)
if whichActivation == "ELU":
return _op.multiply(
_op.add(_op.exp(inexpr), _expr.const(-1, dtype="float32")),
_expr.const(par.alpha, dtype="float32"),
)
if whichActivation == "softsign":
return inexpr / (
_expr.const(1, dtype="float32")
+ (_op.nn.relu(inexpr) + _op.nn.relu(_op.negative(inexpr)))
)
if whichActivation == "softplus":
return _op.log(_op.add(_op.exp(inexpr), _expr.const(1, dtype="float32")))
if whichActivation == "parametricSoftplus":
alpha = list(par.alpha.floatValue)
beta = list(par.beta.floatValue)
if len(alpha) == 1:
return _op.multiply(
_op.log(_op.add(_op.exp(inexpr), _expr.const(beta[0], dtype="float32"))),
_expr.const(alpha[0], dtype="float32"),
)
alpha = np.array(alpha).reshape((len(alpha), 1, 1))
beta = np.array(beta).reshape((len(beta), 1, 1))
alpha_expr = etab.new_const(alpha)
beta_expr = etab.new_const(beta)
return _op.multiply(_op.log(_op.add(_op.exp(inexpr), beta_expr)), alpha_expr)
raise tvm.error.OpNotImplemented(
"Operator {} is not supported in frontend CoreML.".format(whichActivation)
)
def _ScaleLayerParams(op, inexpr, etab):
"""Scale layer params."""
scale = etab.new_const(
np.array(list(op.scale.floatValue)).reshape(tuple(list(op.shapeScale) + [1, 1]))
)
ret = _op.multiply(inexpr, scale)
if op.hasBias:
bias = etab.new_const(
np.array(list(op.bias.floatValue)).reshape(tuple(list(op.shapeBias) + [1, 1]))
)
ret = _op.add(ret, bias)
return ret
def _PoolingLayerParams(op, inexpr, etab):
"""get pooling parameters"""
if op.globalPooling:
if op.type == 0:
return _op.nn.global_max_pool2d(inexpr)
if op.type == 1:
return _op.nn.global_avg_pool2d(inexpr)
raise tvm.error.OpNotImplemented(
"Only Max and Average Pooling are supported in frontend CoreML."
)
params = {"pool_size": list(op.kernelSize), "strides": list(op.stride)}
if op.WhichOneof("PoolingPaddingType") == "valid":
valid = op.valid
if valid.paddingAmounts.borderAmounts:
assert len(valid.paddingAmounts.borderAmounts) == 2
pad_t = valid.paddingAmounts.borderAmounts[0].startEdgeSize
pad_l = valid.paddingAmounts.borderAmounts[1].startEdgeSize
pad_b = valid.paddingAmounts.borderAmounts[0].endEdgeSize
pad_r = valid.paddingAmounts.borderAmounts[1].endEdgeSize
if not all(v == 0 for v in (pad_t, pad_l, pad_b, pad_r)):
params["padding"] = [pad_t, pad_l, pad_b, pad_r]
elif op.WhichOneof("PoolingPaddingType") == "includeLastPixel":
# I don't know if this is correct
valid = op.includeLastPixel
padding = list(valid.paddingAmounts)
params["padding"] = padding
params["ceil_mode"] = True
else:
msg = "PoolingPaddingType {} is not supported in operator Pooling."
op_name = op.WhichOneof("PoolingPaddingType")
raise tvm.error.OpAttributeUnImplemented(msg.format(op_name))
if op.type == 0:
return _op.nn.max_pool2d(inexpr, **params)
if op.type == 1:
return _op.nn.avg_pool2d(inexpr, **params)
raise tvm.error.OpNotImplemented("Only Max and Average Pooling are supported in CoreML.")
def _SoftmaxLayerParams(op, inexpr, etab):
return _op.nn.softmax(_op.nn.batch_flatten(inexpr))
def _InnerProductLayerParams(op, inexpr, etab):
weights = etab.new_const(
np.array(op.weights.floatValue).reshape((op.outputChannels, op.inputChannels))
)
out = _op.nn.dense(data=inexpr, weight=weights, units=op.outputChannels)
if op.hasBias:
bias = etab.new_const(np.array(op.bias.floatValue))
out = _op.nn.bias_add(out, bias)
return out
def _AddLayerParams(op, inexpr, etab):
if not isinstance(inexpr, list):
inexpr = [inexpr]
ret = inexpr[0]
for i in range(1, len(inexpr)):
ret = _op.add(ret, inexpr[i])
if op.alpha > 0:
ret = _op.add(ret, _expr.const(op.alpha, dtype="float32"))
return ret
def _MultiplyLayerParams(op, inexpr, etab):
if not isinstance(inexpr, list):
inexpr = [inexpr]
ret = inexpr[0]
for i in range(1, len(inexpr)):
ret = _op.multiply(ret, inexpr[i])
if op.alpha != 1:
ret = _op.multiply(ret, _expr.const(op.alpha, dtype="float32"))
return ret
def _ConcatLayerParams(op, inexpr, etab):
if not isinstance(inexpr, list):
inexpr = [inexpr]
if op.sequenceConcat:
raise tvm.error.OpNotImplemented(
"Operator Sequence Concat is not supported in frontend CoreML."
)
ret = _op.concatenate(inexpr, axis=1)
return ret
def _FlattenLayerParams(op, inexpr, etab):
if op.mode == 1:
inexpr = _op.transpose(_op.reshape(inexpr, newshape=(0, 0, -1)), axes=(0, 2, 1))
return _op.nn.batch_flatten(inexpr)
def _PaddingLayerParams(op, inexpr, etab):
"""Padding layer params."""
if op.WhichOneof("PaddingType") == "constant":
constant = op.constant
if constant.value != 0:
raise tvm.error.OpAttributeUnImplemented(
"{} is not supported in operator Padding.".format(constant.value)
)
pad_t = op.paddingAmounts.borderAmounts[0].startEdgeSize
pad_l = op.paddingAmounts.borderAmounts[1].startEdgeSize
pad_b = op.paddingAmounts.borderAmounts[0].endEdgeSize
pad_r = op.paddingAmounts.borderAmounts[1].endEdgeSize
return _op.nn.pad(data=inexpr, pad_width=((0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)))
raise tvm.error.OpNotImplemented("Non-constant padding is not supported in frontend CoreML.")
def _PermuteLayerParams(op, inexpr, etab):
axes = tuple(op.axis)
return _op.transpose(inexpr, axes=axes)
def _UpsampleLayerParams(op, inexpr, etab):
if op.scalingFactor[0] != op.scalingFactor[1]:
raise tvm.error.OpAttributeUnImplemented("Upsample height and width must be equal.")
interpolationMode = "nearest_neighbor" if op.mode == 0 else "bilinear"
return _op.nn.upsampling(
inexpr, scale_h=op.scalingFactor[0], scale_w=op.scalingFactor[1], method=interpolationMode
)
def _L2NormalizeLayerParams(op, inexpr, etab):
return _op.nn.l2_normalize(inexpr, eps=op.epsilon, axis=[1])
def _LRNLayerParams(op, inexpr, etab):
par = {}
par["size"] = op.localSize
par["bias"] = op.k
par["alpha"] = op.alpha
par["beta"] = op.beta
par["axis"] = 1 # default layout is nchw
return _op.nn.lrn(data=inexpr, **par)
def _AverageLayerParams(op, inexpr, etab):
if not isinstance(inexpr, list) or len(inexpr) < 2:
raise ValueError("Expect minimum 2 inputs")
count = len(inexpr)
_sum = inexpr[0]
for i in range(1, count):
_sum = _op.add(_sum, inexpr[i])
return _sum / _expr.const(count, dtype="float32")
def _MaxLayerParams(op, inexpr, etab):
if not isinstance(inexpr, list) or len(inexpr) < 2:
raise ValueError("Expect minimum 2 inputs")
_max = inexpr[0]
for i in range(1, len(inexpr)):
_max = _op.maximum(_max, inexpr[i])
return _max
def _MinLayerParams(op, inexpr, etab):
if not isinstance(inexpr, list) or len(inexpr) < 2:
raise ValueError("Expect minimum 2 inputs")
_min = inexpr[0]
for i in range(1, len(inexpr)):
_min = _op.minimum(_min, inexpr[i])
return _min
def _UnaryFunctionLayerParams(op, inexpr, etab):
op_type = op.type
if op_type == op.SQRT:
return _op.sqrt(inexpr)
elif op_type == op.RSQRT:
epsilon = _expr.const(op.epsilon)
return _op.rsqrt(inexpr + epsilon)
elif op_type == op.INVERSE:
epsilon = _expr.const(op.epsilon)
return _expr.const(1.0) / (inexpr + epsilon)
elif op_type == op.POWER:
alpha = _expr.const(op.alpha)
return _op.power(inexpr, alpha)
elif op_type == op.EXP:
return _op.exp(inexpr)
elif op_type == op.LOG:
return _op.log(inexpr)
elif op_type == op.ABS:
return _op.abs(inexpr)
elif op_type == op.THRESHOLD:
alpha = _expr.const(op.alpha)
return _op.maximum(inexpr, alpha)
else:
msg = "Unary Op type value {} is not supported in frontend CoreML."
raise tvm.error.OpAttributeUnImplemented(msg.format(op_type))
def _ReduceLayerParams(op, inexpr, etab):
axis = op.axis
if axis == op.CHW:
axis = [-3, -2, -1]
elif axis == op.HW:
axis = [-2, -1]
elif axis == op.C:
axis = -3
elif axis == op.H:
axis = -2
elif axis == op.W:
axis = -1
else:
msg = "Reduce axis value {} is not supported in frontend CoreML."
raise tvm.error.OpAttributeUnImplemented(msg.format(axis))
mode = op.mode
if mode == op.SUM:
return _op.sum(inexpr, axis=axis, keepdims=True)
elif mode == op.AVG:
return _op.mean(inexpr, axis=axis, keepdims=True)
elif mode == op.PROD:
return _op.prod(inexpr, axis=axis, keepdims=True)
elif mode == op.MIN:
return _op.min(inexpr, axis=axis, keepdims=True)
elif mode == op.MAX:
return _op.max(inexpr, axis=axis, keepdims=True)
elif mode == op.ARGMAX:
return _op.argmax(inexpr, axis=axis, keepdims=True)
else:
msg = "Reduce mode value {} is not supported in frontend CoreML."
raise tvm.error.OpAttributeUnImplemented(msg.format(mode))
def _ReshapeLayerParams(op, inexpr, etab):
return _op.reshape(inexpr, op.targetShape)
def _SplitLayerParams(op, inexpr, etab):
return _op.split(inexpr, op.nOutputs, axis=-3)
_convert_map = {
"NeuralNetworkMeanImage": _NeuralNetworkMeanImage,
"NeuralNetworkImageScaler": _NeuralNetworkImageScaler,
"ConvolutionLayerParams": _ConvolutionLayerParams,
"BatchnormLayerParams": _BatchnormLayerParams,
"ActivationParams": _ActivationParams,
"ScaleLayerParams": _ScaleLayerParams,
"PoolingLayerParams": _PoolingLayerParams,
"SoftmaxLayerParams": _SoftmaxLayerParams,
"InnerProductLayerParams": _InnerProductLayerParams,
"AddLayerParams": _AddLayerParams,
"MultiplyLayerParams": _MultiplyLayerParams,
"FlattenLayerParams": _FlattenLayerParams,
"ConcatLayerParams": _ConcatLayerParams,
"PaddingLayerParams": _PaddingLayerParams,
"PermuteLayerParams": _PermuteLayerParams,
"UpsampleLayerParams": _UpsampleLayerParams,
"L2NormalizeLayerParams": _L2NormalizeLayerParams,
"LRNLayerParams": _LRNLayerParams,
"AverageLayerParams": _AverageLayerParams,
"MaxLayerParams": _MaxLayerParams,
"MinLayerParams": _MinLayerParams,
"UnaryFunctionLayerParams": _UnaryFunctionLayerParams,
"ReduceLayerParams": _ReduceLayerParams,
"ReshapeLayerParams": _ReshapeLayerParams,
"SplitLayerParams": _SplitLayerParams,
}
# SAME padding: https://www.tensorflow.org/api_guides/python/nn
def get_pad_value(data, kernel, stride):
"""Get the pad tuple of value for SAME padding
Parameters
----------
data:
1D input data
kernel:
1D input kernel
stride:
1D input stride
Returns
-------
pad tuple of value
"""
out = int(math.ceil(float(data) / float(stride)))
pad = max(0, (out - 1) * stride + kernel - data)
pad_before = pad // 2
pad_after = pad - pad_before
return pad_before, pad_after
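# Worked example (editor's note): for data=224, kernel=3, stride=2 the SAME
# output size is ceil(224 / 2) = 112, so pad = max(0, (112 - 1) * 2 + 3 - 224) = 1,
# returned as (pad_before, pad_after) = (0, 1).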
def coreml_op_to_relay(op, inname, outnames, etab):
"""Convert coreml layer to a Relay expression and update the expression table.
Parameters
----------
op: a CoreML protobuf layer or preprocessor message
inname : str or list of str
Name of the input Relay expression.
outnames : str or list of str
Name of the output Relay expression.
etab : relay.frontend.common.ExprTable
The global expression table to be updated.
"""
classname = type(op).__name__
if classname not in _convert_map:
raise tvm.error.OpNotImplemented(
"Operator {} is not supported in frontend CoreML.".format(classname)
)
if isinstance(inname, _base.string_types):
insym = etab.get_expr(inname)
else:
insym = [etab.get_expr(i) for i in inname]
outs = _convert_map[classname](op, insym, etab)
if outnames:
if isinstance(outnames, _base.string_types) or len(outnames) == 1:
outname = outnames if isinstance(outnames, _base.string_types) else outnames[0]
etab.set_expr(outname, outs, force_override=True)
else:
# the number of outputs from the model op and from tvm relay must be the same
assert len(outnames) == len(outs)
for outname, out in zip(outnames, outs):
etab.set_expr(outname, out, force_override=True)
def from_coreml(model, shape=None):
"""Convert from coreml model into Relay Function.
Parameters
----------
model:
coremltools.models.MLModel of a NeuralNetworkClassifier
shape : dict of str to int list/tuple, optional
The input shapes
Returns
-------
mod : tvm.IRModule
The relay module for compilation.
params : dict of str to tvm.nd.NDArray
The parameter dict to be used by Relay.
"""
try:
import coremltools as cm
except ImportError:
raise ImportError("The coremltools package must be installed")
assert isinstance(model, cm.models.MLModel)
spec = model.get_spec()
modeltype = spec.WhichOneof("Type")
assert modeltype in ["neuralNetworkClassifier", "neuralNetwork", "neuralNetworkRegressor"]
cc = getattr(spec, modeltype)
etab = ExprTable()
for i in spec.description.input:
input_shape = list(shape[i.name]) if shape is not None and i.name in shape else None
etab.set_expr(i.name, _expr.var(i.name, shape=input_shape))
for pp in cc.preprocessing:
whichpp = pp.WhichOneof("preprocessor")
ppmethod = getattr(pp, whichpp)
if whichpp == "scaler":
# Be careful: when there are multiple inputs we may preprocess only one of them,
# identified by pp.featureName. See the unit test verify_image_scaler
# in test_forward.py for CoreML.
for i in spec.description.input:
# we have multi inputs
if len(spec.description.input) > 1:
assert pp.featureName != ""
if i.name == pp.featureName:
coreml_op_to_relay(ppmethod, i.name, i.name, etab)
else:
assert pp.featureName == ""
coreml_op_to_relay(ppmethod, i.name, i.name, etab)
else:
coreml_op_to_relay(ppmethod, pp.featureName, pp.featureName, etab)
for l in cc.layers:
layertype = l.WhichOneof("layer")
layerop = getattr(l, layertype)
if len(l.input) == 1:
coreml_op_to_relay(layerop, l.input[0], l.output, etab)
else:
coreml_op_to_relay(layerop, list(l.input), l.output, etab)
outexpr = [
etab.get_expr(o.name) if o.name in etab.exprs else _expr.var(o.name)
for o in spec.description.output
]
# check there are multiple outputs in the model and all are there in etab
multi_out = all([bool(o.name in etab.exprs) for o in spec.description.output])
outexpr = _expr.Tuple(outexpr) if multi_out else outexpr[0]
func = _function.Function(analysis.free_vars(outexpr), outexpr)
params = {k: _nd.array(np.array(v, dtype=np.float32)) for k, v in etab.params.items()}
return IRModule.from_expr(func), params
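# Example usage (editor's sketch; the model path and input name are hypothetical):
#   import coremltools as cm
#   mlmodel = cm.models.MLModel("mobilenet.mlmodel")
#   mod, params = from_coreml(mlmodel, shape={"image": (1, 3, 224, 224)})
#   with tvm.transform.PassContext(opt_level=3):
#       lib = tvm.relay.build(mod, target="llvm", params=params)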
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/darknet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""
DarkNet symbol frontend for Relay.
"""
from enum import Enum
import numpy as np
import tvm
from tvm.ir import IRModule
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .common import get_relay_op, new_var
__all__ = ["from_darknet"]
def _darknet_not_support(attr, op="relay"):
"""Raise error if any operation is not supported."""
err = "{} is not supported in {}.".format(attr, op)
raise NotImplementedError(err)
def _get_params_prefix(opname, layer_num):
"""Makes the params prefix name from opname and layer number."""
return str(opname).replace(".", "_") + str(layer_num)
def _get_params_name(prefix, item):
"""Makes the params name for the k,v pair."""
return prefix + "_" + item
def _get_param_var(params, prefix, item):
name = _get_params_name(prefix, item)
if name not in params:
raise AttributeError("{} not found in params dict.".format(name))
return new_var(name, shape=params[name].shape, dtype=params[name].dtype)
def _darknet_maxpooling(inputs, params, attrs, prefix):
"""Process the max pool 2d operation."""
new_attrs = {}
kernel = attrs.get("kernel")
strides = attrs.get("stride", 1)
pads = attrs.get("pad", 1)
new_attrs["pool_size"] = (kernel, kernel)
new_attrs["strides"] = (strides, strides)
new_attrs["padding"] = (pads, pads)
extra_pad_size = attrs.get("extra_pad_size", 0)
if extra_pad_size:
pad_width = ((0, 0), (0, 0), (0, extra_pad_size), (0, extra_pad_size))
inputs = [
get_relay_op("pad")(*inputs, pad_width=pad_width, pad_value=np.finfo(np.float32).min)
]
return get_relay_op("max_pool2d")(*inputs, **new_attrs)
def _darknet_avgpooling(inputs, params, attrs, prefix):
"""Process the average pool 2d operation."""
new_attrs = {}
kernel = attrs.get("kernel")
strides = attrs.get("stride", 1)
pads = attrs.get("pad", 0)
new_attrs["pool_size"] = (kernel, kernel)
new_attrs["strides"] = (strides, strides)
new_attrs["padding"] = (pads, pads)
return get_relay_op("avg_pool2d")(*inputs, **new_attrs)
def _darknet_conv2d(inputs, params, attrs, prefix):
"""Process the convolution 2d operation."""
new_attrs = {}
kernel = attrs.get("kernel")
strides = attrs.get("stride", 1)
pads = attrs.get("pad", 0)
new_attrs["channels"] = attrs.get("num_filter")
new_attrs["kernel_size"] = (kernel, kernel)
new_attrs["strides"] = (strides, strides)
new_attrs["padding"] = (pads, pads)
new_attrs["dilation"] = attrs.get("dilate", (1, 1))
new_attrs["groups"] = attrs.get("num_group", 1)
weight = _get_param_var(params, prefix, "weight")
out = get_relay_op("conv2d")(*inputs, weight=weight, **new_attrs)
use_bias = not attrs.get("use_batchNorm", False)
if use_bias:
new_attrs = {}
new_attrs["axis"] = 1
bias = _get_param_var(params, prefix, "bias")
out = get_relay_op("bias_add")(out, bias=bias, **new_attrs)
else:
new_attrs = {}
new_attrs["epsilon"] = 0.000001
gamma = _get_param_var(params, prefix, "gamma")
beta = _get_param_var(params, prefix, "beta")
moving_mean = _get_param_var(params, prefix, "moving_mean")
moving_var = _get_param_var(params, prefix, "moving_var")
out = get_relay_op("batch_norm")(out, gamma, beta, moving_mean, moving_var, **new_attrs)
if "activation" in attrs:
new_attrs = {}
new_attrs["activation"] = attrs["activation"]
new_attrs["slope"] = 0.1
out = _darknet_activations(out, None, new_attrs)
return out
def _darknet_shortcut(inputs, params, attrs, prefix):
"""Process the shortcut operation."""
input_0 = inputs[0]
input_1 = inputs[1]
input_0_channel = int(attrs["out_channel"])
input_1_channel = int(attrs["add_out_channel"])
input_0_size = int(attrs["out_size"])
input_1_size = int(attrs["add_out_size"])
if input_0_size > input_1_size:
scale = int(input_0_size / input_1_size)
input_1 = get_relay_op("upsampling")(input_1, scale_h=scale, scale_w=scale)
elif input_0_size < input_1_size:
stride = int(input_1_size / input_0_size)
input_1 = get_relay_op("avg_pool2d")(
input_1, pool_size=(1, 1), strides=(stride, stride), padding=(0, 0)
)
if input_0_channel != input_1_channel:
pad_channel = input_0_channel - input_1_channel
input_1 = get_relay_op("pad")(
input_1, pad_width=((0, 0), (0, pad_channel), (0, 0), (0, 0)), pad_value=0.0
)
sym = input_0 + input_1
if "activation" in attrs:
new_attrs = {}
new_attrs["activation"] = attrs["activation"]
sym = _darknet_activations(sym, None, new_attrs)
return sym
def _darknet_dense(inputs, params, attrs, prefix):
"""Process the dense operation."""
new_attrs = {}
new_attrs["units"] = attrs.get("num_hidden")
data = inputs[0]
if attrs.get("use_flatten", False) is True:
data = get_relay_op("batch_flatten")(data)
weight = _get_param_var(params, prefix, "weight")
data = get_relay_op("dense")(data, weight, **new_attrs)
use_bias = attrs.get("use_bias", False)
if use_bias:
bias = _get_param_var(params, prefix, "bias")
data = get_relay_op("bias_add")(data, bias, axis=1)
if "use_batchNorm" in attrs:
new_attrs = {}
new_attrs["epsilon"] = 0.000001
gamma = _get_param_var(params, prefix, "gamma")
beta = _get_param_var(params, prefix, "beta")
moving_mean = _get_param_var(params, prefix, "moving_mean")
moving_var = _get_param_var(params, prefix, "moving_var")
data = get_relay_op("batch_norm")(data, gamma, beta, moving_mean, moving_var, **new_attrs)
if "activation" in attrs:
new_attrs = {}
new_attrs["activation"] = attrs["activation"]
data = _darknet_activations(data, None, new_attrs)
return data
def _darknet_dropout(inputs, params, attrs, prefix):
"""Process the dropout operation, its a blank operation."""
new_attrs = {}
new_attrs["rate"] = attrs.get("p", 0.5)
return get_relay_op("dropout")(*inputs, **new_attrs)
def _darknet_reshape(inputs, params, attrs, prefix):
"""Process the reshape operation."""
new_attrs = {}
new_attrs["shape"] = attrs.get("shape")
return get_relay_op("reshape")(*inputs, **new_attrs)
def _darknet_upsampling(inputs, params, attrs, prefix):
"""Process the upsampling operation."""
new_attrs = {}
new_attrs["scale_h"] = attrs.get("scale", 1)
new_attrs["scale_w"] = attrs.get("scale", 1)
return get_relay_op("upsampling")(*inputs, **new_attrs)
def _darknet_l2normalize(inputs, params, attrs, prefix):
"""Process the l2 normalization operation."""
new_attrs = {}
new_attrs["eps"] = attrs.get("eps", 0.0)
new_attrs["axis"] = [attrs.get("axis", 1)]
return get_relay_op("l2_normalize")(*inputs, **new_attrs)
def _darknet_softmax_output(inputs, params, attrs, prefix):
"""Process the softmax operation."""
temperature = attrs.get("temperature", 1)
data = inputs[0]
if temperature != 1:
data = data / _expr.const(float(temperature))
if attrs.get("use_flatten", False) is True:
data = get_relay_op("batch_flatten")(data)
new_attrs = {}
if attrs.get("multi_output", False):
new_attrs["axis"] = 1
return get_relay_op("softmax")(data, **new_attrs)
def _darknet_route(inputs, params, attrs, prefix):
"""Process the route operation, which is equivalent to concat."""
new_attrs = {"axis": attrs.get("dim", 1)}
return get_relay_op("concatenate")((inputs[0], inputs[1]), **new_attrs)
def _darknet_reorg(inputs, params, attrs, prefix):
"""Process the reorg operation."""
new_attrs = {}
if "stride" in attrs:
new_attrs = {"stride": attrs.get("stride", 1)}
return get_relay_op("yolo_reorg")(*inputs, **new_attrs)
def _darknet_region(inputs, params, attrs, prefix):
"""Process the region operation."""
num = attrs.get("n", 1)
classes = attrs.get("classes", 1)
coords = attrs.get("coords", 0)
background = attrs.get("background", 0)
softmax = attrs.get("softmax", True)
input_shape = attrs.get("shape")
split_size = classes + coords + 1
intermediate_shape = (input_shape[0], num, split_size, input_shape[2], input_shape[3])
data_block = get_relay_op("reshape")(inputs[0], newshape=intermediate_shape)
split_indices = (2, 4, 5)
split_res = get_relay_op("split")(data_block, indices_or_sections=split_indices, axis=2)
split_res0 = get_relay_op("sigmoid")(split_res[0])
split_res2 = split_res[2] if background else get_relay_op("sigmoid")(split_res[2])
split_res3 = get_relay_op("softmax")(split_res[3], axis=2) if softmax else split_res[3]
out = get_relay_op("concatenate")((split_res0, split_res[1], split_res2, split_res3), axis=2)
return get_relay_op("reshape")(out, newshape=input_shape)
def _darknet_yolo(inputs, params, attrs, prefix):
"""Process the yolo operation."""
num = attrs.get("n", 1)
classes = attrs.get("classes", 1)
input_shape = attrs.get("shape")
split_size = classes + 5
intermediate_shape = (input_shape[0], num, split_size, input_shape[2], input_shape[3])
data_block = get_relay_op("reshape")(inputs[0], newshape=intermediate_shape)
split_indices = (2, 4)
split_res = get_relay_op("split")(data_block, indices_or_sections=split_indices, axis=2)
split_res0 = get_relay_op("sigmoid")(split_res[0])
split_res2 = get_relay_op("sigmoid")(split_res[2])
out = get_relay_op("concatenate")((split_res0, split_res[1], split_res2), axis=2)
return get_relay_op("reshape")(out, newshape=input_shape)
class ACTIVATION(object):
"""Darknet ACTIVATION Class constant."""
LOGISTIC = 0
RELU = 1
RELIE = 2
LINEAR = 3
RAMP = 4
TANH = 5
PLSE = 6
LEAKY = 7
ELU = 8
LOGGY = 9
STAIR = 10
HARDTAN = 11
LHTAN = 12
def _darknet_activations(inputs, params, attrs):
"""Process the activation function."""
act = attrs.get("activation")
data = inputs[0] if isinstance(inputs, _expr.TupleWrapper) else inputs
def _const(val):
return _expr.const(val)
def _relu(data):
return get_relay_op("relu")(data)
def _exp(data):
return get_relay_op("exp")(data)
def _tanh(data):
return get_relay_op("tanh")(data)
def _sigmoid(data):
return get_relay_op("sigmoid")(data)
def _elu(data):
alpha = _const(-1.0)
return alpha * _relu(_const(1.0) - _exp(data)) + _relu(data)
def _leaky_relu(data, slope):
new_attrs = {}
new_attrs["alpha"] = slope
return get_relay_op("leaky_relu")(data, **new_attrs)
if ACTIVATION.LOGISTIC == act:
data = _sigmoid(data)
elif ACTIVATION.RELU == act:
data = _relu(data)
elif ACTIVATION.TANH == act:
data = _tanh(data)
elif ACTIVATION.LINEAR == act:
return data
elif ACTIVATION.LEAKY == act:
data = _leaky_relu(data, attrs.get("slope", 0.1))
elif ACTIVATION.ELU == act:
data = _elu(data)
else:
_darknet_not_support("act: " + attrs)
return data
class LAYERTYPE(Enum):
"""Darknet LAYERTYPE Class constant."""
CONVOLUTIONAL = 0
DECONVOLUTIONAL = 1
CONNECTED = 2
MAXPOOL = 3
SOFTMAX = 4
DETECTION = 5
DROPOUT = 6
CROP = 7
ROUTE = 8
COST = 9
NORMALIZATION = 10
AVGPOOL = 11
LOCAL = 12
SHORTCUT = 13
ACTIVE = 14
RNN = 15
GRU = 16
LSTM = 17
CRNN = 18
BATCHNORM = 19
NETWORK = 20
XNOR = 21
REGION = 22
YOLO = 23
REORG = 24
UPSAMPLE = 25
LOGXENT = 26
L2NORM = 27
BLANK = 28
_DARKNET_CONVERT_MAP = {
LAYERTYPE.CONVOLUTIONAL: _darknet_conv2d,
LAYERTYPE.CONNECTED: _darknet_dense,
LAYERTYPE.MAXPOOL: _darknet_maxpooling,
LAYERTYPE.SOFTMAX: _darknet_softmax_output,
LAYERTYPE.DROPOUT: _darknet_dropout,
LAYERTYPE.AVGPOOL: _darknet_avgpooling,
LAYERTYPE.ROUTE: _darknet_route,
LAYERTYPE.REORG: _darknet_reorg,
LAYERTYPE.REGION: _darknet_region,
LAYERTYPE.SHORTCUT: _darknet_shortcut,
LAYERTYPE.UPSAMPLE: _darknet_upsampling,
LAYERTYPE.L2NORM: _darknet_l2normalize,
LAYERTYPE.YOLO: _darknet_yolo,
LAYERTYPE.DECONVOLUTIONAL: _darknet_not_support,
LAYERTYPE.BATCHNORM: _darknet_not_support,
LAYERTYPE.DETECTION: _darknet_not_support,
LAYERTYPE.CROP: _darknet_not_support,
LAYERTYPE.COST: _darknet_not_support,
LAYERTYPE.NORMALIZATION: _darknet_not_support,
LAYERTYPE.LOCAL: _darknet_not_support,
LAYERTYPE.ACTIVE: _darknet_not_support,
LAYERTYPE.RNN: _darknet_not_support,
LAYERTYPE.GRU: _darknet_not_support,
LAYERTYPE.LSTM: _darknet_not_support,
LAYERTYPE.CRNN: _darknet_not_support,
LAYERTYPE.NETWORK: _darknet_not_support,
LAYERTYPE.XNOR: _darknet_not_support,
LAYERTYPE.BLANK: _darknet_not_support,
}
def _darknet_convert_symbol(op_name, inputs, params, attrs, params_prefix):
"""Convert from darknet op to relay op.
Parameters
----------
op_name : str
Operator name, such as Convolution, Connected, etc
inputs : list of relay.Function
List of input symbols.
attrs : dict
Dict of operator attributes
params_prefix: str
Params name for this operation
Returns
-------
sym : tvm.relay.Function
Converted relay function
"""
if op_name in _DARKNET_CONVERT_MAP:
sym = _DARKNET_CONVERT_MAP[op_name](inputs, params, attrs, params_prefix)
else:
_darknet_not_support("Operator type " + str(op_name))
return sym
def _as_list(arr):
"""Force being a list, ignore if already is."""
if isinstance(arr, list):
return arr
return [arr]
class GraphProto(object):
"""A helper class for handling relay functions from darknet model."""
def __init__(self, net, shape, dtype="float32"):
self._net = net
self._shape = shape
self._dtype = dtype
self._sym_array = {}
self._tvmparams = {}
self._outs = []
self._state_ctr = {}
self._state_ctr["rnn"] = 0
self._state_ctr["crnn"] = 0
self._state_ctr["lstm"] = 0
self._state_ctr["cell_state"] = 0
self._state_ctr["gru"] = 0
def _read_memory_buffer(self, shape, data, dtype=None):
if dtype is None:
dtype = self._dtype
length = 1
for x in shape:
length *= x
data_np = np.zeros(length, dtype=dtype)
for i in range(length):
data_np[i] = data[i]
return data_np.reshape(shape)
def _get_convolution_weights(self, layer, opname):
"""Get the convolution layer weights and biases."""
if layer.nweights == 0:
return None
if (layer.n * layer.c // layer.groups * layer.size * layer.size) != layer.nweights:
raise RuntimeError("layer weights size not matching with n c h w")
params = {}
shape = (layer.n, layer.c // layer.groups, layer.size, layer.size)
weights = self._read_memory_buffer(shape, layer.weights)
biases = self._read_memory_buffer((layer.n,), layer.biases)
k = _get_params_name(opname, "weight")
params[k] = tvm.nd.array(weights)
if layer.batch_normalize == 1 and layer.dontloadscales != 1:
params.update(self._get_batchnorm_weights(layer, opname, layer.n))
k = _get_params_name(opname, "beta")
params[k] = tvm.nd.array(biases)
else:
k = _get_params_name(opname, "bias")
params[k] = tvm.nd.array(biases)
return params
def _get_connected_weights(self, layer, opname):
"""Parse the weights and biases for fully connected or dense layer."""
size = layer.outputs * layer.inputs
if size == 0:
return None
weights = self._read_memory_buffer((layer.outputs, layer.inputs), layer.weights)
biases = self._read_memory_buffer((layer.outputs,), layer.biases)
params = {}
k = _get_params_name(opname, "weight")
params[k] = tvm.nd.array(weights)
if layer.batch_normalize == 1 and layer.dontloadscales != 1:
params.update(self._get_batchnorm_weights(layer, opname, layer.outputs))
k = _get_params_name(opname, "beta")
params[k] = tvm.nd.array(biases)
else:
k = _get_params_name(opname, "bias")
params[k] = tvm.nd.array(biases)
return params
def _get_region_weights(self, layer, opname):
"""Parse the biases for region layer."""
biases = self._read_memory_buffer((layer.n * 2,), layer.biases)
attributes = np.array(
[
layer.n,
layer.out_c,
layer.out_h,
layer.out_w,
layer.classes,
layer.coords,
layer.background,
],
dtype=np.int32,
)
params = {}
k = _get_params_name(opname, "bias")
params[k] = tvm.nd.array(biases)
k = _get_params_name(opname, "attr")
params[k] = tvm.nd.array(attributes)
return params
def _get_yolo_weights(self, layer, opname):
"""Parse the biases and mask for yolo layer."""
biases = self._read_memory_buffer((layer.total * 2,), layer.biases)
mask = self._read_memory_buffer((layer.n,), layer.mask, dtype="int32")
attributes = np.array(
[layer.n, layer.out_c, layer.out_h, layer.out_w, layer.classes, layer.total],
dtype=np.int32,
)
params = {}
k = _get_params_name(opname, "bias")
params[k] = tvm.nd.array(biases)
k = _get_params_name(opname, "mask")
params[k] = tvm.nd.array(mask)
k = _get_params_name(opname, "attr")
params[k] = tvm.nd.array(attributes)
return params
def _get_batchnorm_weights(self, layer, opname, size):
"""Parse the weights for batchnorm, which includes, scales, moving mean
and moving variances."""
scales = self._read_memory_buffer((size,), layer.scales)
rolling_mean = self._read_memory_buffer((size,), layer.rolling_mean)
rolling_variance = self._read_memory_buffer((size,), layer.rolling_variance)
params = {}
k = _get_params_name(opname, "moving_mean")
params[k] = tvm.nd.array(rolling_mean)
k = _get_params_name(opname, "moving_var")
params[k] = tvm.nd.array(rolling_variance)
k = _get_params_name(opname, "gamma")
params[k] = tvm.nd.array(scales)
return params
def _get_darknet_attrs(self, layer, layer_num):
"""Parse attributes of each layer and return."""
attr = {}
use_flatten = True
layer_type = LAYERTYPE(layer.type)
if LAYERTYPE.CONVOLUTIONAL == layer_type:
attr.update({"pad": layer.pad})
attr.update({"num_group": layer.groups})
attr.update({"num_filter": layer.n})
attr.update({"stride": layer.stride})
attr.update({"kernel": layer.size})
attr.update({"activation": (layer.activation)})
if layer.nbiases == 0:
attr.update({"use_bias": False})
else:
attr.update({"use_bias": True})
if layer.batch_normalize == 1 and layer.dontloadscales != 1:
attr.update({"use_batchNorm": True})
attr.update({"use_scales": True})
elif LAYERTYPE.CONNECTED == layer_type:
attr.update({"num_hidden": layer.outputs})
attr.update({"activation": (layer.activation)})
if layer_num != 0:
layer_prev = self._net.layers[layer_num - 1]
if (
layer_prev.out_h == layer.h
and layer_prev.out_w == layer.w
and layer_prev.out_c == layer.c
):
use_flatten = False
attr.update({"use_flatten": use_flatten})
attr.update({"use_bias": True})
if layer.batch_normalize == 1 and layer.dontloadscales != 1:
attr.update({"use_batchNorm": True})
attr.update({"use_scales": True})
attr.update({"use_bias": False})
elif LAYERTYPE.MAXPOOL == layer_type:
attr.update({"pad": layer.pad})
attr.update({"stride": layer.stride})
attr.update({"kernel": layer.size})
max_output = (layer.w - layer.size + 2 * layer.pad) / float(layer.stride) + 1
if max_output < layer.out_w:
extra_pad = (layer.out_w - max_output) * layer.stride
attr.update({"extra_pad_size": int(extra_pad)})
elif LAYERTYPE.AVGPOOL == layer_type:
attr.update({"pad": layer.pad})
if layer.stride == 0:
attr.update({"stride": 1})
else:
attr.update({"stride": layer.stride})
if layer.size == 0 and layer.h == layer.w:
attr.update({"kernel": layer.h})
else:
attr.update({"kernel": layer.size})
elif LAYERTYPE.DROPOUT == layer_type:
attr.update({"p": layer.probability})
elif LAYERTYPE.SOFTMAX == layer_type:
attr.update({"axis": 1})
attr.update({"use_flatten": True})
if layer.temperature:
attr.update({"temperature": str(layer.temperature)})
elif LAYERTYPE.SHORTCUT == layer_type:
add_layer = self._net.layers[layer.index]
attr.update({"activation": layer.activation})
attr.update({"out_channel": layer.out_c})
attr.update({"out_size": layer.out_h})
attr.update({"add_out_channel": add_layer.out_c})
attr.update({"add_out_size": add_layer.out_h})
elif LAYERTYPE.ROUTE == layer_type:
pass
elif LAYERTYPE.COST == layer_type:
pass
elif LAYERTYPE.REORG == layer_type:
attr.update({"stride": layer.stride})
elif LAYERTYPE.REGION == layer_type:
attr.update({"n": layer.n})
attr.update({"classes": layer.classes})
attr.update({"coords": layer.coords})
attr.update({"background": layer.background})
attr.update({"softmax": layer.softmax})
attr.update({"shape": (-1, layer.c, layer.h, layer.w)})
elif LAYERTYPE.YOLO == layer_type:
attr.update({"n": layer.n})
attr.update({"classes": layer.classes})
attr.update({"shape": (-1, layer.c, layer.h, layer.w)})
elif LAYERTYPE.UPSAMPLE == layer_type:
attr.update({"scale": layer.stride})
elif LAYERTYPE.L2NORM == layer_type:
pass
else:
err = "Darknet layer type {} is not supported in relay.".format(layer_type)
raise NotImplementedError(err)
return attr
def _get_darknet_params(self, layer, opname):
"""To parse and get the darknet params."""
layer_type = LAYERTYPE(layer.type)
params = None
if LAYERTYPE.CONVOLUTIONAL == layer_type:
params = self._get_convolution_weights(layer, opname)
elif LAYERTYPE.CONNECTED == layer_type:
params = self._get_connected_weights(layer, opname)
elif LAYERTYPE.REGION == layer_type:
params = self._get_region_weights(layer, opname)
elif LAYERTYPE.YOLO == layer_type:
params = self._get_yolo_weights(layer, opname)
return params
def _preproc_layer(self, layer, layer_num):
"""To preprocess each darknet layer, some layer doesnt need processing."""
if layer_num == 0:
name = "data"
sym = new_var(name, shape=self._shape, dtype=self._dtype)
else:
sym = self._sym_array[layer_num - 1]
skip_layer = False
layer_type = LAYERTYPE(layer.type)
if LAYERTYPE.ROUTE == layer_type:
sym = []
for j in range(layer.n):
sym.append(self._sym_array[layer.input_layers[j]])
if layer.n == 1:
skip_layer = True
elif LAYERTYPE.COST == layer_type:
skip_layer = True
elif LAYERTYPE.SHORTCUT == layer_type:
sym = [sym, self._sym_array[layer.index]]
elif LAYERTYPE.BLANK == layer_type:
skip_layer = True
if skip_layer is True:
self._sym_array[layer_num] = sym
return skip_layer, sym
def _get_opname(self, layer):
"""Returs the layer name."""
return LAYERTYPE(layer.type)
def _new_rnn_state_var(self, state=None, name="rnn"):
"""Returs a symbol for state"""
sym_name = name + "%d_state" % self._state_ctr[name]
self._state_ctr[name] += 1
return new_var(sym_name, shape=state.shape, dtype=str(state.dtype))
def _get_rnn_state_buffer(self, layer, name):
"""Get the state buffer for rnn."""
buffer = np.zeros((1, layer.outputs), self._dtype)
return self._new_rnn_state_var(buffer, name)
def _get_darknet_rnn_attrs(self, layer, name, sym):
"""Get the rnn converted symbol from attributes."""
attr = self._get_darknet_attrs(layer, 0)
op_name = self._get_opname(layer)
prefix = _get_params_prefix(op_name, name)
params = self._get_darknet_params(layer, prefix)
sym = _darknet_convert_symbol(op_name, _as_list(sym), params, attr, prefix)
if params:
self._tvmparams.update(params)
return sym
def _handle_darknet_rnn_layers(self, layer_num, sym):
"""Parse attributes and handle the rnn layers."""
attr = {}
layer = self._net.layers[layer_num]
processed = False
layer_type = LAYERTYPE(layer.type)
if LAYERTYPE.RNN == layer_type:
attr.update({"n": layer.n})
attr.update({"batch": layer.batch})
attr.update({"num_hidden": str(layer.outputs)})
state = self._get_rnn_state_buffer(layer, "rnn")
for _ in range(layer.steps):
input_layer = layer.input_layer
prefix = "_input_" + str(layer_num)
sym = self._get_darknet_rnn_attrs(input_layer, prefix, sym)
self_layer = layer.self_layer
prefix = "_self_" + str(layer_num)
state = self._get_darknet_rnn_attrs(self_layer, prefix, state)
state = sym + state
self._outs.append(state)
output_layer = layer.output_layer
prefix = "_output_" + str(layer_num)
sym = self._get_darknet_rnn_attrs(output_layer, prefix, state)
self._sym_array[layer_num] = sym
processed = True
return processed, sym
def _make_outlist(self, sym, op_name, layer, layer_num):
layer_type = LAYERTYPE(layer.type)
if layer_type == LAYERTYPE.REGION:
# Add attributes
k = _get_params_name(op_name, "attr")
dshape = self._tvmparams[k].shape
dtype = self._tvmparams[k].dtype
self._outs.insert(0, new_var(k, shape=dshape, dtype=dtype))
# Add bias
k = _get_params_name(op_name, "bias")
dshape = self._tvmparams[k].shape
dtype = self._tvmparams[k].dtype
self._outs.insert(0, new_var(k, shape=dshape, dtype=dtype))
if layer_num != self._net.n - 1:
self._outs.insert(0, sym)
elif layer_type == LAYERTYPE.YOLO:
# Add attributes
k = _get_params_name(op_name, "attr")
dshape = self._tvmparams[k].shape
dtype = self._tvmparams[k].dtype
self._outs.insert(0, new_var(k, shape=dshape, dtype=dtype))
# Add bias
k = _get_params_name(op_name, "bias")
dshape = self._tvmparams[k].shape
dtype = self._tvmparams[k].dtype
self._outs.insert(0, new_var(k, shape=dshape, dtype=dtype))
# Add mask
k = _get_params_name(op_name, "mask")
dshape = self._tvmparams[k].shape
dtype = self._tvmparams[k].dtype
self._outs.insert(0, new_var(k, shape=dshape, dtype=dtype))
if layer_num != self._net.n - 1:
self._outs.insert(0, sym)
def from_darknet(self):
"""To convert the darknet symbol to relay functions."""
for i in range(self._net.n):
layer = self._net.layers[i]
need_skip, sym = self._preproc_layer(layer, i)
if need_skip:
continue
processed, sym = self._handle_darknet_rnn_layers(i, sym)
if processed:
continue
attr = self._get_darknet_attrs(layer, i)
op_name = self._get_opname(layer)
prefix = _get_params_prefix(op_name, i)
params = self._get_darknet_params(self._net.layers[i], prefix)
sym = _darknet_convert_symbol(op_name, _as_list(sym), params, attr, prefix)
if params:
self._tvmparams.update(params)
self._sym_array[i] = sym
self._make_outlist(sym, prefix, layer, i)
outputs = _as_list(sym) + self._outs
outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
sym = _function.Function(analysis.free_vars(outputs), outputs)
return IRModule.from_expr(sym), self._tvmparams
def from_darknet(net, shape=None, dtype="float32"):
"""Convert from Darknet's model into compatible relay Function.
Parameters
----------
net : Darknet net parameter
Darknet net structure.
shape : dict of str to tuple, optional
The input shape to the graph
dtype : str or dict of str to str
The input types to the graph
Returns
-------
mod : tvm.IRModule
The relay module for compilation.
params : dict of str to tvm.nd.NDArray
The parameter dict to be used by relay
"""
return GraphProto(net, shape, dtype).from_darknet()
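# A minimal usage sketch (illustrative only): the shared-library, cfg and weights
# paths below are placeholders, and the darknet CFFI bindings are assumed to come
# from tvm.relay.testing.darknet.
#
#   from tvm.relay.testing.darknet import __darknetffi__
#   darknet_lib = __darknetffi__.dlopen("./libdarknet.so")
#   net = darknet_lib.load_network("yolov3.cfg".encode("utf-8"),
#                                  "yolov3.weights".encode("utf-8"), 0)
#   mod, params = from_darknet(net, shape=(1, 3, 416, 416), dtype="float32")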
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/keras.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, import-outside-toplevel
"""Keras frontend."""
import dis
import sys
import numpy as np
import tvm
from tvm.ir import IRModule, TensorType, TupleType
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from ... import nd as _nd
from .common import ExprTable, new_var
__all__ = ["from_keras"]
def _check_data_format(keras_layer):
if hasattr(keras_layer, ("data_format")):
if keras_layer.data_format != "channels_last":
raise ValueError("Keras frontend currently supports data_format = channels_last only.")
def _get_pad_pair(input1d, kernel1d, stride1d):
out1d = (input1d + stride1d - 1) // stride1d
pad = np.maximum((out1d - 1) * stride1d + kernel1d - input1d, 0)
pad_before = pad // 2
pad_after = pad - pad_before
return [pad_before, pad_after]
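# Worked example (illustrative): for an input width of 7, kernel 3 and stride 2,
# out1d = (7 + 2 - 1) // 2 = 4 and pad = max((4 - 1) * 2 + 3 - 7, 0) = 2, so the
# returned pair is [1, 1], i.e. one pixel of padding on each side, which matches
# Keras "same" padding.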
def _get_elu(inexpr, alpha):
"""A helper method for elu."""
return _op.negative(alpha) * _op.nn.relu(
_expr.const(1.0, dtype="float32") - _op.exp(inexpr)
) + _op.nn.relu(inexpr)
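# Why the expression above equals ELU: for x >= 0, 1 - exp(x) <= 0, so the first
# relu term vanishes and the result is simply x; for x < 0, relu(x) is 0 and the
# result is -alpha * (1 - exp(x)) = alpha * (exp(x) - 1), the standard ELU branch.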
def _as_list(arr):
"""Force being a list, ignore if already is."""
if isinstance(arr, list):
return arr
return [arr]
def _convert_recurrent_activation(inexpr, keras_layer):
act_type = keras_layer.recurrent_activation.__name__
return _convert_activation(inexpr, act_type, None, None, None)
def _convert_activation(
inexpr, keras_layer, etab, data_layout, input_shape=None
): # pylint: disable=unused-argument
if isinstance(keras_layer, str):
act_type = keras_layer
else:
if sys.version_info.major < 3:
act_type = keras_layer.activation.func_name
else:
act_type = keras_layer.activation.__name__
if act_type == "linear":
if isinstance(keras_layer, str):
return inexpr
alpha = keras_layer.alpha if hasattr(keras_layer, "alpha") else 1.0
beta = keras_layer.beta if hasattr(keras_layer, "beta") else 0.0
alpha = _expr.const(alpha, dtype="float32")
beta = _expr.const(beta, dtype="float32")
return _op.add(_op.multiply(inexpr, alpha), beta)
if act_type == "softmax":
axis = 1 if data_layout == "NCHW" else -1
return _op.nn.softmax(inexpr, axis)
if act_type == "sigmoid":
return _op.sigmoid(inexpr)
if act_type == "tanh":
return _op.tanh(inexpr)
if act_type == "relu":
return _op.nn.relu(inexpr)
if act_type == "softplus":
return _op.log(_op.add(_op.exp(inexpr), _expr.const(1.0, dtype="float32")))
if act_type == "elu":
alpha = keras_layer.alpha if hasattr(keras_layer, "alpha") else 1.0
alpha = _expr.const(alpha, dtype="float32")
return _get_elu(inexpr, alpha)
if act_type == "selu":
# Alpha, Gamma values obtained from https://arxiv.org/abs/1706.02515
alpha = (
keras_layer.alpha
if hasattr(keras_layer, "alpha")
else 1.6732632423543772848170429916717
)
gamma = (
keras_layer.gamma
if hasattr(keras_layer, "gamma")
else 1.0507009873554804934193349852946
)
alpha = _expr.const(alpha, dtype="float32")
gamma = _expr.const(gamma, dtype="float32")
return gamma * _get_elu(inexpr, alpha)
if act_type == "relu6":
return _op.clip(inexpr, a_min=0.0, a_max=6.0)
if act_type == "softsign":
return inexpr / (_expr.const(1.0, dtype="float32") + _op.abs(inexpr))
if act_type == "hard_sigmoid":
x = (_expr.const(0.2, dtype="float32") * inexpr) + _expr.const(0.5, dtype="float32")
return _op.clip(x, a_min=0.0, a_max=1.0)
raise tvm.error.OpNotImplemented(
"Operator {} is not supported in frontend Keras.".format(act_type)
)
def _convert_advanced_activation(inexpr, keras_layer, etab, data_layout, input_shape=None):
act_type = type(keras_layer).__name__
if input_shape is None:
input_shape = keras_layer.input_shape
if act_type == "Softmax":
axis = keras_layer.axis
dims = len(input_shape)
if isinstance(axis, list):
raise tvm.error.OpAttributeUnImplemented(
"Softmax with axes {} is not supported.".format(axis)
)
if data_layout == "NCHW":
if axis == -1:
axis = 1
else:
axis = axis + 1 if axis < dims - 1 else 1
return _op.nn.softmax(inexpr, axis=axis)
if act_type == "ReLU":
threshold = _expr.const(keras_layer.threshold, dtype="float32")
if keras_layer.max_value and float(keras_layer.threshold) == 0:
# f(x) = max_value, for x >= max_value
# f(x) = x, for threshold <= x < max_value
return _op.clip(inexpr, a_min=0.0, a_max=float(keras_layer.max_value))
if keras_layer.max_value and _op.greater(threshold, inexpr).astype("float32"):
# f(x) = negative_slope * (inexpr - threshold)
negative_slope = _expr.const(keras_layer.negative_slope, dtype="float32")
return _op.multiply(negative_slope, _op.subtract(inexpr, threshold))
return _op.nn.relu(inexpr)
if act_type == "LeakyReLU":
return _op.nn.leaky_relu(inexpr, alpha=float(keras_layer.alpha))
if act_type == "ELU":
alpha = keras_layer.alpha if hasattr(keras_layer, "alpha") else 1.0
alpha = _expr.const(alpha, dtype="float32")
return _get_elu(inexpr, alpha)
if act_type == "PReLU":
assert hasattr(keras_layer, "alpha"), "alpha required for PReLU."
_check_data_format(keras_layer)
size = len(keras_layer.alpha.shape)
if data_layout == "NCHW":
alpha = etab.new_const(keras_layer.get_weights()[0].transpose(np.roll(range(size), 1)))
else:
alpha = etab.new_const(keras_layer.get_weights()[0])
return _op.negative(alpha) * _op.nn.relu(_op.negative(inexpr)) + _op.nn.relu(inexpr)
if act_type == "ThresholdedReLU":
theta = keras_layer.theta if hasattr(keras_layer, "theta") else 1.0
return _op.multiply(
inexpr, _op.greater(inexpr, _expr.const(theta, dtype="float32")).astype("float32")
)
raise tvm.error.OpNotImplemented(
"Operator {} is not supported in frontend Keras.".format(act_type)
)
def _convert_merge(
inexpr, keras_layer, _, input_shape=None, data_layout=None
): # pylint: disable=unused-argument
merge_type = type(keras_layer).__name__
ret = inexpr[0]
if merge_type == "Dot":
axes = keras_layer.axes
if isinstance(keras_layer.axes, int):
axes = [keras_layer.axes, keras_layer.axes]
if isinstance(axes, list):
if len(axes) != 2:
raise tvm.error.OpAttributeUnImplemented(
"Dot with axes {} is not supported.".format(keras_layer.axes)
)
for i, axis in enumerate(axes):
if axis not in [1, 2]:
raise tvm.error.OpAttributeUnImplemented(
"Dot with axes {} is not supported.".format(keras_layer.axes)
)
if axes[i] == 2:
inexpr[i] = _op.transpose(inexpr[i], axes=[0, 2, 1])
else:
raise tvm.error.OpAttributeUnImplemented(
"Dot with axes {} is not supported.".format(keras_layer.axes)
)
ret_dot = _op.nn.batch_matmul(inexpr[0], inexpr[1])
ret = _op.transpose(ret_dot, axes=[0, 2, 1])
elif merge_type == "Subtract":
assert len(inexpr) == 2, "Subtract merge takes 2 inputs."
ret = _op.subtract(ret, inexpr[1])
elif merge_type in ["Add", "Multiply", "Minimum", "Maximum"]:
op_map = {
"Add": _op.add,
"Multiply": _op.multiply,
"Minimum": _op.minimum,
"Maximum": _op.maximum,
}
for i in range(1, len(inexpr)):
ret = op_map[merge_type](ret, inexpr[i])
elif merge_type == "Average":
for i in range(1, len(inexpr)):
ret = _op.add(ret, inexpr[i])
ret = ret / _expr.const(len(inexpr), dtype="float32")
else:
raise tvm.error.OpNotImplemented(
"Operator {} is not supported in frontend Keras.".format(merge_type)
)
return ret
def _convert_permute(
inexpr, keras_layer, _, input_shape=None, data_layout=None
): # pylint: disable=unused-argument
return _op.transpose(inexpr, axes=(0,) + keras_layer.dims)
def _convert_embedding(
inexpr, keras_layer, etab, data_layout, input_shape=None
): # pylint: disable=unused-argument
indices = inexpr
weightList = keras_layer.get_weights()
weight = etab.new_const(weightList[0])
out = _op.take(weight, indices.astype("int32"), axis=0)
return out
def _convert_dense(
inexpr, keras_layer, etab, data_layout, input_shape=None
): # pylint: disable=unused-argument
weightList = keras_layer.get_weights()
weight = etab.new_const(weightList[0].transpose([1, 0]))
params = {"weight": weight, "units": weightList[0].shape[1]}
if input_shape is None:
input_shape = keras_layer.input_shape
input_dim = len(input_shape)
# In case of RNN dense, input shape will be (1, 1, n)
if input_dim > 2:
input_shape = tuple(dim if dim else 1 for dim in _as_list(input_shape)[0])
if input_dim != 3 or input_shape[0] != 1 or input_shape[1] != 1:
raise tvm.error.OpAttributeInvalid(
"Input shape {} is not valid for operator Dense.".format(input_shape)
)
inexpr = _op.squeeze(inexpr, axis=[0])
out = _op.nn.dense(data=inexpr, **params)
if keras_layer.use_bias:
bias = etab.new_const(weightList[1])
out = _op.nn.bias_add(out, bias)
# defuse activation
if sys.version_info.major < 3:
act_type = keras_layer.activation.func_name
else:
act_type = keras_layer.activation.__name__
if act_type != "linear":
out = _convert_activation(out, act_type, etab, data_layout)
if input_dim > 2:
out = _op.expand_dims(out, axis=0)
return out
def _convert_convolution1d(inexpr, keras_layer, etab, data_layout, input_shape=None):
if input_shape is None:
input_shape = keras_layer.input_shape
_check_data_format(keras_layer)
weightList = keras_layer.get_weights()
weight = weightList[0]
if data_layout == "NWC":
kernel_layout = "WIO"
else:
kernel_layout = "OIW"
msg = (
"Kernel layout with {} is not supported for operator Convolution1D "
"in frontend Keras."
)
raise tvm.error.OpAttributeUnImplemented(msg.format(data_layout))
is_deconv = type(keras_layer).__name__ == "Conv1DTranspose"
if is_deconv:
if kernel_layout == "OIW":
weight = weight.transpose([2, 0, 1])
kernel_w, n_filters, _ = weight.shape
else:
kernel_w, _, n_filters = weight.shape
dilation_rate = keras_layer.dilation_rate
if isinstance(dilation_rate, (list, tuple)):
dilation = [dilation_rate[0]]
else:
dilation = [dilation_rate]
dilated_kernel_w = (kernel_w - 1) * dilation[0] + 1
stride_w = keras_layer.strides[0]
params = {
"weight": etab.new_const(weight),
"kernel_size": [kernel_w],
"strides": [stride_w],
"dilation": dilation,
"padding": [0],
"data_layout": data_layout,
"kernel_layout": kernel_layout,
}
params["channels"] = n_filters
if keras_layer.padding == "valid":
pass
# calculate the padding values
elif keras_layer.padding == "same":
in_w = input_shape[1]
pad_w = _get_pad_pair(in_w, dilated_kernel_w, stride_w)
params["padding"] = [pad_w[0], pad_w[1]]
else:
msg = "Padding with {} is not supported for operator Convolution3D " "in frontend Keras."
raise tvm.error.OpAttributeUnImplemented(msg.format(keras_layer.padding))
if is_deconv:
out = _op.nn.conv1d_transpose(data=inexpr, **params)
else:
out = _op.nn.conv1d(data=inexpr, **params)
channel_axis = -1 if data_layout == "NWC" else 1
if keras_layer.use_bias:
bias = etab.new_const(weightList[1])
out = _op.nn.bias_add(out, bias, channel_axis)
# defuse activation
if sys.version_info.major < 3:
act_type = keras_layer.activation.func_name
else:
act_type = keras_layer.activation.__name__
if act_type != "linear":
out = _convert_activation(out, act_type, etab, data_layout)
return out
def _convert_convolution(inexpr, keras_layer, etab, data_layout, input_shape=None):
_check_data_format(keras_layer)
is_deconv = type(keras_layer).__name__ == "Conv2DTranspose"
is_depthconv = type(keras_layer).__name__ == "DepthwiseConv2D"
weightList = keras_layer.get_weights()
weight = weightList[0]
if input_shape is None:
input_shape = keras_layer.input_shape
if data_layout == "NHWC":
if is_depthconv:
kernel_layout = "HWOI"
else:
kernel_layout = "HWIO"
else:
if is_deconv:
kernel_layout = "IOHW"
else:
kernel_layout = "OIHW"
if is_deconv:
kernel_h, kernel_w, n_filters, in_channels = weight.shape
if kernel_layout == "IOHW":
weight = weight.transpose([3, 2, 0, 1])
elif is_depthconv:
kernel_h, kernel_w, in_channels, depth_mult = weight.shape
if kernel_layout == "OIHW":
weight = weight.transpose([2, 3, 0, 1])
elif data_layout == "NCHW":
kernel_h, kernel_w, in_channels, n_filters = weight.shape
weight = weight.transpose([3, 2, 0, 1])
else:
kernel_h, kernel_w, in_channels, n_filters = weight.shape
if isinstance(keras_layer.dilation_rate, (list, tuple)):
dilation = [keras_layer.dilation_rate[0], keras_layer.dilation_rate[1]]
else:
dilation = [keras_layer.dilation_rate, keras_layer.dilation_rate]
dilated_kernel_h = (kernel_h - 1) * dilation[0] + 1
dilated_kernel_w = (kernel_w - 1) * dilation[1] + 1
stride_h, stride_w = keras_layer.strides
params = {
"weight": etab.new_const(weight),
"kernel_size": [kernel_h, kernel_w],
"strides": [stride_h, stride_w],
"dilation": dilation,
"padding": [0, 0],
"data_layout": data_layout,
"kernel_layout": kernel_layout,
}
if is_depthconv:
params["channels"] = in_channels * depth_mult
params["groups"] = in_channels
else:
params["channels"] = n_filters
if keras_layer.padding == "valid":
pass
# we insert a separate pad operator
elif keras_layer.padding == "same":
in_h = input_shape[1]
in_w = input_shape[2]
pad_t, pad_b = _get_pad_pair(in_h, dilated_kernel_h, stride_h)
pad_l, pad_r = _get_pad_pair(in_w, dilated_kernel_w, stride_w)
params["padding"] = (pad_t, pad_l, pad_b, pad_r)
else:
msg = "Padding with {} is not supported for operator Convolution " "in frontend Keras."
raise tvm.error.OpAttributeUnImplemented(msg.format(keras_layer.padding))
if is_deconv:
out = _op.nn.conv2d_transpose(data=inexpr, **params)
else:
out = _op.nn.conv2d(data=inexpr, **params)
if keras_layer.use_bias:
bias = etab.new_const(weightList[1])
if data_layout == "NCHW":
out = _op.nn.bias_add(out, bias)
else:
out = _op.nn.bias_add(out, bias, axis=-1)
# defuse activation
if sys.version_info.major < 3:
act_type = keras_layer.activation.func_name
else:
act_type = keras_layer.activation.__name__
if act_type != "linear":
out = _convert_activation(out, act_type, etab, data_layout)
return out
def _convert_convolution3d(inexpr, keras_layer, etab, data_layout, input_shape=None):
_check_data_format(keras_layer)
weightList = keras_layer.get_weights()
weight = weightList[0]
if input_shape is None:
input_shape = keras_layer.input_shape
if data_layout == "NDHWC":
kernel_layout = "DHWIO"
else:
kernel_layout = "OIDHW"
msg = (
"Kernel layout with {} is not supported for operator Convolution3D "
"in frontend Keras."
)
raise tvm.error.OpAttributeUnImplemented(msg.format(data_layout))
is_deconv = type(keras_layer).__name__ == "Conv3DTranspose"
if is_deconv:
kernel_d, kernel_h, kernel_w, n_filters, _ = weight.shape
if kernel_layout == "OIDHW":
weight = weight.transpose([4, 3, 2, 0, 1])
else:
kernel_d, kernel_h, kernel_w, _, n_filters = weight.shape
dilation_rate = keras_layer.dilation_rate
if isinstance(dilation_rate, (list, tuple)):
dilation = [dilation_rate[0], dilation_rate[1], dilation_rate[2]]
else:
dilation = [dilation_rate, dilation_rate, dilation_rate]
dilated_kernel_d = (kernel_d - 1) * dilation[0] + 1
dilated_kernel_h = (kernel_h - 1) * dilation[1] + 1
dilated_kernel_w = (kernel_w - 1) * dilation[2] + 1
stride_d, stride_h, stride_w = keras_layer.strides
params = {
"weight": etab.new_const(weight),
"kernel_size": [kernel_d, kernel_h, kernel_w],
"strides": [stride_d, stride_h, stride_w],
"dilation": dilation,
"padding": [0, 0, 0],
"data_layout": data_layout,
"kernel_layout": kernel_layout,
}
params["channels"] = n_filters
if keras_layer.padding == "valid":
pass
# calculate the padding values
elif keras_layer.padding == "same":
in_d = input_shape[1]
in_h = input_shape[2]
in_w = input_shape[3]
pad_d = _get_pad_pair(in_d, dilated_kernel_d, stride_d)
pad_h = _get_pad_pair(in_h, dilated_kernel_h, stride_h)
pad_w = _get_pad_pair(in_w, dilated_kernel_w, stride_w)
params["padding"] = [pad_d[0], pad_h[0], pad_w[0], pad_d[1], pad_h[1], pad_w[1]]
else:
msg = "Padding with {} is not supported for operator Convolution3D " "in frontend Keras."
raise tvm.error.OpAttributeUnImplemented(msg.format(keras_layer.padding))
if is_deconv:
out = _op.nn.conv3d_transpose(data=inexpr, **params)
else:
out = _op.nn.conv3d(data=inexpr, **params)
channel_axis = -1 if data_layout == "NDHWC" else 1
if keras_layer.use_bias:
bias = etab.new_const(weightList[1])
out = _op.nn.bias_add(out, bias, channel_axis)
# defuse activation
if sys.version_info.major < 3:
act_type = keras_layer.activation.func_name
else:
act_type = keras_layer.activation.__name__
if act_type != "linear":
out = _convert_activation(out, act_type, etab, None)
return out
def _convert_separable_convolution(inexpr, keras_layer, etab, data_layout, input_shape=None):
_check_data_format(keras_layer)
if data_layout == "NHWC":
kernel_layout = "HWOI"
else:
kernel_layout = "OIHW"
if input_shape is None:
input_shape = keras_layer.input_shape
weightList = keras_layer.get_weights()
# depthwise conv
kernel_h, kernel_w, in_channels, depth_mult = weightList[0].shape
stride_h, stride_w = keras_layer.strides
if kernel_layout == "OIHW":
weight0 = weightList[0].transpose([2, 3, 0, 1])
else:
weight0 = weightList[0]
params0 = {
"weight": etab.new_const(weight0),
"channels": in_channels * depth_mult,
"groups": in_channels,
"kernel_size": [kernel_h, kernel_w],
"strides": [stride_h, stride_w],
"dilation": [1, 1],
"padding": [0, 0],
"data_layout": data_layout,
"kernel_layout": kernel_layout,
}
if keras_layer.padding == "valid":
pass
# we insert a separate pad operator
elif keras_layer.padding == "same":
in_h = input_shape[1]
in_w = input_shape[2]
pad_t, pad_b = _get_pad_pair(in_h, kernel_h, stride_h)
pad_l, pad_r = _get_pad_pair(in_w, kernel_w, stride_w)
params0["padding"] = (pad_t, pad_l, pad_b, pad_r)
else:
msg = (
"Padding with {} is not supported for operator Separable "
"Convolution in frontend Keras."
)
raise tvm.error.OpAttributeUnImplemented(msg.format(keras_layer.padding))
depthconv = _op.nn.conv2d(data=inexpr, **params0)
# pointwise conv
if kernel_layout == "OIHW":
weight1 = weightList[1].transpose([3, 2, 0, 1])
else:
weight1 = weightList[1]
kernel_layout = "HWIO"
params1 = {
"weight": etab.new_const(weight1),
"channels": weightList[1].shape[3],
"groups": 1,
"kernel_size": [1, 1],
"strides": [1, 1],
"dilation": [1, 1],
"data_layout": data_layout,
"kernel_layout": kernel_layout,
}
out = _op.nn.conv2d(data=depthconv, **params1)
if keras_layer.use_bias:
bias = etab.new_const(weightList[2])
if data_layout == "NCHW":
out = _op.nn.bias_add(out, bias)
else:
out = _op.nn.bias_add(out, bias, axis=-1)
# defuse activation
if sys.version_info.major < 3:
act_type = keras_layer.activation.func_name
else:
act_type = keras_layer.activation.__name__
if act_type != "linear":
out = _convert_activation(out, act_type, etab, data_layout)
return out
def _convert_flatten(
inexpr, keras_layer, etab, data_layout, input_shape=None
): # pylint: disable=unused-argument
_check_data_format(keras_layer)
# NCHW -> NHWC so that dense can be correctly converted
if data_layout == "NCHW":
inexpr = _op.transpose(inexpr, axes=[0, 2, 3, 1])
return _op.nn.batch_flatten(inexpr)
def _convert_pooling(
inexpr, keras_layer, etab, data_layout, input_shape=None
): # pylint: disable=unused-argument
_check_data_format(keras_layer)
pool_type = type(keras_layer).__name__
# global pool in keras = global pool + flatten in relay
global_pool_params = {"layout": data_layout}
if input_shape is None:
input_shape = keras_layer.input_shape
if pool_type == "GlobalMaxPooling2D":
return _convert_flatten(
_op.nn.global_max_pool2d(inexpr, **global_pool_params), keras_layer, etab, data_layout
)
if pool_type == "GlobalAveragePooling2D":
global_avg_pool2d = _op.nn.global_avg_pool2d(inexpr, **global_pool_params)
keep_dims = len(keras_layer.input.shape) == len(keras_layer.output.shape)
if keep_dims:
return global_avg_pool2d
return _convert_flatten(global_avg_pool2d, keras_layer, etab, data_layout)
pool_h, pool_w = keras_layer.pool_size
stride_h, stride_w = keras_layer.strides
params = {
"pool_size": [pool_h, pool_w],
"strides": [stride_h, stride_w],
"padding": [0, 0],
"layout": data_layout,
}
if keras_layer.padding == "valid":
pass
elif keras_layer.padding == "same":
in_h = input_shape[1]
in_w = input_shape[2]
pad_t, pad_b = _get_pad_pair(in_h, pool_h, stride_h)
pad_l, pad_r = _get_pad_pair(in_w, pool_w, stride_w)
params["padding"] = [pad_t, pad_l, pad_b, pad_r]
else:
raise tvm.error.OpAttributeUnImplemented(
"Padding with {} is not supported in operator Pooling.".format(keras_layer.padding)
)
if pool_type == "MaxPooling2D":
return _op.nn.max_pool2d(inexpr, **params)
if pool_type == "AveragePooling2D":
params["count_include_pad"] = False
return _op.nn.avg_pool2d(inexpr, **params)
raise tvm.error.OpNotImplemented(
"Operator {} is not supported for frontend Keras.".format(keras_layer)
)
def _convert_pooling3d(
inexpr, keras_layer, etab, data_layout, input_shape=None
): # pylint: disable=unused-argument
_check_data_format(keras_layer)
pool_type = type(keras_layer).__name__
if input_shape is None:
input_shape = keras_layer.input_shape
if pool_type not in ["MaxPooling3D", "AveragePooling3D"]:
raise tvm.error.OpNotImplemented(
"Operator {} is not supported for frontend Keras.".format(keras_layer)
)
pool_d1, pool_d2, pool_d3 = keras_layer.pool_size
stride_d1, stride_d2, stride_d3 = keras_layer.strides
params = {
"pool_size": [pool_d1, pool_d2, pool_d3],
"strides": [stride_d1, stride_d2, stride_d3],
"padding": [0, 0, 0],
"layout": data_layout,
}
if keras_layer.padding == "valid":
pass
elif keras_layer.padding == "same":
in_d1 = input_shape[1]
in_d2 = input_shape[2]
in_d3 = input_shape[3]
pad_d1 = _get_pad_pair(in_d1, pool_d1, stride_d1)
pad_d2 = _get_pad_pair(in_d2, pool_d2, stride_d2)
pad_d3 = _get_pad_pair(in_d3, pool_d3, stride_d3)
params["padding"] = [pad_d1[0], pad_d2[0], pad_d3[0], pad_d1[1], pad_d2[1], pad_d3[1]]
else:
raise tvm.error.OpAttributeUnImplemented(
"Padding with {} is not supported in operator Pooling3D.".format(keras_layer.padding)
)
out = _op.transpose(inexpr, axes=(0, 4, 1, 2, 3))
params["layout"] = "NCDHW"
if pool_type == "MaxPooling3D":
out = _op.nn.max_pool3d(out, **params)
elif pool_type == "AveragePooling3D":
out = _op.nn.avg_pool3d(out, **params)
return _op.transpose(out, axes=(0, 2, 3, 4, 1))
def _convert_global_pooling3d(
inexpr, keras_layer, etab, data_layout, input_shape=None
): # pylint: disable=unused-argument
_check_data_format(keras_layer)
pool_type = type(keras_layer).__name__
global_pool_params = {"layout": data_layout}
if pool_type == "GlobalMaxPooling3D":
out = _op.nn.global_max_pool3d(inexpr, **global_pool_params)
elif pool_type == "GlobalAveragePooling3D":
out = _op.nn.global_avg_pool3d(inexpr, **global_pool_params)
else:
raise tvm.error.OpNotImplemented(
"Operator {} is not supported for frontend Keras.".format(keras_layer)
)
return _convert_flatten(out, keras_layer, etab, data_layout, input_shape)
def _convert_upsample(
inexpr, keras_layer, etab, data_layout, input_shape=None
): # pylint: disable=unused-argument
_check_data_format(keras_layer)
upsample_type = type(keras_layer).__name__
params = {}
if upsample_type == "UpSampling1D":
h = keras_layer.size
params["scale_h"] = h
elif upsample_type == "UpSampling2D":
h, w = keras_layer.size
if h != w:
raise tvm.error.OpAttributeInvalid("Height must equal width for operator Upsample.")
params["scale_h"] = h
params["scale_w"] = h
if hasattr(keras_layer, "interpolation"):
interpolation = keras_layer.interpolation
if interpolation == "nearest":
params["method"] = "nearest_neighbor"
else:
params["method"] = "bilinear"
else:
raise tvm.error.OpNotImplemented(
"Operator {} is not supported for frontend Keras.".format(upsample_type)
)
params["layout"] = data_layout
out = _op.nn.upsampling(inexpr, **params)
return out
def _convert_upsample3d(
inexpr, keras_layer, etab, data_layout, input_shape=None
): # pylint: disable=unused-argument
_check_data_format(keras_layer)
params = {}
d, h, w = keras_layer.size
params["scale_d"] = d
params["scale_h"] = h
params["scale_w"] = w
params["layout"] = data_layout
params["coordinate_transformation_mode"] = "asymmetric"
out = _op.nn.upsampling3d(inexpr, **params)
return out
def _convert_cropping(
inexpr, keras_layer, etab, data_layout, input_shape=None
): # pylint: disable=unused-argument
_check_data_format(keras_layer)
crop_type = type(keras_layer).__name__
if input_shape is None:
input_shape = keras_layer.input_shape
if crop_type == "Cropping2D":
(_, in_h, in_w, _) = input_shape
((crop_t, crop_b), (crop_l, crop_r)) = keras_layer.cropping
else:
raise tvm.error.OpNotImplemented(
"Operator {} is not supported for frontend Keras.".format(crop_type)
)
int32_max = np.iinfo(np.int32).max
return _op.strided_slice(
inexpr,
begin=[0, 0, crop_t, crop_l],
end=[int32_max, int32_max, in_h - crop_b, in_w - crop_r],
)
def _convert_batchnorm(inexpr, keras_layer, etab, data_layout, input_shape=None):
if input_shape is None:
input_shape = keras_layer.input_shape
if data_layout == "NCHW" or len(input_shape) < 4:
axis = 1
else:
axis = 3
params = {"scale": False, "center": False, "epsilon": keras_layer.epsilon, "axis": axis}
idx = 0
if keras_layer.scale:
params["scale"] = True
gamma = keras_layer.get_weights()[idx]
params["gamma"] = etab.new_const(gamma)
idx += 1
if keras_layer.center:
params["center"] = True
beta = keras_layer.get_weights()[idx]
params["beta"] = etab.new_const(beta)
idx += 1
moving_mean = keras_layer.get_weights()[idx]
moving_var = keras_layer.get_weights()[idx + 1]
params["moving_mean"] = etab.new_const(moving_mean)
params["moving_var"] = etab.new_const(moving_var)
# in case beta or gamma is not defined
params["beta"] = (
etab.new_const(np.zeros(moving_mean.shape)) if "beta" not in params else params["beta"]
)
params["gamma"] = (
etab.new_const(np.ones(moving_mean.shape)) if "gamma" not in params else params["gamma"]
)
result, moving_mean, moving_var = _op.nn.batch_norm(inexpr, **params)
return result
def _convert_padding(
inexpr, keras_layer, etab, data_layout, input_shape=None
): # pylint: disable=unused-argument
_check_data_format(keras_layer)
padding_type = type(keras_layer).__name__
padding = keras_layer.padding
top = left = bottom = right = 0
if padding_type == "ZeroPadding2D":
if isinstance(padding, int):
top = left = bottom = right = padding
elif isinstance(padding, tuple):
if isinstance(padding[0], int):
top, left = padding
bottom, right = padding
elif isinstance(padding[0], tuple):
top, bottom = padding[0]
left, right = padding[1]
else:
msg = 'Value {} in attribute "padding" of operator Padding ' "is not valid."
raise tvm.error.OpAttributeInvalid(msg.format(str(padding)))
else:
msg = 'Value {} in attribute "padding" of operator Padding is ' "not valid."
raise tvm.error.OpAttributeInvalid(msg.format(str(padding)))
else:
msg = "Operator {} is not supported in frontend Keras."
raise tvm.error.OpNotImplemented(msg.format(padding_type))
if data_layout == "NCHW":
return _op.nn.pad(data=inexpr, pad_width=((0, 0), (0, 0), (top, bottom), (left, right)))
return _op.nn.pad(data=inexpr, pad_width=((0, 0), (top, bottom), (left, right), (0, 0)))
def _convert_padding3d(
inexpr, keras_layer, etab, data_layout, input_shape=None
): # pylint: disable=unused-argument
_check_data_format(keras_layer)
padding = keras_layer.padding
d_pad = h_pad = w_pad = [0, 0]
# padding can be an int, a tuple of 3 ints, or a tuple of 3 tuples of 2 ints
# (possibly with different values per dimension). In all of these cases Keras
# normalizes it to 3 tuples of 2 ints before it reaches the frontend.
if isinstance(padding, tuple) and isinstance(padding[0], tuple):
d_pad = padding[0]
h_pad = padding[1]
w_pad = padding[2]
else:
msg = 'Value {} in attribute "padding" of operator ZeroPadding3D is ' "not valid."
raise tvm.error.OpAttributeInvalid(msg.format(str(padding)))
if data_layout == "NCDHW":
out = _op.nn.pad(
data=inexpr,
pad_width=(
(0, 0),
(0, 0),
(d_pad[0], d_pad[1]),
(h_pad[0], h_pad[1]),
(w_pad[0], w_pad[1]),
),
)
else:
out = _op.nn.pad(
data=inexpr,
pad_width=(
(0, 0),
(d_pad[0], d_pad[1]),
(h_pad[0], h_pad[1]),
(w_pad[0], w_pad[1]),
(0, 0),
),
)
return out
def _convert_concat(
inexpr, keras_layer, etab, data_layout, input_shape=None
): # pylint: disable=unused-argument
_check_data_format(keras_layer)
if input_shape is None:
input_shape = keras_layer.input_shape
if data_layout == "NHWC" or len(input_shape[0]) < 4:
axis = -1
else:
axis = 1
return _op.concatenate(_as_list(inexpr), axis=axis)
def _convert_reshape(
inexpr, keras_layer, etab, data_layout, input_shape=None
): # pylint: disable=unused-argument
_check_data_format(keras_layer)
if input_shape is None:
input_shape = keras_layer.input_shape
inshape = input_shape # includes batch
tshape = keras_layer.target_shape # no batch
shape = (-1,) + tshape
if data_layout == "NCHW" and (len(inshape) > 3 or len(tshape) > 2):
# Perform reshape in original NHWC format.
inexpr = _op.transpose(inexpr, [0] + list(range(2, len(inshape))) + [1])
inexpr = _op.reshape(inexpr, newshape=shape)
return _op.transpose(inexpr, axes=[0, -1] + list(range(1, len(shape) - 1)))
return _op.reshape(inexpr, newshape=shape)
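# The LSTM converter below unrolls one step per time slice of a standard Keras
# LSTM cell. With gates[0..3] the [i, f, c, o] chunks of the fused projection
# (Keras weight order), each step computes roughly:
#   i = rec_act(gates[0]);  f = rec_act(gates[1])
#   c_t = f * c_{t-1} + i * act(gates[2])
#   o = rec_act(gates[3]);  h_t = o * act(c_t)
# where rec_act is the recurrent activation (sigmoid by default) and act is the
# cell activation (tanh by default).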
def _convert_lstm(
inexpr, keras_layer, etab, data_layout, input_shape=None
): # pylint: disable=unused-argument
_check_data_format(keras_layer)
if input_shape is None:
input_shape = keras_layer.input_shape
if not isinstance(inexpr, list):
buf = np.zeros((1, keras_layer.units), "float32")
c_op = etab.new_const(buf)
h_op = etab.new_const(buf)
inexpr = [inexpr, h_op, c_op]
in_data = inexpr[0]
next_h = inexpr[1]
next_c = inexpr[2]
weightList = keras_layer.get_weights()
in_shape = tuple(dim if dim else 1 for dim in _as_list(input_shape)[0])
kernel_weight = etab.new_const(weightList[0].transpose([1, 0]))
recurrent_weight = etab.new_const(weightList[1].transpose([1, 0]))
if keras_layer.use_bias:
in_bias = etab.new_const(weightList[2])
units = list(weightList[0].shape)[1]
time_steps = in_shape[1]
in_data = _op.squeeze(in_data, axis=[0])
in_data = _op.split(in_data, indices_or_sections=time_steps, axis=0)
# loop for the number of time_steps
out_list = [] # store h outputs in case return_sequences is True
for data in in_data:
ixh1 = _op.nn.dense(data, kernel_weight, units=units)
ixh2 = _op.nn.dense(next_h, recurrent_weight, units=units)
if keras_layer.use_bias:
ixh2 = _op.nn.bias_add(ixh2, bias=in_bias)
gate = ixh1 + ixh2
gates = _op.split(gate, indices_or_sections=4, axis=1)
in_gate = _convert_recurrent_activation(gates[0], keras_layer)
in_transform = _convert_recurrent_activation(gates[1], keras_layer)
next_c = in_transform * next_c + in_gate * _convert_activation(
gates[2], keras_layer, etab, data_layout
)
out_gate = _convert_recurrent_activation(gates[3], keras_layer)
next_h = out_gate * _convert_activation(next_c, keras_layer, etab, data_layout)
if keras_layer.return_sequences:
out_list.append(_op.expand_dims(next_h, axis=1))
out = _op.concatenate(out_list, axis=1) if keras_layer.return_sequences else next_h
out_shape = tuple(dim if dim else 1 for dim in _as_list(keras_layer.output_shape)[0])
out = _op.reshape(out, newshape=out_shape)
return [out, next_h, next_c]
def _convert_simple_rnn(
inexpr, keras_layer, etab, data_layout, input_shape=None
): # pylint: disable=unused-argument
_check_data_format(keras_layer)
if not isinstance(inexpr, list):
buf = np.zeros((1, keras_layer.units), "float32")
prev_op = etab.new_const(buf)
inexpr = [inexpr, prev_op]
in_data = inexpr[0]
prev_op = inexpr[1]
weightList = keras_layer.get_weights()
kernel_weight = etab.new_const(weightList[0].transpose([1, 0]))
recurrent_weight = etab.new_const(weightList[1].transpose([1, 0]))
if keras_layer.use_bias:
in_bias = etab.new_const(weightList[2])
units = list(weightList[0].shape)[1]
in_data = _op.nn.batch_flatten(in_data)
ixh = _op.nn.dense(in_data, kernel_weight, units=units)
if keras_layer.use_bias:
ixh = _op.nn.bias_add(ixh, bias=in_bias)
prev_op = _op.nn.batch_flatten(prev_op)
ixh2 = _op.nn.dense(prev_op, recurrent_weight, units=units)
output = ixh + ixh2
output = _convert_activation(output, keras_layer, etab, data_layout)
out_shape = tuple(dim if dim else 1 for dim in _as_list(keras_layer.output_shape)[0])
output = _op.reshape(output, newshape=out_shape)
return [output, output]
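# The GRU converter below implements one step of a GRU cell in the reset-before
# formulation (the reset gate is applied to h_{t-1} before the recurrent
# projection). With x_z, x_r, x_h the input projections, it computes roughly:
#   z = rec_act(x_z + U_z h_{t-1});  r = rec_act(x_r + U_r h_{t-1})
#   h_cand = act(x_h + U_h (r * h_{t-1}))
#   h_t = z * h_{t-1} + (1 - z) * h_cand
# matching the splits of the kernel and recurrent weights in the function body.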
def _convert_gru(
inexpr, keras_layer, etab, data_layout, input_shape=None
): # pylint: disable=unused-argument
_check_data_format(keras_layer)
if not isinstance(inexpr, list):
buf = np.zeros((1, keras_layer.units), "float32")
h_tm1 = etab.new_const(buf)
inexpr = [inexpr, h_tm1]
in_data = inexpr[0]
h_tm1_op = inexpr[1]
weightList = keras_layer.get_weights()
kernel_weight = etab.new_const(weightList[0].transpose([1, 0]))
recurrent_weight = etab.new_const(weightList[1].transpose([1, 0]))
if keras_layer.use_bias:
in_bias = etab.new_const(weightList[2])
units = list(weightList[0].shape)[1]
in_data = _op.nn.batch_flatten(in_data)
matrix_x = _op.nn.dense(in_data, kernel_weight, units=units)
if keras_layer.use_bias:
matrix_x = _op.nn.bias_add(matrix_x, in_bias)
# inputs projected by all gate matrices at once
split_indices = [keras_layer.units, 2 * keras_layer.units]
gates = _op.split(matrix_x, indices_or_sections=split_indices, axis=1)
x_z = gates[0]
x_r = gates[1]
x_h = gates[2]
# hidden state projected separately for update/reset and new
units = 2 * keras_layer.units
split_indices = [units]
rec_weights = _op.split(recurrent_weight, indices_or_sections=split_indices, axis=0)
h_tm1_op = _op.nn.batch_flatten(h_tm1_op)
matrix_inner = _op.nn.dense(h_tm1_op, rec_weights[0], units=units)
split_indices = [keras_layer.units]
recurrent = _op.split(matrix_inner, indices_or_sections=split_indices, axis=1)
recurrent_z = recurrent[0]
recurrent_r = recurrent[1]
rec_act_z = _convert_recurrent_activation(x_z + recurrent_z, keras_layer)
rec_act_r = _convert_recurrent_activation(x_r + recurrent_r, keras_layer)
units = keras_layer.units
recurrent_h = _op.nn.dense(rec_act_r * h_tm1_op, rec_weights[1], units=units)
act_hh = _convert_activation(x_h + recurrent_h, keras_layer, etab, data_layout)
# previous and candidate state mixed by update gate
output = rec_act_z * h_tm1_op + (_expr.const(1.0, dtype="float32") - rec_act_z) * act_hh
out_shape = tuple(dim if dim else 1 for dim in _as_list(keras_layer.output_shape)[0])
output = _op.reshape(output, newshape=out_shape)
return [output, output]
def _convert_repeat_vector(
inexpr, keras_layer, etab, data_layout, input_shape=None
): # pylint: disable=unused-argument
if input_shape is None:
input_shape = keras_layer.input_shape
input_shape = list(input_shape)
repeats = keras_layer.n
out_shape = [-1, repeats] + input_shape[1:]
out = _op.repeat(inexpr, repeats=repeats, axis=0)
out = _op.reshape(out, out_shape)
return out
def _convert_l2_normalize(inexpr, keras_layer, data_layout):
l2_normalize_is_loaded = False
param_list = []
for i in dis.get_instructions(keras_layer.function):
if i.opname in ["LOAD_GLOBAL", "LOAD_DEREF"]:
continue
if i.opname in ["LOAD_ATTR", "LOAD_METHOD"]:
if i.argval == "l2_normalize":
assert not l2_normalize_is_loaded, "l2_normalize was already LOADED"
l2_normalize_is_loaded = True
elif i.opname in ["LOAD_CONST", "LOAD_FAST"] and l2_normalize_is_loaded:
param_list.append(i.argval)
elif i.opname == "BUILD_LIST":
sz = i.argval
assert len(param_list) >= sz
new_list = param_list[-sz:]
param_list = param_list[:-sz]
param_list.append(new_list)
elif i.opname in ["CALL_FUNCTION_KW", "CALL_METHOD"]:
break
axis = None
is_param_list_parsed = False
if l2_normalize_is_loaded and len(param_list) > 0:
# if the last param_list item is a tuple of strings, the
# lambda uses named parameters when calling l2_normalize
if (
isinstance(param_list[-1], tuple)
and len(param_list[-1]) > 0
and isinstance(param_list[-1][0], str)
):
param_names = param_list[-1]
if len(param_names) == 1 and param_names[0] == "x":
# lambda v: K.l2_normalize(x=v)
axis = None
is_param_list_parsed = True
elif len(param_names) == 1 and param_names[0] == "axis" and len(param_list) == 3:
# lambda x: K.l2_normalize(x, axis=(2,3))
axis = param_list[1]
is_param_list_parsed = True
elif len(param_names) == 2 and len(param_list) == 3:
# lambda x: K.l2_normalize(x=x, axis=(2,3))
# lambda x: K.l2_normalize(axis=(2,3), x=x)
axis = param_list[param_names.index("axis")]
is_param_list_parsed = True
else:
# lambda x: K.l2_normalize(x)
if len(param_list) == 1:
axis = None
is_param_list_parsed = True
# lambda x: K.l2_normalize(x, (2,3))
elif len(param_list) == 2:
axis = param_list[1]
is_param_list_parsed = True
def is_int_or_tuple_of_ints(v):
if isinstance(v, list) and len(v) > 0:
for i in v:
if not isinstance(i, int):
return False
return True
if isinstance(v, tuple) and len(v) > 0:
return isinstance(v[0], int)
return isinstance(v, int)
assert is_param_list_parsed and (
axis is None or is_int_or_tuple_of_ints(axis)
), "Can not parse l2_normalize lambda function found in Lambda layer"
if isinstance(axis, int):
axis = [axis]
if data_layout == "NCHW":
dims = len(keras_layer.input_shape)
def fix_axis_for_nchw(axis):
if axis == 0:
return 0
if axis in [(dims - 1), -1]:
return 1
return axis + 1
axis = [fix_axis_for_nchw(x) for x in axis]
return _op.nn.l2_normalize(inexpr, eps=1e-12, axis=axis)
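# Illustrative Lambda layers that the bytecode scan above can recognize, assuming
# `from tensorflow.keras import backend as K` (names are the caller's choice):
#   Lambda(lambda x: K.l2_normalize(x, axis=3))
#   Lambda(lambda x: K.l2_normalize(x=x, axis=(2, 3)))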
def _convert_lambda(inexpr, keras_layer, _, data_layout):
fcode = keras_layer.function.__code__
# Convert l2_normalize
if (
fcode.co_name == "<lambda>"
and len(fcode.co_names) > 0
and fcode.co_names[-1] == "l2_normalize"
):
return _convert_l2_normalize(inexpr, keras_layer, data_layout)
raise tvm.error.OpNotImplemented(
"Function {} used in Lambda layer is not supported in frontend Keras.".format(
fcode.co_names
)
)
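# Illustrative shape flow for the TimeDistributed converter below: an input of
# shape (1, 5, 16) wrapped as TimeDistributed(Dense(8)) is split into 5 tensors
# of shape (1, 1, 16), each squeezed to (1, 16) and run through the Dense
# conversion, and the 5 results are stacked back along axis 1, giving (1, 5, 8).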
def _convert_time_distributed(inexpr, keras_layer, etab, data_layout, input_shape=None):
# TimeDistributed: split input tensor along the second dimension (assumed to be time),
# apply inner layer to each split individually,
# and then combine the results
if input_shape is None:
input_shape = keras_layer.input_shape
assert len(input_shape) >= 2, "Input to TimeDistributed must have at least two dimensions"
inner_layer = keras_layer.layer
inner_input_shape = [d for (i, d) in enumerate(input_shape) if i != 1]
# for NDHWC, inner data layout will drop the D
inner_data_layout = data_layout
if data_layout == "NDHWC":
inner_data_layout = "NHWC"
# some code duplication from keras_op_to_relay
# but it's useful to avoid cluttering the etab
inner_layer_op_name = type(keras_layer.layer).__name__
if inner_layer_op_name not in _convert_map:
raise tvm.error.OpNotImplemented(
"The inner layer for TimeDistributed {} is not supported for frontend Keras.".format(
inner_layer_op_name
)
)
conversion_func = lambda expr: _convert_map[inner_layer_op_name](
expr, inner_layer, etab, inner_data_layout, input_shape=inner_input_shape
)
split_dim = input_shape[1]
split_input = _op.split(inexpr, split_dim, 1)
split_shape = list(input_shape)
if split_shape[0] is None:
split_shape[0] = 1
split_shape[1] = 1
split_var = new_var(
"time_distributed_split",
type_annotation=TupleType(
[TensorType(split_shape, dtype="float32") for i in range(split_dim)]
),
)
# For each split, squeeze away the second dimension,
# apply the inner layer.
# Afterwards, combine the transformed splits back along
# the second dimension using stack
splits = [
conversion_func(_op.squeeze(_expr.TupleGetItem(split_var, i), axis=[1]))
for i in range(split_dim)
]
return _expr.Let(split_var, split_input.astuple(), _op.stack(splits, axis=1))
def _default_skip(inexpr, keras_layer, etab, data_layout): # pylint: disable=unused-argument
"""Layers that can be skipped because they are train time only."""
return inexpr
_convert_map = {
"Dense": _convert_dense,
"Activation": _convert_activation,
"Softmax": _convert_advanced_activation,
"ReLU": _convert_advanced_activation,
"LeakyReLU": _convert_advanced_activation,
"PReLU": _convert_advanced_activation,
"ELU": _convert_advanced_activation,
"ThresholdedReLU": _convert_advanced_activation,
"AveragePooling2D": _convert_pooling,
"MaxPooling2D": _convert_pooling,
"GlobalAveragePooling2D": _convert_pooling,
"GlobalMaxPooling2D": _convert_pooling,
"Conv2D": _convert_convolution,
"Conv2DTranspose": _convert_convolution,
"DepthwiseConv2D": _convert_convolution,
"SeparableConv2D": _convert_separable_convolution,
"Flatten": _convert_flatten,
"Reshape": _convert_reshape,
"Concatenate": _convert_concat,
"BatchNormalization": _convert_batchnorm,
# Specific tf.Keras terminology for batch normalization
"BatchNormalizationV1": _convert_batchnorm,
"Add": _convert_merge,
"Subtract": _convert_merge,
"Multiply": _convert_merge,
"ZeroPadding2D": _convert_padding,
"UpSampling2D": _convert_upsample,
"Cropping2D": _convert_cropping,
# 'ZeroPadding1D' : _convert_padding,
# 'AveragePooling1D' : _convert_pooling,
# 'MaxPooling1D' : _convert_pooling,
# 'GlobalAveragePooling1D' : _convert_pooling,
# 'GlobalMaxPooling1D' : _convert_pooling,
# 'Cropping1D' : _convert_cropping,
# 'UpSampling1D' : _convert_upsample,
"Conv1D": _convert_convolution1d,
# "Conv1DTranspose": _convert_convolution1d,
"Conv3D": _convert_convolution3d,
"Conv3DTranspose": _convert_convolution3d,
# 'SeparableConv3D' : _convert_convolution3d,
"MaxPooling3D": _convert_pooling3d,
"AveragePooling3D": _convert_pooling3d,
"GlobalMaxPooling3D": _convert_global_pooling3d,
"GlobalAveragePooling3D": _convert_global_pooling3d,
"UpSampling3D": _convert_upsample3d,
"ZeroPadding3D": _convert_padding3d,
"SimpleRNN": _convert_simple_rnn,
"LSTM": _convert_lstm,
"GRU": _convert_gru,
# 'Bidirectional' : _convert_bidirectional,
"TimeDistributed": _convert_time_distributed,
"Average": _convert_merge,
"Minimum": _convert_merge,
"Maximum": _convert_merge,
"Dot": _convert_merge,
"Permute": _convert_permute,
"Embedding": _convert_embedding,
"RepeatVector": _convert_repeat_vector,
"Lambda": _convert_lambda,
"InputLayer": _default_skip,
"Dropout": _default_skip,
"AlphaDropout": _default_skip,
"SpatialDropout2D": _default_skip,
"SpatialDropout1D": _default_skip,
"GaussianDropout": _default_skip,
"GaussianNoise": _default_skip,
}
def _check_unsupported_layers(model):
missing_ops = set()
for layer in model.layers:
op_name = type(layer).__name__
if op_name not in _convert_map:
missing_ops.add(op_name)
if missing_ops:
raise NotImplementedError(
"The following operators are not implemented: {}".format(missing_ops)
)
def keras_op_to_relay(inexpr, keras_layer, outname, etab, data_layout):
"""Convert a Keras layer to a Relay expression and update the expression table.
Parameters
----------
inexpr : relay.expr.Expr or a list of it
The input Relay expression(s).
keras_layer : keras.layers
The Keras layer to be converted.
outname : str
Name of the output Relay expression.
etab : relay.frontend.common.ExprTable
The global expression table to be updated.
data_layout : str
The input data layout
Returns
-------
outs : list of relay.expr.Expr
The converted output expressions; each is also registered in the expression table.
"""
op_name = type(keras_layer).__name__
if op_name not in _convert_map:
raise tvm.error.OpNotImplemented(
"Operator {} is not supported for frontend Keras.".format(op_name)
)
outs = _convert_map[op_name](inexpr, keras_layer, etab, data_layout)
outs = _as_list(outs)
for t_idx, out in enumerate(outs):
name = outname + ":" + str(t_idx)
etab.set_expr(name, out)
return outs
def from_keras(model, shape=None, layout="NCHW"):
"""Convert keras model to relay Function.
Parameters
----------
model : keras.engine.training.Model or tensorflow.keras.models.Model
The keras model to be converted.
shape: dict of str to int list/tuple
Input shapes of the model, optional
layout: str
One of 'NCHW', 'NHWC', 'NWC' or 'NDHWC', indicating how data should be
arranged in the output model. Default layout is 'NCHW', as it generally
performs better across TVM.
Returns
-------
mod : tvm.IRModule
The relay module for compilation.
params : dict of str to tvm.nd.NDArray
The parameter dict to be used by Relay.
"""
def _check_model_is_tf_keras():
return type(model).__module__.startswith("tensorflow.python.keras")
def _convert_input_layer(keras_layer):
input_name = keras_layer.name
input_shape = shape[input_name] if shape is not None and input_name in shape else None
etab.set_expr(input_name, new_var(input_name, shape=input_shape))
def _convert_layer(keras_layer, etab, scope=""):
inbound_nodes = (
keras_layer.inbound_nodes
if hasattr(keras_layer, "inbound_nodes")
else keras_layer._inbound_nodes
if hasattr(keras_layer, "_inbound_nodes")
else None
)
if inbound_nodes is None:
raise TypeError(
"Unknown layer type or unsupported Keras version : {}".format(keras_layer)
)
outs = []
for node_idx, node in enumerate(inbound_nodes):
# If some nodes in imported model are not relevant to the current model,
# skip such layers.
# - In Keras, model._network_nodes contains keys of all nodes relevant to the
# current model;
# - In tf.Keras, this is already done as part of tensorflow.keras.network.get_config
if not is_tf_keras:
if (
hasattr(model, "_node_key")
                    and model._node_key(keras_layer, node_idx) not in model._network_nodes
):
continue
inexpr = []
# Since Keras allows creating multiple layers from the same name instance,
# we append node index to the expr name to make it unique.
# The one exception is InputLayer. Changing input variable names after conversion
            # would confuse users, so we keep them unchanged where possible. Fortunately,
# they are named uniquely to input_1, input_2, input_3... by default.
# node_indices attribute removed in tensorflow 2.3, however iterate_inbound() can
# be used
if hasattr(node, "node_indices"):
zip_node = zip(
_as_list(node.inbound_layers),
_as_list(node.node_indices),
_as_list(node.tensor_indices),
_as_list(node.input_tensors),
)
node_attributes = zip_node
else:
node_attributes = node.iterate_inbound()
for inbound_layer, n_idx, t_idx, _ in node_attributes:
if isinstance(inbound_layer, input_layer_class):
expr_name = inbound_layer.name
_convert_input_layer(inbound_layer)
else:
expr_name = scope + inbound_layer.name + ":" + str(n_idx) + ":" + str(t_idx)
expr = etab.get_expr(expr_name)
inexpr.append(expr)
# Handle nested layers
if hasattr(keras_layer, "layers"):
input_index = 0
for layer in keras_layer.layers:
if isinstance(layer, input_layer_class):
# Replace input layer with inbound node
etab.set_expr(layer.name, inexpr[input_index])
input_index += 1
else:
# Convert child layer. Prepend scope with parent layer name.
layer_outs = _convert_layer(layer, etab, keras_layer.name + "_" + scope)
# Get output of last child layer and mark as output of parent.
outname = keras_layer.name + ":" + str(node_idx)
for t_idx, out in enumerate(layer_outs):
name = outname + ":" + str(t_idx)
etab.set_expr(name, out)
outs.extend(layer_outs)
else:
if len(inexpr) == 1:
inexpr = inexpr[0]
outs.extend(
keras_op_to_relay(
inexpr,
keras_layer,
scope + keras_layer.name + ":" + str(node_idx),
etab,
layout,
)
)
return outs
is_tf_keras = _check_model_is_tf_keras()
if not is_tf_keras:
# Importing from Keras
try:
import keras
except ImportError:
raise ImportError("Keras must be installed")
if keras.backend.backend() != "tensorflow":
raise ValueError("Keras frontend currently supports tensorflow backend only.")
if keras.backend.image_data_format() != "channels_last":
raise ValueError("Keras frontend currently supports data_format = channels_last only.")
expected_model_class = keras.engine.training.Model
if hasattr(keras.engine, "InputLayer"):
input_layer_class = keras.engine.InputLayer
else:
            # Keras >= 2.6
input_layer_class = keras.engine.input_layer.InputLayer
else:
# Importing from Tensorflow Keras (tf.keras)
try:
from tensorflow import keras as tf_keras
except ImportError:
raise ImportError("Tensorflow must be installed")
expected_model_class = tf_keras.models.Model
input_layer_class = tf_keras.layers.InputLayer
assert isinstance(model, expected_model_class)
etab = ExprTable()
# Set global data format.
assert layout in [
"NWC",
"NCHW",
"NHWC",
"NDHWC",
], "Layout must be one of 'NWC', 'NCHW', NHWC or NDHWC"
for keras_layer in model.layers:
if isinstance(keras_layer, input_layer_class):
_convert_input_layer(keras_layer)
else:
_convert_layer(keras_layer, etab)
# model._output_coordinates contains out_node(oc[0]), node_index(oc[1]) and tensor_index(oc[2])
# Get all output nodes in etab using the name made from above values.
# The out exprs were added to etab in keras_op_to_relay using this name.
outexpr = [
etab.get_expr(oc[0].name + ":" + str(oc[1]) + ":" + str(oc[2]))
for oc in model._output_coordinates
]
outexpr = outexpr[0] if len(outexpr) == 1 else _expr.Tuple(outexpr)
func = _function.Function(analysis.free_vars(outexpr), outexpr)
params = {k: _nd.array(np.array(v, dtype=np.float32)) for k, v in etab.params.items()}
return IRModule.from_expr(func), params
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/mxnet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition, no-else-return, too-many-lines
"""MXNet symbol frontend."""
import json
import math
import numpy as np
import tvm
from tvm import relay
from tvm.ir import IRModule
from tvm.topi.utils import get_const_tuple
from ... import nd as _nd
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from .. import scope_builder as _scope_builder
from .common import StrAttrsDict
from .common import get_name as _get_name
from .common import infer_shape as _infer_shape
from .common import infer_type as _infer_type
from .common import infer_value as _infer_value
from .mxnet_qnn_op_utils import (
dequantize_mxnet_min_max,
get_conv_mkldnn_requantized_scale_outDtype,
get_mkldnn_int8_scale,
get_mkldnn_requantize_scale_outDtype,
get_mkldnn_uint8_scale,
quantize_conv_bias_mkldnn_from_var,
quantize_conv_weights_bias_channel_mkldnn_from_var,
quantize_mxnet_min_max,
)
from .nnvm_common import (
_arg_reduce,
_binop_scalar,
_cast,
_clip,
_elemwise_sum,
_init_op,
_rbinop_scalar,
_reduce,
_rename,
_reshape,
_softmax_op,
_transpose,
_upsampling,
_warn_not_used,
)
__all__ = ["from_mxnet"]
_activation_map = {"sigmoid": _op.sigmoid, "tanh": _op.tanh, "relu": _op.nn.relu}
def _mx_fully_connected(inputs, attrs):
import mxnet as mx # pylint: disable=import-outside-toplevel
units = attrs.get_int("num_hidden")
use_bias = not attrs.get_bool("no_bias", False)
try:
_ = mx.sym.FullyConnected(mx.sym.var("x"), num_hidden=1, flatten=True)
has_flatten = True
except mx.base.MXNetError:
# no flatten attribute in old mxnet
has_flatten = False
use_flatten = attrs.get_bool("flatten", True)
if has_flatten and use_flatten:
inputs[0] = _op.nn.batch_flatten(inputs[0])
data_shape = _infer_type(inputs[0]).checked_type.shape
if len(data_shape) > 2:
inputs[0] = _op.reverse_reshape(inputs[0], [-1, 0])
res = _op.nn.dense(inputs[0], inputs[1], units=units)
if use_bias:
assert len(inputs) == 3
res = _op.nn.bias_add(res, inputs[2], axis=-1)
if len(data_shape) > 2:
new_shape = data_shape[:-1]
new_shape.append(units)
res = _op.reshape(res, new_shape)
return res
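# Illustrative note on the rank > 2 handling above (hypothetical shapes): a
# (seq_len, batch, in_units) input is collapsed with reverse_reshape([-1, 0]) to
# (seq_len * batch, in_units) before nn.dense, then reshaped back to
# (seq_len, batch, num_hidden) afterwards.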
def _get_channel_axis(layout, op_name):
if layout in ["NCHW", "NCDHW"]:
return 1
if layout == "NHWC":
return 3
if layout == "NDHWC":
return 4
raise tvm.error.OpAttributeInvalid(
'Value {} in attribute "layout" of operator {} is not valid.'.format(layout, op_name)
)
def _mx_activations(inputs, attrs):
act_type = attrs.get_str("act_type")
assert len(inputs) == 1
if act_type == "softrelu":
def _stable_softrelu(x):
# log(1 + exp(-abs(x))) + relu(x)
one = _expr.const(1, dtype="float32")
exp_neg_abs_x = _op.exp(_op.negative(_op.abs(x)))
return _op.add(_op.log(_op.add(one, exp_neg_abs_x)), _op.nn.relu(x))
return _stable_softrelu(inputs[0])
if act_type not in _activation_map:
raise tvm.error.OpNotImplemented(
"Operator {} is not supported for frontend MXNet.".format(act_type)
)
return _activation_map[act_type](inputs[0])
def _mx_compare(new_op, wrapper):
def impl(inputs, attrs):
expr = _infer_type(inputs[0])
dtype = expr.checked_type.dtype
return wrapper(new_op)(inputs, attrs).astype(dtype)
return impl
def _mx_unravel_index(inputs, attrs):
assert len(inputs) == 1
shape = attrs.get_int_tuple("shape")
shape_expr = _expr.const(list(shape))
return _op.unravel_index(inputs[0], shape_expr)
def _mx_swap_axis(inputs, attrs):
assert len(inputs) == 1
dim1 = attrs.get_int("dim1")
dim2 = attrs.get_int("dim2")
shape = _infer_type(inputs[0]).checked_type.shape
axes = list(range(len(shape)))
axes[dim1] = dim2
axes[dim2] = dim1
return _op.transpose(inputs[0], axes=axes)
def _mx_zeros(inputs, attrs):
assert len(inputs) == 0
shape = attrs.get_int_tuple("shape")
dtype = attrs.get_str("dtype", "float32")
if 0 in shape:
return None
return _op.zeros(shape=shape, dtype=dtype)
def _mx_conv(inputs, attrs):
kernel_size = attrs.get_int_tuple("kernel")
if len(kernel_size) == 3:
return _mx_conv3d(inputs, attrs)
elif len(kernel_size) == 2:
return _mx_conv2d(inputs, attrs)
elif len(kernel_size) == 1:
return _mx_conv1d(inputs, attrs)
else:
raise tvm.error.OpAttributeInvalid(
"1D, 2D or 3D kernels only are supported for operator Convolution"
)
def _mx_conv1d(inputs, attrs):
kernel_size = attrs.get_int_tuple("kernel")
if len(kernel_size) != 1:
raise tvm.error.OpAttributeInvalid(
"Non 1D or 2D kernels are not supported for operator Convolution"
)
data_layout = attrs.get_str("layout", "NCW")
    # MXNet Conv1D only supports "NCW" layout for now.
if data_layout != "NCW":
raise tvm.error.OpAttributeInvalid('Only "NCW" data layout is supported for 1D Convolution')
data_layout = "NCHW"
channel_axis = 1
kernel_layout = "OIHW"
new_attrs = {}
new_attrs["channels"] = attrs.get_int("num_filter")
new_attrs["kernel_size"] = (1,) + kernel_size
new_attrs["strides"] = (1,) + attrs.get_int_tuple("stride", (1,))
new_attrs["padding"] = (0,) + attrs.get_int_tuple("pad", (0,))
new_attrs["dilation"] = (1,) + attrs.get_int_tuple("dilate", (1,))
new_attrs["groups"] = attrs.get_int("num_group", 1)
new_attrs["data_layout"] = data_layout
new_attrs["kernel_layout"] = kernel_layout
use_bias = not attrs.get_bool("no_bias", False)
data = _op.expand_dims(inputs[0], axis=2)
kernel = _op.expand_dims(inputs[1], axis=2)
res = _op.nn.conv2d(data, kernel, **new_attrs)
if use_bias:
assert len(inputs) == 3
res = _op.nn.bias_add(res, inputs[2], axis=channel_axis)
res = _op.squeeze(res, axis=[2])
return res
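# Illustrative note on the 1D-as-2D lowering above (hypothetical shapes): an NCW input
# (N, C, W) is expanded to (N, C, 1, W) and the OIW kernel (O, I, w) to (O, I, 1, w),
# a regular NCHW conv2d runs with kernel_size (1, w), and the height axis is squeezed
# away again, yielding (N, num_filter, W_out).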
def _get_mx_conv2d_attrs(attrs):
kernel_size = attrs.get_int_tuple("kernel")
data_layout = attrs.get_str("layout", "NCHW")
if "kernel_layout" in attrs.attrs:
kernel_layout = attrs.get_str("kernel_layout")
else:
kernel_layout = "HWIO" if data_layout == "NHWC" else "OIHW"
new_attrs = {}
new_attrs["channels"] = attrs.get_int("num_filter")
new_attrs["kernel_size"] = kernel_size
new_attrs["strides"] = attrs.get_int_tuple("stride", (1, 1))
new_attrs["padding"] = attrs.get_int_tuple("pad", (0, 0))
new_attrs["dilation"] = attrs.get_int_tuple("dilate", (1, 1))
new_attrs["groups"] = attrs.get_int("num_group", 1)
new_attrs["data_layout"] = data_layout
new_attrs["kernel_layout"] = kernel_layout
return new_attrs
def _mx_conv2d(inputs, attrs):
kernel_size = attrs.get_int_tuple("kernel")
data_layout = attrs.get_str("layout", "NCHW")
if len(kernel_size) != 2:
raise tvm.error.OpAttributeInvalid("Only 2D kernels are supported for operator Convolution")
new_attrs = _get_mx_conv2d_attrs(attrs)
channel_axis = _get_channel_axis(data_layout, "conv2d")
use_bias = not attrs.get_bool("no_bias", False)
res = _op.nn.conv2d(inputs[0], inputs[1], **new_attrs)
if use_bias:
assert len(inputs) == 3
res = _op.nn.bias_add(res, inputs[2], axis=channel_axis)
return res
def _get_mx_conv3d_attrs(attrs):
kernel_size = attrs.get_int_tuple("kernel")
data_layout = attrs.get_str("layout", "NCDHW")
if "kernel_layout" in attrs.attrs:
kernel_layout = attrs.get_str("kernel_layout")
else:
kernel_layout = "DHWIO" if data_layout == "NDHWC" else "OIDHW"
new_attrs = {}
new_attrs["channels"] = attrs.get_int("num_filter")
new_attrs["kernel_size"] = kernel_size
new_attrs["strides"] = attrs.get_int_tuple("stride", (1, 1, 1))
new_attrs["padding"] = attrs.get_int_tuple("pad", (0, 0, 0))
new_attrs["dilation"] = attrs.get_int_tuple("dilate", (1, 1, 1))
new_attrs["groups"] = attrs.get_int("num_group", 1)
new_attrs["data_layout"] = data_layout
new_attrs["kernel_layout"] = kernel_layout
return new_attrs
def _mx_conv3d(inputs, attrs):
kernel_size = attrs.get_int_tuple("kernel")
data_layout = attrs.get_str("layout", "NCDHW")
if len(kernel_size) != 3:
raise tvm.error.OpAttributeInvalid("Only 3D kernels are supported for operator Convolution")
new_attrs = _get_mx_conv3d_attrs(attrs)
channel_axis = _get_channel_axis(data_layout, "conv3d")
use_bias = not attrs.get_bool("no_bias", False)
res = _op.nn.conv3d(inputs[0], inputs[1], **new_attrs)
if use_bias:
assert len(inputs) == 3
res = _op.nn.bias_add(res, inputs[2], axis=channel_axis)
return res
def _mx_conv_transpose(inputs, attrs):
kernel_size = attrs.get_int_tuple("kernel")
if len(kernel_size) == 3:
return _mx_conv3d_transpose(inputs, attrs)
elif len(kernel_size) == 2:
return _mx_conv2d_transpose(inputs, attrs)
elif len(kernel_size) == 1:
return _mx_conv1d_transpose(inputs, attrs)
else:
raise tvm.error.OpAttributeInvalid(
"1D, 2D or 3D kernels only are supported for operator Convolution"
)
def _mx_conv1d_transpose(inputs, attrs):
if "target_shape" in attrs.attrs:
raise tvm.error.OpAttributeUnImplemented(
'Attribute "target_shape" is not supported for operator Conv2D-transpose.'
)
data_layout = attrs.get_str("layout", "NCW")
if data_layout != "NCW":
raise tvm.error.OpAttributeInvalid('Only "NCW" data layout is supported for 1D Convolution')
channel_axis = 1
kernel_layout = "OIW"
new_attrs = {}
new_attrs["channels"] = attrs.get_int("num_filter")
new_attrs["kernel_size"] = attrs.get_int_tuple("kernel")
new_attrs["strides"] = attrs.get_int_tuple("stride", (1,))
new_attrs["output_padding"] = attrs.get_int_tuple("adj", (0,))
new_attrs["padding"] = attrs.get_int_tuple("pad", (0,))
new_attrs["dilation"] = attrs.get_int_tuple("dilate", (1,))
new_attrs["groups"] = attrs.get_int("num_group", 1)
new_attrs["data_layout"] = data_layout
new_attrs["kernel_layout"] = kernel_layout
use_bias = not attrs.get_bool("no_bias", True)
res = _op.nn.conv1d_transpose(inputs[0], inputs[1], **new_attrs)
if use_bias:
assert len(inputs) == 3
res = _op.nn.bias_add(res, inputs[2], axis=channel_axis)
return res
def _mx_conv2d_transpose(inputs, attrs):
if "target_shape" in attrs.attrs:
raise tvm.error.OpAttributeUnImplemented(
'Attribute "target_shape" is not supported for operator Conv2D-transpose.'
)
kernel_size = attrs.get_int_tuple("kernel")
if len(kernel_size) != 2:
raise tvm.error.OpAttributeInvalid(
"Non-2D kernels are not supported for operator Conv2D-transpose."
)
data_layout = attrs.get_str("layout", "NCHW")
channel_axis = _get_channel_axis(data_layout, "conv2d_transpose")
if "kernel_layout" in attrs.attrs:
kernel_layout = attrs.get_str("kernel_layout")
else:
kernel_layout = "HWIO" if data_layout == "NHWC" else "IOHW"
new_attrs = {}
new_attrs["channels"] = attrs.get_int("num_filter")
new_attrs["kernel_size"] = kernel_size
new_attrs["strides"] = attrs.get_int_tuple("stride", (1, 1))
new_attrs["output_padding"] = attrs.get_int_tuple("adj", (0, 0))
new_attrs["padding"] = attrs.get_int_tuple("pad", (0, 0))
new_attrs["dilation"] = attrs.get_int_tuple("dilate", (1, 1))
new_attrs["groups"] = attrs.get_int("num_group", 1)
new_attrs["data_layout"] = data_layout
new_attrs["kernel_layout"] = kernel_layout
use_bias = not attrs.get_bool("no_bias", True)
res = _op.nn.conv2d_transpose(inputs[0], inputs[1], **new_attrs)
if use_bias:
assert len(inputs) == 3
res = _op.nn.bias_add(res, inputs[2], axis=channel_axis)
return res
def _mx_conv3d_transpose(inputs, attrs):
if "target_shape" in attrs.attrs:
raise tvm.error.OpAttributeUnImplemented(
'Attribute "target_shape" is not supported for operator Conv3D-transpose.'
)
kernel_size = attrs.get_int_tuple("kernel")
if len(kernel_size) != 3:
raise tvm.error.OpAttributeInvalid(
"Non-3D kernels are not supported for operator Conv3D-transpose."
)
data_layout = attrs.get_str("layout", "NCDHW")
channel_axis = _get_channel_axis(data_layout, "conv3d_transpose")
if "kernel_layout" in attrs.attrs:
kernel_layout = attrs.get_str("kernel_layout")
else:
kernel_layout = "DHWIO" if data_layout == "NDHWC" else "OIDHW"
new_attrs = {}
new_attrs["channels"] = attrs.get_int("num_filter")
new_attrs["kernel_size"] = kernel_size
new_attrs["strides"] = attrs.get_int_tuple("stride", (1, 1, 1))
new_attrs["output_padding"] = attrs.get_int_tuple("adj", (0, 0, 0))
new_attrs["padding"] = attrs.get_int_tuple("pad", (0, 0, 0))
new_attrs["dilation"] = attrs.get_int_tuple("dilate", (1, 1, 1))
new_attrs["groups"] = attrs.get_int("num_group", 1)
new_attrs["data_layout"] = data_layout
new_attrs["kernel_layout"] = kernel_layout
use_bias = not attrs.get_bool("no_bias", True)
res = _op.nn.conv3d_transpose(inputs[0], inputs[1], **new_attrs)
if use_bias:
assert len(inputs) == 3
res = _op.nn.bias_add(res, inputs[2], axis=channel_axis)
return res
def _mx_pooling(inputs, attrs):
global_pool = attrs.get_bool("global_pool", False)
pool_type = attrs.get_str("pool_type")
def _pool2d(new_op, is_avg):
kernel_size = attrs.get_int_tuple("kernel")
if len(kernel_size) != 2:
raise tvm.error.OpAttributeInvalid("Only 2D kernels are supported for operator Pool2D.")
new_attrs = {}
new_attrs["pool_size"] = kernel_size
new_attrs["strides"] = attrs.get_int_tuple("stride", (1, 1))
new_attrs["padding"] = attrs.get_int_tuple("pad", (0, 0))
new_attrs["ceil_mode"] = attrs.get_str("pooling_convention", "valid") == "full"
if is_avg:
new_attrs["count_include_pad"] = attrs.get_bool("count_include_pad", True)
return new_op(inputs[0], **new_attrs)
def _pool3d(new_op, is_avg):
kernel_size = attrs.get_int_tuple("kernel")
if len(kernel_size) != 3:
raise tvm.error.OpAttributeInvalid("Only 3D kernels are supported for operator Pool3D.")
new_attrs = {}
new_attrs["pool_size"] = kernel_size
new_attrs["strides"] = attrs.get_int_tuple("stride", (1, 1, 1))
new_attrs["padding"] = attrs.get_int_tuple("pad", (0, 0, 0))
new_attrs["ceil_mode"] = attrs.get_str("pooling_convention", "valid") == "full"
if is_avg:
new_attrs["count_include_pad"] = attrs.get_bool("count_include_pad", True)
return new_op(inputs[0], **new_attrs)
# 3D pooling
if len(_infer_shape(inputs[0])) == 5:
if pool_type == "max":
if global_pool:
return _op.nn.global_max_pool3d(inputs[0])
return _pool3d(_op.nn.max_pool3d, False)
if pool_type == "avg":
if global_pool:
return _op.nn.global_avg_pool3d(inputs[0])
return _pool3d(_op.nn.avg_pool3d, True)
raise tvm.error.OpNotImplemented(
"Operator {} Pooling is not supported for frontend MXNet.".format(
pool_type.capitalize()
)
)
# 2D Pooling
if pool_type == "max":
if global_pool:
return _op.nn.global_max_pool2d(inputs[0])
return _pool2d(_op.nn.max_pool2d, False)
if pool_type == "avg":
if global_pool:
return _op.nn.global_avg_pool2d(inputs[0])
return _pool2d(_op.nn.avg_pool2d, True)
raise tvm.error.OpNotImplemented(
"Operator {} Pooling is not supported for frontend MXNet.".format(pool_type.capitalize())
)
def _mx_adaptive_avg_pooling(inputs, attrs):
output_size = attrs.get_int_tuple("output_size", [])
return _op.nn.adaptive_avg_pool2d(inputs[0], output_size)
def _mx_dropout(inputs, attrs):
rate = attrs.get_float("p", 0.5)
return _op.nn.dropout(inputs[0], rate=rate)
def _mx_BlockGrad(inputs, attrs): # pylint: disable=unused-argument
return inputs
def _mx_batch_norm(inputs, attrs):
if attrs.get_bool("output_mean_var", False):
raise tvm.error.OpAttributeUnImplemented(
'Attribute "output_mean_var" is not supported for operator Batch Norm.'
)
if attrs.get_bool("use_global_stats", False):
_warn_not_used("use_global_stats", "batch_norm")
new_attrs = {}
new_attrs["axis"] = attrs.get_int("axis", 1)
new_attrs["epsilon"] = attrs.get_float("eps", 0.001)
new_attrs["center"] = True
new_attrs["scale"] = not attrs.get_bool("fix_gamma", True)
return _op.nn.batch_norm(*inputs, **new_attrs)
def _mx_instance_norm(inputs, attrs):
assert len(inputs) == 3
new_attrs = {}
new_attrs["axis"] = attrs.get_int("axis", 1)
new_attrs["epsilon"] = attrs.get_float("eps", 1e-5)
return _op.nn.instance_norm(*inputs, **new_attrs)
def _mx_layer_norm(inputs, attrs):
assert len(inputs) == 3
if attrs.get_bool("output_mean_var", False):
raise tvm.error.OpAttributeUnimplemented(
'Attribute "output_mean_var" is not supported for operator Layer Norm.'
)
new_attrs = {}
new_attrs["axis"] = attrs.get_int("axis", -1)
new_attrs["epsilon"] = attrs.get_float("eps", 1e-5)
return _op.nn.layer_norm(*inputs, **new_attrs)
def _mx_group_norm(inputs, attrs):
assert len(inputs) == 3
if attrs.get_bool("output_mean_var", False):
raise tvm.error.OpAttributeUnimplemented(
'Attribute "output_mean_var" is not supported for operator Group Norm.'
)
new_attrs = {}
new_attrs["axis"] = 1
new_attrs["num_groups"] = attrs.get_int("num_groups", 1)
new_attrs["epsilon"] = attrs.get_float("eps", 1e-5)
return _op.nn.group_norm(*inputs, **new_attrs)
def _mx_slice(inputs, attrs):
new_attrs = {}
begin = list(attrs.get_int_tuple("begin", None))
end = list(attrs.get_int_tuple("end", None))
stride = attrs.get_int_tuple("step", None)
input_shape = _infer_type(inputs[0]).checked_type.shape
if begin is None:
raise tvm.error.OpAttributeRequired('Attribute "begin" not found in operator Slice.')
if end is None:
raise tvm.error.OpAttributeRequired('Attribute "end" not found in operator Slice.')
begin = (x if x is not None else 0 for x in begin)
for i, ed in enumerate(end):
if ed is None:
end[i] = input_shape[i]
new_attrs = {"begin": list(begin), "end": list(end)}
if stride is not None:
stride = (x if x is not None else 1 for x in stride)
new_attrs["strides"] = list(stride)
return _op.strided_slice(inputs[0], **new_attrs)
def _mx_slice_like(inputs, attrs):
assert len(inputs) == 2
new_attrs = {}
new_attrs["axes"] = attrs.get_int_tuple("axes", None)
return _op.slice_like(*inputs, **new_attrs)
def _mx_slice_axis(inputs, attrs):
assert len(inputs) == 1
expr = _infer_type(inputs[0])
shape = expr.checked_type.shape
axis = attrs.get_int("axis")
ax_beg = attrs.get_int("begin")
ax_end = attrs.get_str("end")
if axis < 0:
axis += len(shape)
assert 0 <= axis < len(shape)
if ax_end == "None":
ax_end = int(shape[axis])
else:
ax_end = int(ax_end)
if ax_beg < 0:
ax_beg += int(shape[axis])
if ax_end < 0:
ax_end += int(shape[axis])
assert 0 <= ax_beg < int(shape[axis])
assert ax_beg < ax_end <= int(shape[axis])
begin = []
end = []
for i, dim in enumerate(shape):
if i != axis:
begin.append(0)
end.append(dim)
else:
begin.append(ax_beg)
end.append(ax_end)
return _op.strided_slice(inputs[0], begin, end)
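# Worked example for the slice_axis lowering above (hypothetical values): for an input of
# shape (4, 10) with axis=1, begin=2 and end="None", the loop builds begin=[0, 2] and
# end=[4, 10], so the call becomes strided_slice(data, [0, 2], [4, 10]).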
def _mx_crop_like(inputs, attrs):
if len(inputs) < 2:
raise tvm.error.OpAttributeUnimplemented(
"Only support crop_like pattern for operator Crop."
)
if attrs.get_bool("center_crop", False):
raise tvm.error.OpAttributeUnimplemented("Center crop is not supported in operator Crop.")
if attrs.get_int_tuple("h_w", (0, 0)) != (0, 0):
raise tvm.error.OpAttributeUnimplemented("Doesn't support h_w in operator Crop.")
offset = attrs.get_int_tuple("offset", (0, 0))
new_attrs = {}
if offset == (0, 0):
new_attrs["axes"] = (2, 3)
return _op.slice_like(*inputs, **new_attrs)
expr = _infer_type(inputs[1])
like_shape = expr.checked_type.shape
new_attrs["begin"] = [0, 0, offset[0], offset[1]]
new_attrs["end"] = [
like_shape[0],
like_shape[1],
offset[0] + like_shape[2],
offset[1] + like_shape[3],
]
return _op.strided_slice(inputs[0], **new_attrs)
def _mx_split(inputs, attrs):
axis = attrs.get_int("axis", 1)
new_attrs = {}
new_attrs["indices_or_sections"] = attrs.get_int("num_outputs")
new_attrs["axis"] = axis
res = _op.split(inputs[0], **new_attrs)
if attrs.get_bool("squeeze_axis", False):
return tuple([_op.squeeze(x, axis=[axis]) for x in res])
return res
def _mx_softmax_activation(inputs, attrs):
mode = attrs.get_str("mode", "instance")
axis = 0 if mode == "instance" else 1
return _op.nn.softmax(inputs[0], axis=axis)
def _mx_softmax_output(inputs, attrs):
if attrs.get_bool("multi_output", False):
return _op.nn.softmax(inputs[0], axis=1)
return _op.nn.softmax(inputs[0])
def _mx_linear_regression_output(inputs, _):
return inputs[0]
def _mx_logistic_regression_output(inputs, _):
return _op.sigmoid(inputs[0])
def _mx_concat(inputs, attrs):
axis = attrs.get_int("dim", 1)
return _op.concatenate(tuple(inputs), axis=axis)
def _mx_stack(inputs, attrs):
axis = attrs.get_int("axis", 0)
return _op.stack(tuple(inputs), axis=axis)
def _mx_expand_dims(inputs, attrs):
axis = attrs.get_int("axis")
return _op.expand_dims(inputs[0], axis=axis)
def _mx_pad(inputs, attrs):
pad_mode = attrs.get_str("mode", None)
if pad_mode is None:
raise tvm.error.OpAttributeRequired('Attribute "mode" not found in operator pad.')
if pad_mode not in ["constant", "edge", "reflect"]:
raise tvm.error.OpAttributeInvalid("Value " + mode + ' in attribute "mode" is not valid')
pad_width = attrs.get_int_tuple("pad_width", None)
if pad_width is None:
raise tvm.error.OpAttributeRequired('Attribute "pad_width" not found in operator pad.')
if None in pad_width:
raise tvm.error.OpAttributeInvalid(
            'Value None in attribute "pad_width" of operator pad is not valid.'
)
constant_value = attrs.get_float("constant_value", 0.0)
padding = tuple(tuple((b, a)) for b, a in zip(pad_width[::2], pad_width[1::2]))
return _op.nn.pad(
data=inputs[0], pad_width=padding, pad_value=constant_value, pad_mode=pad_mode
)
def _mx_leaky_relu(inputs, attrs):
act_type = attrs.get_str("act_type", "leaky")
if act_type == "leaky":
return _op.nn.leaky_relu(inputs[0], alpha=attrs.get_float("slope", 0.25))
if act_type == "prelu":
assert len(inputs) == 2
return _op.nn.prelu(*inputs)
if act_type == "elu":
# -slope * relu(1-exp(x)) + relu(x)
slope = attrs.get_float("slope", 0.25)
one = _expr.const(1, dtype="float32")
x = inputs[0]
mslope = _op.nn.relu(_op.subtract(one, _op.exp(x)))
mslope = _op.multiply(mslope, _expr.const(-slope, dtype="float32"))
return _op.add(mslope, _op.nn.relu(x))
if act_type == "rrelu":
# NOTE this is only converted for inference.
lower_bound = attrs.get_float("lower_bound")
upper_bound = attrs.get_float("upper_bound")
alpha = (lower_bound + upper_bound) / 2.0
return _op.nn.leaky_relu(inputs[0], alpha=alpha)
if act_type == "gelu":
# 0.5 * x * (1 + erf(x / sqrt(2)))
sqrt2 = _expr.const(math.sqrt(2), dtype="float32")
erf = _op.erf(_op.divide(inputs[0], sqrt2))
one = _expr.const(1, dtype="float32")
erf_plus_one = _op.add(one, erf)
half = _expr.const(0.5, dtype="float32")
half_x = _op.multiply(inputs[0], half)
return _op.multiply(half_x, erf_plus_one)
raise tvm.error.OpNotImplemented(
"Operator {} is not supported for frontend MXNet.".format(act_type)
)
def _mx_make_power(power):
def _impl(inputs, _): # Note: no attrs
assert len(inputs) == 1
scalar = _expr.const(power, dtype=None)
# Note: int maps to "int32", float maps to "float32"
return _op.power(inputs[0], scalar)
return _impl
def _mx_make_exponent(base):
# exp(b, x) = e^b * e^x
def _impl(inputs, _): # Note: no attrs
assert len(inputs) == 1
scalar = _op.exp(_expr.const(base, dtype="float32"))
return _op.multiply(inputs[0], scalar)
return _impl
def _mx_make_logarithm(base):
# log(b, x) = log(x) / log(b)
def _impl(inputs, _): # Note: no attrs
assert len(inputs) == 1
scalar = _op.log(_expr.const(base, dtype="float32"))
return _op.divide(inputs[0], scalar)
return _impl
def _mx_expm1():
# exp_minus_1 x = exp(x) - 1
def _impl(inputs, _): # Note: no attrs
assert len(inputs) == 1
one = _expr.const(1, dtype="float32")
        return _op.subtract(_op.exp(inputs[0]), one)
return _impl
def _mx_log1p():
# 1_plus_log x = log(x + 1)
def _impl(inputs, _): # Note: no attrs
assert len(inputs) == 1
one = _expr.const(1, dtype="float32")
return _op.log(_op.add(inputs[0], one))
return _impl
def _mx_lrn(inputs, attrs):
new_attrs = {}
new_attrs["alpha"] = attrs.get_float("alpha", 0.0001)
new_attrs["beta"] = attrs.get_float("beta", 0.75)
new_attrs["bias"] = attrs.get_float("knorm", 2)
# NCHW format and normalization along channel axis
new_attrs["axis"] = 1
new_attrs["size"] = attrs.get_int("nsize")
assert len(inputs) == 1
return _op.nn.lrn(inputs[0], **new_attrs)
def _mx_multibox_prior(inputs, attrs):
new_attrs = {}
new_attrs["sizes"] = attrs.get_float_tuple("sizes", (1.0,))
new_attrs["steps"] = attrs.get_float_tuple("steps", (-1.0, -1.0))
new_attrs["offsets"] = attrs.get_float_tuple("offsets", (0.5, 0.5))
new_attrs["ratios"] = attrs.get_float_tuple("ratios", (1.0,))
new_attrs["clip"] = attrs.get_bool("clip", False)
return _op.vision.multibox_prior(inputs[0], **new_attrs)
def _mx_multibox_detection(inputs, attrs):
new_attrs0 = {}
new_attrs0["clip"] = attrs.get_bool("clip", True)
new_attrs0["threshold"] = attrs.get_float("threshold", 0.01)
new_attrs0["variances"] = attrs.get_float_tuple("variances", (0.1, 0.1, 0.2, 0.2))
new_attrs1 = {}
new_attrs1["return_indices"] = False
new_attrs1["iou_threshold"] = attrs.get_float("nms_threshold", 0.5)
new_attrs1["force_suppress"] = attrs.get_bool("force_suppress", False)
new_attrs1["top_k"] = attrs.get_int("nms_topk", -1)
ret = _op.vision.multibox_transform_loc(inputs[0], inputs[1], inputs[2], **new_attrs0)
return _op.vision.non_max_suppression(ret[0], ret[1], ret[1], **new_attrs1)
def _mx_dot(inputs, attrs):
assert len(inputs) == 2
a, b = inputs
rank_a = len(_infer_type(a).checked_type.shape)
rank_b = len(_infer_type(b).checked_type.shape)
if rank_a != 2 or rank_b != 2:
raise tvm.error.OpAttributeUnimplemented("Only 2-D arrays are supported.")
transpose_a = attrs.get_bool("transpose_a", False)
transpose_b = attrs.get_bool("transpose_b", False)
if transpose_a is True:
msg = 'Value {} in attribute "transpose_a" of operator dot ' "is not valid."
raise tvm.error.OpAttributeInvalid(msg.format(transpose_a))
if transpose_b is False:
b = _op.transpose(b, axes=[1, 0])
return _op.nn.dense(a, b)
def _mx_batch_dot(inputs, attrs):
assert len(inputs) == 2
a, b = inputs
a_shape = _infer_type(a).checked_type.shape
batch_shapes = None
if len(a_shape) > 3:
batch_shapes = a_shape[:-2]
a = _op.reverse_reshape(a, newshape=(-1, 0, 0))
b_shape = _infer_type(b).checked_type.shape
if len(b_shape) > 3:
if batch_shapes is None:
batch_shapes = b_shape[:-2]
b = _op.reverse_reshape(b, newshape=(-1, 0, 0))
transpose_a = attrs.get_bool("transpose_a", False)
transpose_b = attrs.get_bool("transpose_b", False)
if transpose_a is True:
msg = 'Value {} in attribute "transpose_a" of operator batch_dot ' "is not valid."
raise tvm.error.OpAttributeInvalid(msg.format(transpose_a))
if transpose_b is False:
b = _op.transpose(b, axes=[0, 2, 1])
out = _op.nn.batch_matmul(a, b)
if batch_shapes is not None:
out = _op.reverse_reshape(out, newshape=tuple(batch_shapes) + (0, 0))
return out
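# Illustrative note on the batch handling above (hypothetical shapes): an operand of shape
# (b1, b2, M, K) is collapsed with reverse_reshape((-1, 0, 0)) to (b1 * b2, M, K); since
# nn.batch_matmul expects its second operand pre-transposed, `b` is transposed whenever
# transpose_b is False, and the (b1 * b2, M, N) result is expanded back to (b1, b2, M, N).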
def _mx_arange(inputs, attrs):
assert len(inputs) == 0
if attrs.get_int("repeat", 1) != 1:
raise tvm.error.OpAttributeUnimplemented(
'Attribute "repeat" is not supported in operator arange.'
)
dtype = attrs.get_str("dtype", "float32")
stop = attrs.get_str("stop", "None")
if stop == "None":
stop = None
else:
stop = _expr.const(float(stop), dtype=dtype)
new_attrs = {}
new_attrs["start"] = _expr.const(attrs.get_float("start", 0.0), dtype=dtype)
new_attrs["stop"] = stop
new_attrs["step"] = _expr.const(attrs.get_float("step", 1.0), dtype=dtype)
new_attrs["dtype"] = dtype
return _op.arange(**new_attrs)
# pylint: disable=unused-argument
def _mx_make_loss(inputs, attrs):
# while doing inference make_loss does not have any effect
# and it should be mapped to identity
return inputs[0]
def _mx_contrib_arange_like(inputs, attrs):
assert len(inputs) == 1
if attrs.get_int("repeat", 1) != 1:
raise tvm.error.OpAttributeUnimplemented(
'Attribute "repeat" is not supported in operator arange_like.'
)
ty = _infer_type(inputs[0]).checked_type
assert ty
shape, dtype = get_const_tuple(ty.shape), ty.dtype
axis = attrs.get_int("axis", None)
if axis is None:
n_elems = 1
for dim in shape:
if not isinstance(dim, int):
raise tvm.error.OpError("Don't support arange_like with symbolic input shape.")
n_elems *= dim
else:
axis = axis + len(shape) if axis < 0 else axis
assert 0 <= axis < len(shape)
n_elems = shape[axis]
if not isinstance(n_elems, int):
raise tvm.error.OpError("Don't support arange_like with symbolic input shape.")
shape = (n_elems,)
start = attrs.get_float("start", 0.0)
step = attrs.get_float("step", 1.0)
stop = start + step * n_elems
new_attrs = {}
new_attrs["start"] = _expr.const(start, dtype=dtype)
new_attrs["stop"] = _expr.const(stop, dtype=dtype)
new_attrs["step"] = _expr.const(step, dtype=dtype)
new_attrs["dtype"] = dtype
ret = _op.arange(**new_attrs)
if len(shape) > 1:
ret = _op.reshape(ret, shape)
return ret
def _mx_repeat(inputs, attrs):
assert len(inputs) == 1
new_attrs = {}
new_attrs["repeats"] = attrs.get_int("repeats")
new_attrs["axis"] = attrs.get_int("axis", 0)
return _op.repeat(inputs[0], **new_attrs)
def _mx_tile(inputs, attrs):
assert len(inputs) == 1
new_attrs = {}
new_attrs["reps"] = attrs.get_int_tuple("reps")
return _op.tile(inputs[0], **new_attrs)
def _mx_take(inputs, attrs):
assert len(inputs) == 2
mode = attrs.get_str("mode", "clip")
if mode == "raise":
raise tvm.error.OpAttributeUnimplemented("take with raise mode is not supported yet")
axis = attrs.get_int("axis", 0)
return _op.take(inputs[0], inputs[1].astype("int32"), axis=axis, mode=mode)
def _mx_gather_nd(inputs, attrs):
assert len(inputs) == 2
    assert len(_infer_shape(inputs[1])) > 1, "index tensor must have at least 2 dimensions"
return _op.gather_nd(inputs[0], inputs[1])
def _mx_reverse(inputs, attrs):
assert len(inputs) == 1
new_attrs = {}
new_attrs["axis"] = attrs.get_int("axis")
return _op.reverse(inputs[0], **new_attrs)
def _mx_sequence_reverse(inputs, attrs):
new_attrs = {}
use_seq_lengths = attrs.get_bool("use_sequence_length")
if not use_seq_lengths:
assert len(inputs) == 1
new_attrs["axis"] = attrs.get_int("axis")
return _op.reverse(inputs[0], **new_attrs)
assert len(inputs) == 2
new_attrs["seq_axis"] = attrs.get_int("axis")
# MXNet assumes batch_axis as 1.
new_attrs["batch_axis"] = 1
return _op.reverse_sequence(inputs[0], inputs[1], **new_attrs)
def _mx_roi_align(inputs, attrs):
new_attrs = {}
new_attrs["pooled_size"] = attrs.get_int_tuple("pooled_size")
new_attrs["spatial_scale"] = attrs.get_float("spatial_scale")
new_attrs["sample_ratio"] = attrs.get_int("sample_ratio", -1)
new_attrs["layout"] = "NCHW"
return _op.vision.roi_align(inputs[0], inputs[1], **new_attrs)
def _mx_resize(inputs, attrs):
scale_height = attrs.get_float("scale_height", None)
scale_width = attrs.get_float("scale_width", None)
height = attrs.get_int("height", 1)
width = attrs.get_int("width", 1)
expr = _infer_type(inputs[0])
shape = expr.checked_type.shape
if scale_height is not None:
height = (scale_height * shape[2]).astype("int32")
if scale_width is not None:
width = (scale_width * shape[3]).astype("int32")
size = (height, width)
return _op.image.resize2d(inputs[0], size, coordinate_transformation_mode="align_corners")
def _mx_amp_multicast(inputs, attrs):
cast_narrow = attrs.get_bool("cast_narrow", False)
dtypes = [_infer_type(x).checked_type.dtype for x in inputs]
supported_dtypes = ["float16", "float32"]
assert all(
[x in supported_dtypes for x in dtypes]
), "amp_multicast support is limited to float16 and float32 inputs only."
has_float16 = any(x == "float16" for x in dtypes)
has_float32 = any(x == "float32" for x in dtypes)
dtype = dtypes[0]
if cast_narrow and has_float16:
dtype = "float16"
if not cast_narrow and has_float32:
dtype = "float32"
return [_op.cast(x, dtype) for x in inputs]
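# Illustrative note on amp_multicast above: every input is cast to one common dtype; with
# cast_narrow=True any float16 input selects float16, otherwise any float32 input selects
# float32, and the first input's dtype is used as the fallback.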
def _mx_grid_generator(inputs, attrs):
transform_type = attrs.get_str("transform_type")
if transform_type == "affine":
target_shape = attrs.get_int_tuple("target_shape")
return _op.image.affine_grid(_op.reshape(inputs[0], (0, 2, 3)), target_shape)
if transform_type == "warp":
checked_type = _infer_type(inputs[0]).checked_type
batch, _, height, width = get_const_tuple(checked_type.shape)
dtype = checked_type.dtype
identity_affine = relay.const(np.array([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]], dtype=dtype))
identity_affine = _op.broadcast_to(identity_affine, (batch, 2, 3))
normalizer = (2.0 / np.array([width - 1, height - 1])).reshape(1, -1, 1, 1).astype(dtype)
normalized_flow = inputs[0] * relay.const(normalizer)
grid = _op.image.affine_grid(identity_affine, (height, width))
return grid + normalized_flow
raise ValueError("unknown transform type" + transform_type)
def _mx_bilinear_sampler(inputs, attrs):
return _op.image.grid_sample(inputs[0], inputs[1], "bilinear", "NCHW")
def _mx_roi_pooling(inputs, attrs):
new_attrs = {}
new_attrs["pooled_size"] = attrs.get_int_tuple("pooled_size")
new_attrs["spatial_scale"] = attrs.get_float("spatial_scale")
new_attrs["layout"] = "NCHW"
return _op.vision.roi_pool(inputs[0], inputs[1], **new_attrs)
def _mx_proposal(inputs, attrs):
new_attrs = {}
new_attrs["scales"] = attrs.get_float_tuple("scales", (4.0, 8.0, 16.0, 32.0))
new_attrs["ratios"] = attrs.get_float_tuple("ratios", (0.5, 1.0, 2.0))
new_attrs["feature_stride"] = attrs.get_int("feature_stride", 16)
new_attrs["threshold"] = attrs.get_float("threshold", 0.7)
new_attrs["rpn_pre_nms_top_n"] = attrs.get_int("rpn_pre_nms_top_n", 6000)
new_attrs["rpn_post_nms_top_n"] = attrs.get_int("rpn_post_nms_top_n", 300)
new_attrs["rpn_min_size"] = attrs.get_int("rpn_min_size", 16)
new_attrs["iou_loss"] = attrs.get_bool("iou_loss", False)
assert not attrs.get_bool("output_score", False), "proposal doesn't support output score"
return _op.vision.proposal(inputs[0], inputs[1], inputs[2], **new_attrs)
def _mx_box_nms(inputs, attrs):
force_suppress = attrs.get_bool("force_suppress", False)
iou_thresh = attrs.get_float("overlap_thresh", 0.5)
top_k = attrs.get_int("topk", -1)
valid_thresh = attrs.get_float("valid_thresh", 0)
coord_start = attrs.get_int("coord_start", 2)
score_index = attrs.get_int("score_index", 1)
id_index = attrs.get_int("id_index", -1)
in_format = attrs.get_str("in_format", "corner")
out_format = attrs.get_str("out_format", "corner")
if in_format != "corner":
raise tvm.error.OpAttributeInvalid(
'Value of attribute "in_format" must equal "corner" for operator box_nms.'
)
if out_format != "corner":
raise tvm.error.OpAttributeInvalid(
'Value of attribute "out_format" must equal "corner" for operator box_nms.'
)
ret = _op.vision.get_valid_counts(
inputs[0], score_threshold=valid_thresh, id_index=id_index, score_index=score_index
)
nms_out = _op.vision.non_max_suppression(
ret[1],
ret[0],
ret[2],
iou_threshold=iou_thresh,
force_suppress=force_suppress,
top_k=top_k,
coord_start=coord_start,
score_index=score_index,
id_index=id_index,
return_indices=False,
invalid_to_bottom=True,
)
return nms_out
def _mx_box_decode(inputs, attrs):
std0 = relay.const(attrs.get_float("std0", 1), "float32")
std1 = relay.const(attrs.get_float("std1", 1), "float32")
std2 = relay.const(attrs.get_float("std2", 1), "float32")
std3 = relay.const(attrs.get_float("std3", 1), "float32")
clip = attrs.get_float("clip", -1)
in_format = attrs.get_str("format", "corner")
anchors = inputs[1] # (1, N, 4) encoded in corner or center
a = _op.split(anchors, indices_or_sections=4, axis=-1)
# Convert to format "center".
if in_format == "corner":
a_width = a[2] - a[0]
a_height = a[3] - a[1]
a_x = a[0] + a_width * relay.const(0.5, "float32")
a_y = a[1] + a_height * relay.const(0.5, "float32")
else:
a_x, a_y, a_width, a_height = a
data = inputs[0] # (B, N, 4) predicted bbox offset
p = _op.split(data, indices_or_sections=4, axis=-1)
ox = p[0] * std0 * a_width + a_x
oy = p[1] * std1 * a_height + a_y
dw = p[2] * std2
dh = p[3] * std3
if clip > 0:
clip = relay.const(clip, "float32")
dw = _op.minimum(dw, clip)
dh = _op.minimum(dh, clip)
dw = _op.exp(dw)
dh = _op.exp(dh)
ow = dw * a_width * relay.const(0.5, "float32")
oh = dh * a_height * relay.const(0.5, "float32")
out = _op.concatenate([ox - ow, oy - oh, ox + ow, oy + oh], axis=-1)
return out
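# Illustrative summary of the decoding above: with anchors in center form
# (a_x, a_y, a_width, a_height) and predicted offsets (p0, p1, p2, p3), the decoded center is
#     ox = p0 * std0 * a_width + a_x,    oy = p1 * std1 * a_height + a_y
# and the half extents are
#     ow = exp(p2 * std2) * a_width / 2, oh = exp(p3 * std3) * a_height / 2
# (with p2 * std2 and p3 * std3 clipped first when the "clip" attribute is positive),
# giving output corners (ox - ow, oy - oh, ox + ow, oy + oh).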
def _mx_l2_normalize(inputs, attrs):
new_attrs = {}
mode = attrs.get_str("mode", "instance")
if mode == "channel":
new_attrs["axis"] = [1]
elif mode == "instance":
ndim = len(_infer_type(inputs[0]).checked_type.shape)
new_attrs["axis"] = list(range(1, ndim))
elif mode == "spatial":
ndim = len(_infer_type(inputs[0]).checked_type.shape)
new_attrs["axis"] = list(range(2, ndim))
else:
raise tvm.error.OpAttributeInvalid(
'Mode "{}" is not supported for operator l2_normalize.'.format(mode)
)
new_attrs["eps"] = attrs.get_float("eps", 1e-10)
return _op.nn.l2_normalize(inputs[0], **new_attrs)
def _mx_softsign(inputs, attrs):
return inputs[0] / (_expr.const(1.0) + _op.abs(inputs[0]))
def _mx_softmin(inputs, attrs):
axis = attrs.get_int("axis", -1)
return _op.nn.softmax(_op.negative(inputs[0]), axis)
def _mx_hard_sigmoid(inputs, attrs):
x = (_expr.const(0.2) * inputs[0]) + _expr.const(0.5)
return _op.clip(x, a_min=0.0, a_max=1.0)
def _mx_reciprocal(inputs, attrs):
return _expr.const(1.0) / inputs[0]
def _mx_shape_array(inputs, attrs):
assert len(inputs) == 1
if attrs.get_int("lhs_begin", None) is not None:
raise tvm.error.OpAttributeUnimplemented("shape_array doesn't support lhs_begin")
if attrs.get_int("lhs_end", None) is not None:
raise tvm.error.OpAttributeUnimplemented("shape_array doesn't support lhs_end")
if attrs.get_int("rhs_begin", None) is not None:
raise tvm.error.OpAttributeUnimplemented("shape_array doesn't support rhs_begin")
if attrs.get_int("rhs_end", None) is not None:
raise tvm.error.OpAttributeUnimplemented("shape_array doesn't support rhs_end")
return _op.shape_of(inputs[0], dtype="int64")
def _mx_full(inputs, attrs):
assert len(inputs) == 0
val = attrs.get_float("value")
shape = attrs.get_int_tuple("shape")
dtype = attrs.get_str("dtype", "float32")
return _op.full(_expr.const(val, dtype), shape, dtype)
def _mx_squeeze(inputs, attrs):
assert len(inputs) == 1
axis = attrs.get_int_tuple("axis", None)
return _op.squeeze(inputs[0], axis)
def _mx_broadcast_axis(inputs, attrs):
assert len(inputs) == 1
axis = attrs.get_int_tuple("axis", [])
size = attrs.get_int_tuple("size", [])
assert len(axis) == len(size)
if len(axis) == 0:
return inputs[0]
expr = _infer_type(inputs[0])
src_shape = expr.checked_type.shape
tgt_shape = []
for i, dim in enumerate(src_shape):
if i not in axis:
tgt_shape.append(dim)
else:
assert int(dim) == 1
idx = axis.index(i)
tgt_shape.append(size[idx])
return _op.broadcast_to(inputs[0], tgt_shape)
def _mx_embedding(inputs, _):
assert len(inputs) == 2
indices, weight = inputs
return _op.take(weight, indices.astype("int32"), axis=0)
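# Illustrative note on the Embedding lowering above (hypothetical shapes): an integer index
# tensor of shape (batch, seq_len) gathers rows of the (vocab_size, embed_dim) weight via
# take(..., axis=0), producing an output of shape (batch, seq_len, embed_dim).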
def _mx_smooth_l1(inputs, attrs):
scalar = attrs.get_float("scalar", 1.0)
scalar_sq = scalar * scalar
    mask = _op.less(_op.abs(inputs[0]), _expr.const(1.0 / scalar_sq, dtype="float32"))
return _op.where(
mask,
_expr.const(scalar_sq / 2.0, dtype="float32") * inputs[0] * inputs[0],
_op.abs(inputs[0]) - _expr.const(0.5 / scalar_sq),
)
def _mx_deformable_convolution(inputs, attrs):
new_attrs = {}
new_attrs["kernel_size"] = attrs.get_int_tuple("kernel")
new_attrs["strides"] = attrs.get_int_tuple("stride")
new_attrs["padding"] = attrs.get_int_tuple("pad")
new_attrs["dilation"] = attrs.get_int_tuple("dilate")
new_attrs["channels"] = attrs.get_int("num_filter")
new_attrs["deformable_groups"] = attrs.get_int("num_deformable_group", 1)
new_attrs["groups"] = attrs.get_int("num_group", 1)
assert attrs.get_str("layout", "NCHW") == "NCHW", "Deformable conv2d only supports NCHW layout"
use_bias = not attrs.get_bool("no_bias", False)
res = _op.nn.deformable_conv2d(inputs[0], inputs[1], inputs[2], **new_attrs)
if use_bias:
assert len(inputs) == 4
res = _op.nn.bias_add(res, inputs[3])
return res
def _mx_argsort(inputs, attrs):
assert len(inputs) == 1
new_attrs = {}
new_attrs["axis"] = attrs.get_int("axis", -1)
new_attrs["is_ascend"] = attrs.get_bool("is_ascend", True)
new_attrs["dtype"] = attrs.get_str("dtype", "float32")
return _op.argsort(inputs[0], **new_attrs)
def _mx_topk(inputs, attrs):
assert len(inputs) == 1
new_attrs = {}
new_attrs["k"] = attrs.get_int("k", 1)
new_attrs["axis"] = attrs.get_int("axis", -1)
new_attrs["is_ascend"] = attrs.get_bool("is_ascend", False)
ret_type = attrs.get_str("ret_typ", "indices")
if ret_type == "mask":
raise tvm.error.OpAttributeUnimplemented(
"Attribute ret_type=mask is not supported in topk operator"
)
new_attrs["ret_type"] = "values" if ret_type == "value" else ret_type
new_attrs["dtype"] = attrs.get_str("dtype", "float32")
return _op.topk(inputs[0], **new_attrs)
def _mx_sequence_mask(inputs, attrs):
assert len(inputs) == 1 or len(inputs) == 2
new_attrs = {}
use_sequence_length = attrs.get_bool("use_sequence_length", False)
new_attrs["mask_value"] = attrs.get_float("value", 0.0)
new_attrs["axis"] = attrs.get_int("axis", 0)
if use_sequence_length:
return _op.sequence_mask(*inputs, **new_attrs)
else:
return inputs[0]
def _mx_contrib_div_sqrt_dim(inputs, _):
assert len(inputs) == 1
ndim = len(_infer_type(inputs[0]).checked_type.shape)
dim = _op.take(_op.shape_of(inputs[0]), _expr.const(ndim - 1, dtype="int32"))
dtype = _infer_type(inputs[0]).checked_type.dtype
sqrt_dim = _op.sqrt(dim.astype(dtype))
out = inputs[0] / sqrt_dim
return out
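# Illustrative note on _contrib_div_sqrt_dim above: the input is divided by sqrt(d), where
# d is the (dynamically read) length of its last axis, i.e. the 1/sqrt(d_k) scaling used in
# scaled dot-product attention.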
def _mx_rnn_param_concat(inputs, _):
# We don't need to concatenate RNN params because we will unravel the RNN op
return [inputs]
def _mx_rnn_layer(inputs, attrs):
def _rnn_cell(data, states, i2h_weight, h2h_weight, i2h_bias, h2h_bias, activation):
i2h = _op.nn.bias_add(_op.nn.dense(data, i2h_weight), i2h_bias, axis=-1)
h2h = _op.nn.bias_add(_op.nn.dense(states[0], h2h_weight), h2h_bias, axis=-1)
out = _activation_map[activation](i2h + h2h)
return out, [out]
def _gru_cell(data, states, i2h_weight, h2h_weight, i2h_bias, h2h_bias):
expr = _infer_type(data)
dtype = expr.checked_type.dtype
i2h = _op.nn.bias_add(_op.nn.dense(data, i2h_weight), i2h_bias, axis=-1)
h2h = _op.nn.bias_add(_op.nn.dense(states[0], h2h_weight), h2h_bias, axis=-1)
i2h_r, i2h_z, i2h = _op.split(i2h, indices_or_sections=3, axis=1)
h2h_r, h2h_z, h2h = _op.split(h2h, indices_or_sections=3, axis=1)
reset_gate = _activation_map["sigmoid"](i2h_r + h2h_r)
update_gate = _activation_map["sigmoid"](i2h_z + h2h_z)
next_h_tmp = _activation_map["tanh"](reset_gate * h2h + i2h)
next_h = (_expr.const(1, dtype) - update_gate) * next_h_tmp + update_gate * states[0]
return next_h, [next_h]
def _lstm_cell(data, states, i2h_weight, h2h_weight, i2h_bias, h2h_bias):
i2h = _op.nn.bias_add(_op.nn.dense(data, i2h_weight), i2h_bias, axis=-1)
h2h = _op.nn.bias_add(_op.nn.dense(states[0], h2h_weight), h2h_bias, axis=-1)
gates = i2h + h2h
slice_gates = _op.split(gates, indices_or_sections=4, axis=1)
in_gate = _activation_map["sigmoid"](slice_gates[0])
forget_gate = _activation_map["sigmoid"](slice_gates[1])
in_transform = _activation_map["tanh"](slice_gates[2])
out_gate = _activation_map["sigmoid"](slice_gates[3])
next_c = forget_gate * states[1] + in_gate * in_transform
next_h = out_gate * _activation_map["tanh"](next_c)
return next_h, [next_h, next_c]
num_layers = attrs.get_int("num_layers", 1)
mode = attrs.get_str("mode")
output_states = attrs.get_bool("state_outputs", False)
if mode.startswith("rnn"):
mode, activation = mode.split("_")
assert mode in ["rnn", "gru", "lstm"]
bidirectional = attrs.get_bool("bidirectional", False)
direct = 2 if bidirectional else 1
layout = attrs.get_str("layout", "TNC")
if layout != "TNC":
raise tvm.error.OpAttributeUnimplemented(
"RNN with layout other than TNC is not supported yet"
)
num_states = 2 if mode == "lstm" else 1
assert len(inputs) == num_states + 2
seq_data = inputs[0]
concat_weight = inputs[1]
init_states = inputs[2:]
expr = _infer_type(seq_data)
data_shape = expr.checked_type.shape
seq_len = int(data_shape[0])
assert len(concat_weight) == num_layers * 4 * direct
for idx, state in enumerate(init_states[:]):
if isinstance(state, dict):
node = state
attrs = StrAttrsDict(node.get("attrs", {}))
op_name = node["op"]
# by default, RNN layer uses zeros to initialize states
assert op_name == "_zeros"
shape = attrs.get_int_tuple("shape")
dtype = attrs.get_str("dtype", "float32")
init_layout = attrs.get_str("__layout__")
new_shape = list(shape)
for i, dim in enumerate(shape):
if dim == 0:
axis = layout.find(init_layout[i])
assert axis >= 0
new_shape[i] = int(data_shape[axis])
init_states[idx] = _op.zeros(new_shape, dtype)
weights = []
bias = []
states = []
back_weights = []
back_bias = []
back_states = []
for i in range(num_layers):
weights.append(
[concat_weight[i * 2 * direct].args[0], concat_weight[i * 2 * direct + 1].args[0]]
)
bias.append(
[
concat_weight[(num_layers + i) * 2 * direct].args[0],
concat_weight[(num_layers + i) * 2 * direct + 1].args[0],
]
)
s = []
for state in init_states:
s.append(_op.take(state, _expr.const(i * direct, "int32"), axis=0))
states.append(s)
if bidirectional:
back_weights.append(
[
concat_weight[i * 2 * direct + 2].args[0],
concat_weight[i * 2 * direct + 3].args[0],
]
)
back_bias.append(
[
concat_weight[(num_layers + i) * 2 * direct + 2].args[0],
concat_weight[(num_layers + i) * 2 * direct + 3].args[0],
]
)
s = []
for state in init_states:
s.append(_op.take(state, _expr.const(i * direct + 1, "int32"), axis=0))
back_states.append(s)
xs = [_op.take(seq_data, _expr.const(t, "int32"), axis=0) for t in range(seq_len)]
for l in range(num_layers):
outputs = []
back_outputs = []
for x in xs:
if mode == "rnn":
out, new_states = _rnn_cell(x, states[l], *weights[l], *bias[l], activation)
elif mode == "gru":
out, new_states = _gru_cell(x, states[l], *weights[l], *bias[l])
else: # mode == "lstm"
out, new_states = _lstm_cell(x, states[l], *weights[l], *bias[l])
states[l] = new_states
outputs.append(out)
if bidirectional:
for x in reversed(xs):
if mode == "rnn":
out, new_states = _rnn_cell(
x, back_states[l], *back_weights[l], *back_bias[l], activation
)
elif mode == "gru":
out, new_states = _gru_cell(x, back_states[l], *back_weights[l], *back_bias[l])
else: # mode == "lstm"
out, new_states = _lstm_cell(x, back_states[l], *back_weights[l], *back_bias[l])
back_states[l] = new_states
back_outputs.append(out)
back_outputs.reverse()
concat_outputs = []
for t, out in enumerate(outputs):
new_out = _op.concatenate([out, back_outputs[t]], axis=-1)
concat_outputs.append(new_out)
outputs = concat_outputs
xs = outputs
ret = [_op.stack(outputs, axis=0)]
if output_states:
for i in range(num_states):
inputs = []
for l, s in enumerate(states):
inputs.append(s[i])
if bidirectional:
inputs.append(back_states[l][i])
ret.append(_op.stack(inputs, axis=0))
return ret
def _mx_one_hot(inputs, attrs):
indices = inputs[0].astype("int32")
depth = attrs.get_int("depth", 0)
dtype = attrs.get_str("dtype", "int32")
on_value = tvm.relay.const(attrs.get_float("on_value", 1.0), dtype)
off_value = tvm.relay.const(attrs.get_float("off_value", 0.0), dtype)
return _op.one_hot(indices, on_value, off_value, depth, -1, dtype)
def _mx_depth_to_space(inputs, attrs):
assert len(inputs) == 1
new_attrs = {}
new_attrs["block_size"] = attrs.get_int("block_size")
return _op.nn.depth_to_space(*inputs, **new_attrs)
def _mx_space_to_depth(inputs, attrs):
assert len(inputs) == 1
new_attrs = {}
new_attrs["block_size"] = attrs.get_int("block_size")
return _op.nn.space_to_depth(*inputs, **new_attrs)
def _mx_correlation(inputs, attrs):
assert len(inputs) == 2
new_attrs = {}
new_attrs["kernel_size"] = attrs.get_int("kernel_size", 1)
new_attrs["max_displacement"] = attrs.get_int("max_displacement", 1)
new_attrs["stride1"] = attrs.get_int("stride1", 1)
new_attrs["stride2"] = attrs.get_int("stride2", 1)
new_attrs["padding"] = attrs.get_int("pad_size", 0)
new_attrs["is_multiply"] = attrs.get_bool("is_multiply", True)
new_attrs["layout"] = "NCHW"
return _op.nn.correlation(*inputs, **new_attrs)
def _mx_contrib_fifo_buffer(inputs, attrs):
new_attrs = {}
new_attrs["axis"] = attrs.get_int("axis")
return _op.nn.fifo_buffer(*inputs, **new_attrs)
def _mx_contrib_interleaved_matmul_selfatt_qk(inputs, attrs):
"""
tmp = mx.nd.reshape(queries_keys_values, shape=(0, 0, num_heads, 3, -1))
q_proj = mx.nd.transpose(tmp[:,:,:,0,:], axes=(1, 2, 0, 3))
q_proj = mx.nd.reshape(q_proj, shape=(-1, 0, 0), reverse=True)
q_proj = mx.nd.contrib.div_sqrt_dim(q_proj)
k_proj = mx.nd.transpose(tmp[:,:,:,1,:], axes=(1, 2, 0, 3))
k_proj = mx.nd.reshape(k_proj, shape=(-1, 0, 0), reverse=True)
output = mx.nd.batch_dot(q_proj, k_proj, transpose_b=True)
"""
assert len(inputs) == 1
qkv = inputs[0]
num_heads = attrs.get_int("heads")
qkv = _op.reshape(qkv, newshape=(0, 0, num_heads, 3, -1))
q_proj = _op.take(qkv, _expr.const(0, "int32"), axis=3)
q_proj = _op.transpose(q_proj, axes=[1, 2, 0, 3])
q_proj = _op.reverse_reshape(q_proj, newshape=(-1, 0, 0))
q_proj = _mx_contrib_div_sqrt_dim([q_proj], None)
k_proj = _op.take(qkv, _expr.const(1, "int32"), axis=3)
k_proj = _op.transpose(k_proj, axes=[1, 2, 0, 3])
k_proj = _op.reverse_reshape(k_proj, newshape=(-1, 0, 0))
ret = _op.nn.batch_matmul(q_proj, k_proj)
return ret
def _mx_contrib_interleaved_matmul_selfatt_valatt(inputs, attrs):
"""
tmp = mx.nd.reshape(queries_keys_values, shape=(0, 0, num_heads, 3, -1))
v_proj = mx.nd.transpose(tmp[:,:,:,2,:], axes=(1, 2, 0, 3))
v_proj = mx.nd.reshape(v_proj, shape=(-1, 0, 0), reverse=True)
output = mx.nd.batch_dot(attention, v_proj)
output = mx.nd.reshape(output, shape=(-1, num_heads, 0, 0), reverse=True)
output = mx.nd.transpose(output, axes=(2, 0, 1, 3))
output = mx.nd.reshape(output, shape=(0, 0, -1))
"""
assert len(inputs) == 2
qkv, att = inputs
num_heads = attrs.get_int("heads")
qkv = _op.reshape(qkv, newshape=(0, 0, num_heads, 3, -1))
v_proj = _op.take(qkv, _expr.const(2, "int32"), axis=3)
v_proj = _op.transpose(v_proj, axes=(1, 2, 0, 3))
v_proj = _op.reverse_reshape(v_proj, newshape=(-1, 0, 0))
v_proj = _op.transpose(v_proj, axes=[0, 2, 1])
out = _op.nn.batch_matmul(att, v_proj)
out = _op.reverse_reshape(out, newshape=(-1, num_heads, 0, 0))
out = _op.transpose(out, axes=(2, 0, 1, 3))
out = _op.reshape(out, newshape=(0, 0, -1))
return out
def _mx_cond(inputs, attrs, subgraphs):
assert len(subgraphs) == 3
cond_input_locs = json.loads(attrs.get_str("cond_input_locs"))
then_input_locs = json.loads(attrs.get_str("then_input_locs"))
else_input_locs = json.loads(attrs.get_str("else_input_locs"))
num_outputs = attrs.get_int("num_outputs")
input_args = []
for i, arg in enumerate(inputs):
var = _expr.var("arg%s" % i, _infer_type(arg).checked_type)
input_args.append(var)
cond_args = [input_args[i] for i in cond_input_locs]
then_args = [input_args[i] for i in then_input_locs]
else_args = [input_args[i] for i in else_input_locs]
cond_arg_shapes = [arg.type_annotation.shape for arg in cond_args]
cond_arg_dtype_info = [arg.type_annotation.dtype for arg in cond_args]
cond_func = _from_mxnet_impl(subgraphs[0], cond_arg_shapes, cond_arg_dtype_info)
cond = _expr.Call(cond_func, cond_args).astype("bool")
cond_shape = get_const_tuple(_infer_type(cond).checked_type.shape)
if len(cond_shape) > 0:
assert len(cond_shape) == 1 and cond_shape[0] == 1, "Condition is not scalar"
cond = _op.take(cond, _expr.const(1, "int"))
sb = _scope_builder.ScopeBuilder()
with sb.if_scope(cond):
then_arg_shapes = [arg.type_annotation.shape for arg in then_args]
then_arg_dtype_info = [arg.type_annotation.dtype for arg in then_args]
then_func = _from_mxnet_impl(subgraphs[1], then_arg_shapes, then_arg_dtype_info)
sb.ret(_expr.Call(then_func, then_args))
with sb.else_scope():
else_arg_shapes = [arg.type_annotation.shape for arg in else_args]
else_arg_dtype_info = [arg.type_annotation.dtype for arg in else_args]
else_func = _from_mxnet_impl(subgraphs[2], else_arg_shapes, else_arg_dtype_info)
sb.ret(_expr.Call(else_func, else_args))
func = _function.Function(input_args, sb.get())
ret = _expr.Call(func, inputs)
if num_outputs > 1:
ret = _expr.TupleWrapper(ret, num_outputs)
return ret
def _qnn_contrib_concat(inputs, attrs):
axis = attrs.get_int("dim", 1)
num_args = attrs.get_int("num_args", -1)
assert num_args > 0
input_exprs = inputs[0:num_args]
min_start_idx = num_args
max_start_idx = num_args + 1
mins = list()
maxs = list()
for i in range(min_start_idx, len(inputs), 2):
mins.append(inputs[i])
for i in range(max_start_idx, len(inputs), 2):
maxs.append(inputs[i])
# Check if all the input tensors have same qnn params.
if len(set(mins)) == 1 and len(set(maxs)) == 1:
output_min = mins[0]
output_max = maxs[0]
concat = _op.concatenate(tuple(input_exprs), axis=axis)
return concat, output_min, output_max
else:
# Get all dtypes. Find input and output scales, call concatenate.
dtypes = [_infer_type(x).checked_type.dtype for x in input_exprs]
assert all(
[x == "uint8" for x in dtypes]
), "Current support is limited to uint8 inputs only."
new_min = min(mins)
new_max = max(maxs)
assert new_min == 0
output_scale = get_mkldnn_uint8_scale(new_min, new_max)
min_max = zip(mins, maxs)
input_scales = [get_mkldnn_uint8_scale(x, y) for (x, y) in min_max]
input_zeros = [0] * len(input_scales)
output_zero = 0
input_scales_expr = [relay.const(x, "float32") for x in input_scales]
input_zeros_expr = [relay.const(x, "int32") for x in input_zeros]
output_scale_expr = relay.const(output_scale, "float32")
output_zero_expr = relay.const(output_zero, "int32")
res = relay.qnn.op.concatenate(
input_exprs,
input_scales_expr,
input_zeros_expr,
output_scale_expr,
output_zero_expr,
axis=axis,
)
return res, new_min, new_max
def _qnn_quantize(inputs, attrs):
out_dtype = "int8"
out_type = attrs.get_str("out_type")
if out_type == "auto":
if attrs.has_attr("min_calib_range") and attrs.has_attr("max_calib_range"):
if attrs.get_float("min_calib_range") >= 0:
out_dtype = "uint8"
else:
out_dtype = "int8"
else:
out_dtype = out_type
if out_dtype not in {"int8", "uint8"}:
raise ValueError("Unsupported out_dtype: %s" % out_dtype)
min_calib_range = attrs.get_float("min_calib_range", 0.0)
max_calib_range = attrs.get_float("max_calib_range", 0.0)
quantized_output, _, _ = quantize_mxnet_min_max(
inputs[0], min_range=min_calib_range, max_range=max_calib_range, out_dtype=out_dtype
)
return quantized_output, min_calib_range, max_calib_range
def _qnn_contrib_quantized_fifo_buffer(inputs, attrs, params):
data = inputs[0]
buffer = inputs[1]
min_calib_range = inputs[2]
max_calib_range = inputs[3]
data_dtype = _infer_type(data).checked_type.dtype
buffer_shape = _infer_shape(buffer)
buffer_name = _get_name(buffer)
params[buffer_name] = _nd.array(np.zeros(buffer_shape).astype(data_dtype))
new_buffer = relay.var(buffer_name, relay.TensorType(buffer_shape, data_dtype))
inputs[1] = new_buffer
res = _op.nn.fifo_buffer(data=data, buffer=new_buffer, axis=attrs.get_int("axis"))
return res, min_calib_range, max_calib_range
def _get_subgraph_op(subgraphs, op_name):
assert len(subgraphs) == 1, "Subgraph should have 1 node but has {}".format(len(subgraphs))
subgraph = subgraphs[0]
nodes = subgraph["nodes"]
assert nodes is not None
for node in nodes:
if node["op"] == op_name:
return node
raise ValueError("Op {} was not found in the subgraph".format(op_name))
def _qnn_conv(inputs, attrs, subgraphs, params):
def _has_fused_activation(_attrs, _supported_activations):
has_fused_activation = False
if attrs.get_bool("with_act", False) or attrs.get_bool("with_postsum_act", False):
subgraph_activation_attrs = _get_subgraph_op(subgraphs, "Activation")["attrs"]
act_type = subgraph_activation_attrs["act_type"]
if act_type not in _supported_activations:
raise ValueError(
"Fused activation {} is not supported at " "this time".format(act_type)
)
has_fused_activation = True
return has_fused_activation
def _get_data_scale_and_zp(_data, _inputs, _data_min_idx, _data_max_idx):
"""Finds the Qnn params for the data expr."""
data_min = _inputs[_data_min_idx]
data_max = _inputs[_data_max_idx]
assert data_min <= data_max
data_dtype = _infer_type(_data).checked_type.dtype
assert data_dtype in {"int8", "uint8"}
if data_min < 0.0:
assert (
data_dtype == "int8"
), "Expect int8 when data_min < 0.0, consider quantize model with int8."
_data_scale = (
get_mkldnn_uint8_scale(data_min, data_max)
if data_dtype == "uint8"
else get_mkldnn_int8_scale(data_min, data_max)
)
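        # MKLDNN quantization is zero-centered, so the zero point is always 0.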
_data_zero_point = 0
return _data_scale, _data_zero_point
def _get_bn_alpha_coeff(_bn_gamma_idx, _bn_beta_idx, _bn_running_mean_idx, _bn_running_var_idx):
"""Extract the BN coeff. These will be use later for BN folding into convolution."""
# Extract relevant attrs from bn.
bn_attrs = _get_subgraph_op(subgraphs, "BatchNorm")["attrs"]
bn_epsilon_param = float(bn_attrs["eps"])
bn_scale_param = bn_attrs["fix_gamma"] == "False"
bn_center_param = True
# Extract the relevant relay expressions.
bn_running_var = inputs[_bn_running_var_idx]
bn_gamma = inputs[_bn_gamma_idx]
bn_beta = inputs[_bn_beta_idx]
bn_running_mean = inputs[_bn_running_mean_idx]
# Get coefficient to multiply to weights.
bn_epsilon = relay.const(bn_epsilon_param, "float32")
denominator = relay.sqrt(relay.add(bn_running_var, bn_epsilon))
_bn_scale = relay.divide(relay.const(1.0, "float32"), denominator)
if bn_scale_param:
_bn_scale = relay.multiply(bn_gamma, _bn_scale)
# Get the shift.
_bn_shift = relay.negative(relay.multiply(bn_running_mean, _bn_scale))
if bn_center_param:
_bn_shift = relay.add(bn_beta, _bn_shift)
return _bn_scale, _bn_shift
def _fold_bn(_bn_scale, _bn_shift, _has_bias, _has_bn):
"""Fold BN into kernel and bias. Get new kernel and bias."""
_kernel = inputs[1]
if _bn_scale:
assert attrs.get_bool("with_bn", False)
# Weights are on OIHW, and _bn_scale is in O.
exp_bn_scale = relay.expand_dims(_bn_scale, axis=1, num_newaxis=3)
_kernel = relay.multiply(exp_bn_scale, _kernel)
_bias = None
if _has_bias:
_bias = inputs[2]
if _has_bn:
assert _bn_shift is not None
assert _bn_scale is not None
_bias = relay.add(relay.multiply(_bn_scale, _bias), _bn_shift)
elif _has_bn:
assert _bn_shift is not None
assert _bn_scale is not None
_bias = _bn_shift
return _kernel, _bias
def _get_quantized_kernel(_kernel, _bias, _data_scale):
        # For quantizing, we need the min/max of the kernel, so we have to precompute this expr.
np_kernel = _infer_value(_kernel, params).numpy()
kernel_channel_min = np.amin(np_kernel, axis=(1, 2, 3))
kernel_channel_max = np.amax(np_kernel, axis=(1, 2, 3))
np_bias = None
if _bias is not None:
np_bias = _infer_value(_bias, params).numpy()
return quantize_conv_weights_bias_channel_mkldnn_from_var(
_kernel, np_bias, kernel_channel_min, kernel_channel_max, _data_scale
)
def _get_qnn_conv2d(
_data,
_kernel,
_data_zero_point,
_kernel_zero_point,
_data_scale,
_kernel_vector_scale,
_conv2d_attrs,
):
return relay.qnn.op.conv2d(
_data,
_kernel,
input_zero_point=relay.const(_data_zero_point, "int32"),
kernel_zero_point=relay.const(_kernel_zero_point, "int32"),
input_scale=relay.const(_data_scale, "float32"),
kernel_scale=relay.const(_kernel_vector_scale),
channels=_conv2d_attrs["channels"],
groups=_conv2d_attrs["groups"],
kernel_size=_conv2d_attrs["kernel_size"],
strides=_conv2d_attrs["strides"],
dilation=_conv2d_attrs["dilation"],
padding=_conv2d_attrs["padding"],
data_layout=_conv2d_attrs["data_layout"],
kernel_layout=_conv2d_attrs["kernel_layout"],
)
def _get_requantized_op(_res, _input_scale, _output_scale, _out_dtype):
# Requantize to get the output back
return relay.qnn.op.requantize(
_res,
input_scale=relay.const(_input_scale),
input_zero_point=relay.const(0, "int32"),
output_scale=relay.const(_output_scale, "float32"),
output_zero_point=relay.const(0, "int32"),
axis=1,
out_dtype=_out_dtype,
)
def _get_sum(_res, _output_scale, out_dtype):
"""Handles sum of the second quantized tensor."""
        # This is done in the following steps
# 1) rhs is the add's second operand. First rhs will be requantized to output scale with
# dtype int32. The int32 dtype is to keep precision high before adding.
# 2) Call normal add
# 3) Depending on final out_dtype, clip and cast (basically requantize).
_output_scale = relay.const(_output_scale, "float32")
data_sum = inputs[-5]
data_sum_min = inputs[-2]
data_sum_max = inputs[-1]
data_sum_dtype = _infer_type(data_sum).checked_type.dtype
data_sum_scale = (
get_mkldnn_uint8_scale(data_sum_min, data_sum_max)
if data_sum_dtype == "uint8"
else get_mkldnn_int8_scale(data_sum_min, data_sum_max)
)
data_sum_scale = relay.const(data_sum_scale, "float32")
zero_point = relay.const(0, "int32")
        # Save one requantize if the previous expr already has a requantize node. This also
        # improves accuracy slightly.
if isinstance(data_sum, _expr.Call) and data_sum.op.name == "qnn.requantize":
prev_input, prev_scale, prev_zero_point = data_sum.args[0:3]
prev_axis = data_sum.attrs.axis
data_sum = relay.qnn.op.requantize(
prev_input,
input_scale=prev_scale,
input_zero_point=prev_zero_point,
output_scale=_output_scale,
output_zero_point=zero_point,
axis=prev_axis,
out_dtype="int32",
)
else:
data_sum = relay.qnn.op.requantize(
data_sum,
input_scale=data_sum_scale,
input_zero_point=zero_point,
output_scale=_output_scale,
output_zero_point=zero_point,
out_dtype="int32",
)
# 2) Add two int32 tensors.
_res = relay.add(_res, data_sum)
# 3) Clip/cast to change the out dtype.
_res = relay.clip(
_res,
a_min=float(tvm.tir.op.min_value(out_dtype).value),
a_max=float(tvm.tir.op.max_value(out_dtype).value),
)
_res = relay.cast(_res, out_dtype)
return _res
def _parse():
assert len(subgraphs) == 1
subgraph_conv_attrs = StrAttrsDict(_get_subgraph_op(subgraphs, "Convolution")["attrs"])
is_quantized = attrs.get_bool("quantized", False)
if is_quantized:
            # MKLDNN has a quantized convolution subgraph. There are many different arguments
            # that are taken into account to parse the subgraph.
# * no_bias
# * with_sum
# * with_bn
# * with_postsum_relu
# * with_act
#
# Note - Relu/clip handling is not required because output min/max take care of that.
#
            # The parsing can be broken down into the following steps
            # 1) Get the input data scale and zero points.
            # 2) Extract BN params.
            # 3) Fold the BN params into kernel and bias.
            # 4) Quantize the kernel.
            # 5) Call QNN conv2d op.
            # 6) Quantize bias and call bias_add.
            # 7) Handle sum of quantized tensors if needed. Or just Requantize.
has_bias = not subgraph_conv_attrs.get_bool("no_bias", False)
has_sum = attrs.get_bool("with_sum", False)
has_bn = attrs.get_bool("with_bn", False)
###############################################
# 1) Get the input data scale and zero point.
###############################################
            # The last 2 indexes are data min and max. If the conv has a sum, the last 2 indexes
            # belong to the second tensor instead, so the data min/max indexes are -4 and -3.
data_min_idx = -2
data_max_idx = -1
if has_sum:
data_min_idx = -4
data_max_idx = -3
data = inputs[0]
data_scale, data_zero_point = _get_data_scale_and_zp(
data, inputs, data_min_idx, data_max_idx
)
#############################
# 2) Extract the BN params.
#############################
# Find the indexes to look at for BN.
bn_scale = bn_shift = None
if has_bn:
if has_bias:
bn_start_idx = 3
else:
bn_start_idx = 2
bn_gamma_idx = bn_start_idx
bn_beta_idx = bn_start_idx + 1
bn_running_mean_idx = bn_start_idx + 2
bn_running_var_idx = bn_start_idx + 3
bn_scale, bn_shift = _get_bn_alpha_coeff(
bn_gamma_idx, bn_beta_idx, bn_running_mean_idx, bn_running_var_idx
)
########################################
# 3) Fold the BN into kernel and bias.
########################################
kernel, bias = _fold_bn(bn_scale, bn_shift, has_bias, has_bn)
#######################################################################
# 4) Fold BN params into kernel. Get quantized kernel and QNN params.
#######################################################################
kernel, kernel_vector_scale, kernel_zero_point = _get_quantized_kernel(
kernel, bias, data_scale
)
##########################
# 5) Call QNN conv2d op.
##########################
conv2d_attrs = _get_mx_conv2d_attrs(subgraph_conv_attrs)
res = _get_qnn_conv2d(
data,
kernel,
data_zero_point,
kernel_zero_point,
data_scale,
kernel_vector_scale,
conv2d_attrs,
)
###############################################
# 6) Fold BN params into bias. Call bias_add.
###############################################
if has_bias or has_bn:
bias_scale = data_scale * kernel_vector_scale
int32_bias = quantize_conv_bias_mkldnn_from_var(bias, bias_scale)
res = _op.nn.bias_add(res, int32_bias, axis=1)
#####################################################################
# 7) Handle sum of quantized tensors if needed. Or just Requantize.
#####################################################################
min_output_range = attrs.get_float("min_calib_range")
max_output_range = attrs.get_float("max_calib_range")
output_scale, out_dtype = get_conv_mkldnn_requantized_scale_outDtype(
min_output_range, max_output_range
)
            # QNN conv2d output scale is the product of data_scale and kernel_vector_scale
input_scale = data_scale * kernel_vector_scale
if attrs.get_bool("with_sum", False):
# There is a second tensor that has to be added to the QNN conv2d output. Therefore,
# the QNN conv2d is first requantized to output scale with int32 precision. The
# second tensor will also be requantized to output scale with int32 precision,
# followed by an add operator.
res = _get_requantized_op(res, input_scale, output_scale, "int32")
res = _get_sum(res, output_scale, out_dtype)
else:
# Get the requantized conv output
res = _get_requantized_op(res, input_scale, output_scale, out_dtype)
return res, min_output_range, max_output_range
else:
res = _mx_conv(inputs, subgraph_conv_attrs)
has_fused_relu = _has_fused_activation(attrs, ["relu"])
if has_fused_relu:
res = _op.nn.relu(res)
return res
return _parse()
def _qnn_flatten(inputs, attrs):
# pylint: disable=unused-argument
data = inputs[0]
output_min = inputs[1]
output_max = inputs[2]
output = _op.nn.batch_flatten(data)
return output, output_min, output_max
def _qnn_dequantize(inputs, attrs):
# pylint: disable=unused-argument
data = inputs[0]
input_min = inputs[1]
input_max = inputs[2]
in_dtype = _infer_type(data).checked_type.dtype
result = dequantize_mxnet_min_max(data, input_min, input_max, in_dtype)
return result
def _qnn_activation(inputs, attrs):
act_type = attrs.get_str("act_type")
assert len(inputs) == 3
assert act_type == "relu", "Currently only relu is supported"
data = inputs[0]
range_min = inputs[1]
range_max = inputs[2]
res = _op.nn.relu(data)
return res, range_min, range_max
def _qnn_pooling(inputs, attrs):
input_min = inputs[1]
input_max = inputs[2]
data = inputs[0]
data_dtype = _infer_type(data).checked_type.dtype
pool_type = attrs.get_str("pool_type")
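    # Non-max pooling (e.g. average) of int8/uint8 data is computed in int32 and
    # cast back to the original dtype afterwards.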
if data_dtype in ("int8", "uint8") and pool_type != "max":
data = _op.cast(data, "int32")
res = _mx_pooling([data, input_min, input_max], attrs)
if data_dtype in ("int8", "uint8") and pool_type != "max":
res = _op.cast(res, data_dtype)
return res, input_min, input_max
def _qnn_batch_norm(inputs, attrs):
# Perform batch norm in FP32
data = inputs[0]
# Dequantize the data.
data_min_idx, data_max_idx = (-2, -1)
data_min, data_max = inputs[data_min_idx], inputs[data_max_idx]
data_dtype = _infer_type(data).checked_type.dtype
data_scale = (
get_mkldnn_uint8_scale(data_min, data_max)
if data_dtype == "uint8"
else get_mkldnn_int8_scale(data_min, data_max)
)
data_zp = 0
data = relay.qnn.op.dequantize(
data, relay.const(data_scale, "float32"), relay.const(data_zp, "int32")
)
# Run BN. The last 4 inputs are same as before.
new_inputs = [data, *inputs[1:5]]
res = _mx_batch_norm(new_inputs, attrs)
# Quantize the result
min_output_range = attrs.get_float("min_calib_range")
max_output_range = attrs.get_float("max_calib_range")
output_scale, out_dtype = get_conv_mkldnn_requantized_scale_outDtype(
min_output_range, max_output_range
)
res = relay.qnn.op.quantize(
res[0], relay.const(output_scale, "float32"), relay.const(0, "int32"), out_dtype=out_dtype
)
return res, min_output_range, max_output_range
def _qnn_fully_connected(inputs, attrs, subgraphs, params):
def _get_input_scale_zp(_data_dtype, _inputs, _has_bias):
data_min_idx, data_max_idx = (3, 4) if _has_bias else (2, 3)
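        # Inputs are laid out as [data, weight, (bias), data_min, data_max, ...], so the
        # data min/max indices shift by one when a bias input is present.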
data_min, data_max = _inputs[data_min_idx], _inputs[data_max_idx]
_data_scale = (
get_mkldnn_uint8_scale(data_min, data_max)
if _data_dtype == "uint8"
else get_mkldnn_int8_scale(data_min, data_max)
)
_data_zp = 0
return _data_scale, _data_zp
def _get_kernel_scale_zp_tensor_quantized(_kernel, _inputs, _has_bias):
kernel_dtype = _infer_type(_kernel).checked_type.dtype
if kernel_dtype != "int8":
raise tvm.error.OpNotImplemented(
"Tensor wise quantized expects weights in int8 data type"
)
if isinstance(_kernel, tvm.relay.Call) and _kernel.op.name == "qnn.quantize":
_kernel_scale = _kernel.args[1].data.numpy()
_kernel_zp = _kernel.args[2].data.numpy()
return _kernel_scale, _kernel_zp
kernel_min_idx, kernel_max_idx = (5, 6) if _has_bias else (4, 5)
kernel_min_name = _get_name(_inputs[kernel_min_idx])
kernel_min = params[kernel_min_name].numpy()[0]
kernel_max_name = _get_name(_inputs[kernel_max_idx])
kernel_max = params[kernel_max_name].numpy()[0]
_kernel_scale = (
get_mkldnn_uint8_scale(kernel_min, kernel_max)
if kernel_dtype == "uint8"
else get_mkldnn_int8_scale(kernel_min, kernel_max)
)
_kernel_zp = 0
return _kernel_scale, _kernel_zp
def _get_kernel_scale_zp_channel_quantized(_kernel, _bias, _data_scale):
kernel_dtype = _infer_type(_kernel).checked_type.dtype
if kernel_dtype != "float32":
raise tvm.error.OpNotImplemented(
"Channel wise quantized expects weights in float32 data type"
)
# Get the FP32 values, calculate min/max and then channel quantize them
np_kernel = _infer_value(_kernel, params).numpy()
kernel_channel_min = np.amin(np_kernel, axis=(1,))
kernel_channel_max = np.amax(np_kernel, axis=(1,))
np_bias = None
if _bias is not None:
np_bias = _infer_value(_bias, params).numpy()
return quantize_conv_weights_bias_channel_mkldnn_from_var(
_kernel, np_bias, kernel_channel_min, kernel_channel_max, _data_scale
)
def _get_bias_requantize_scale(_inputs, _data_scale, _kernel_scale):
_bias = _inputs[2]
if isinstance(_bias, tvm.relay.Call) and _bias.op.name == "qnn.quantize":
_bias_scale = _bias.args[1].data.numpy()
_bias_requantize_scale = _bias_scale / (_data_scale * _kernel_scale)
_bias_requantize_scale = _expr.const(_bias_requantize_scale, dtype="float32")
return _bias_requantize_scale
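        # Otherwise the bias min/max are supplied as parameters at input positions 7 and 8.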
bias_min_name = _get_name(_inputs[7])
bias_min = params[bias_min_name].numpy()[0]
bias_max_name = _get_name(_inputs[8])
bias_max = params[bias_max_name].numpy()[0]
bias_scale = get_mkldnn_int8_scale(bias_min, bias_max)
_bias_requantize_scale = bias_scale / (_data_scale * _kernel_scale)
_bias_requantize_scale = _expr.const(_bias_requantize_scale, dtype="float32")
return _bias_requantize_scale
is_quantized = attrs.get_bool("quantized", False)
with_relu = attrs.get_bool("with_relu", False)
subgraph_dense_attrs = StrAttrsDict(_get_subgraph_op(subgraphs, "FullyConnected")["attrs"])
if not is_quantized:
res = _mx_fully_connected(inputs, subgraph_dense_attrs)
if with_relu:
res = _op.nn.relu(res)
return res
else:
has_bias = not subgraph_dense_attrs.get_bool("no_bias", False)
units = subgraph_dense_attrs.get_int("num_hidden")
is_flatten = subgraph_dense_attrs.get_bool("flatten", True)
enable_float_output = attrs.get_bool("enable_float_output", False)
is_channel_quantized = attrs.get_bool("channel_wise_quantize", False)
########################
# Get data, kernel, bias
########################
data, kernel = inputs[0], inputs[1]
bias = None
if has_bias:
bias = inputs[2]
##############################
# Handle for shape of data > 2
##############################
if is_flatten:
data = _op.nn.batch_flatten(data)
data_shape = _infer_type(data).checked_type.shape
if len(data_shape) > 2:
data = _op.reverse_reshape(data, [-1, 0])
###############################
# Get data scale and zero point
###############################
data_dtype = _infer_type(data).checked_type.dtype
data_scale, data_zp = _get_input_scale_zp(data_dtype, inputs, has_bias)
#################################
# Get weight scale and zero point
#################################
if is_channel_quantized:
kernel, kernel_scale, kernel_zp = _get_kernel_scale_zp_channel_quantized(
kernel, bias, data_scale
)
else:
kernel_scale, kernel_zp = _get_kernel_scale_zp_tensor_quantized(
kernel, inputs, has_bias
)
################
# Call QNN dense
################
res = relay.qnn.op.dense(
data,
kernel,
input_zero_point=relay.const(data_zp, "int32"),
kernel_zero_point=relay.const(kernel_zp, "int32"),
input_scale=relay.const(data_scale, "float32"),
kernel_scale=relay.const(kernel_scale, "float32"),
units=units,
)
#################
# Handle bias add
#################
if has_bias:
if is_channel_quantized:
bias_scale = data_scale * kernel_scale
int32_bias = quantize_conv_bias_mkldnn_from_var(bias, bias_scale)
res = _op.nn.bias_add(res, int32_bias, axis=-1)
else:
bias_data = inputs[2]
bias_requantize_scale = _get_bias_requantize_scale(inputs, data_scale, kernel_scale)
multiplied_bias = _op.multiply(
_op.cast(bias_data, "float32"), bias_requantize_scale
)
rounded_bias = _op.round(multiplied_bias)
clipped_bias = _op.clip(
rounded_bias,
a_min=tvm.tir.op.min_value("int32").value,
a_max=tvm.tir.op.max_value("int32").value,
)
requantized_bias = _op.cast(clipped_bias, "int32")
res = _op.nn.bias_add(res, requantized_bias, axis=-1)
##############################################
# Dequantize if float32 output else Requantize
##############################################
if enable_float_output:
output_scale = np.float32(data_scale * kernel_scale)
res = relay.qnn.op.dequantize(
res, relay.const(output_scale), input_zero_point=relay.const(0, "int32"), axis=1
)
if with_relu:
res = _op.nn.relu(res)
else:
if is_channel_quantized:
raise tvm.error.OpNotImplemented(
"Channel wise quantized dense with non float output is not supported yet"
)
out_dtype = "uint8" if attrs.get_bool("with_relu", False) else "int8"
input_scale = np.float32(data_scale * kernel_scale)
min_output_range = attrs.get_float("min_calib_range")
max_output_range = attrs.get_float("max_calib_range")
output_scale = get_mkldnn_requantize_scale_outDtype(
min_output_range, max_output_range, out_dtype
)
res = relay.qnn.op.requantize(
res,
input_scale=relay.const(input_scale, "float32"),
input_zero_point=relay.const(0, "int32"),
output_scale=relay.const(output_scale, "float32"),
output_zero_point=relay.const(0, "int32"),
out_dtype=out_dtype,
)
if with_relu:
res = _op.nn.relu(res)
##############################
# Handle for shape of data > 2
##############################
if len(data_shape) > 2:
new_shape = data_shape[:-1]
new_shape.append(units)
res = _op.reshape(res, new_shape)
if enable_float_output:
return res
return res, min_output_range, max_output_range
def _mx_broadcast_to(inputs, attrs):
data = inputs[0]
tgt_shape = attrs.get_int_tuple("shape", [])
return _op.broadcast_to(data, tgt_shape)
def _mx_broadcast_like(inputs, attrs):
assert len(inputs) == 2
for axes in ["lhs_axes", "rhs_axes"]:
if axes in attrs.attrs:
raise tvm.error.OpAttributeUnImplemented(
'Attribute "{}" is not supported for operator broadcast_like.'.format(axes)
)
return _op.broadcast_to_like(*inputs)
def _mx_logical_not(inputs, input_types):
data = inputs[0]
dtype = _infer_type(data).checked_type.dtype
data = _op.cast(data, "bool") if dtype != "bool" else data
return _op.cast(_op.logical_not(data), dtype)
def _mx_broadcast_logical(logical_op):
def impl(inputs, input_types):
lhs_type = _infer_type(inputs[0]).checked_type.dtype
rhs_type = _infer_type(inputs[1]).checked_type.dtype
lhs = _op.cast(inputs[0], "bool") if lhs_type != "bool" else inputs[0]
rhs = _op.cast(inputs[1], "bool") if rhs_type != "bool" else inputs[1]
return _op.cast(logical_op(lhs, rhs), lhs_type)
return impl
def _mx_npi_transpose(inputs, attrs):
axes = attrs.get_int_tuple("axes", None)
# translate default case
    axes = None if axes is None or len(axes) == 0 or axes[0] is None else axes
return _op.transpose(inputs[0], axes=axes)
def _mx_npi_pad(inputs, attrs):
pad_mode = attrs.get_str("mode", None)
if pad_mode is None:
raise tvm.error.OpAttributeRequired('Attribute "mode" not found in operator pad.')
if pad_mode not in ["constant", "edge", "reflect"]:
raise tvm.error.OpAttributeInvalid("Value " + mode + ' in attribute "mode" is not valid')
if "pad_width" not in attrs.attrs:
raise tvm.error.OpAttributeRequired('Attribute "pad_width" not found in operator pad.')
    # Parse the tuple of tuples manually; get_int_tuple cannot handle a tuple of tuples.
pad_width = attrs.attrs["pad_width"]
pad_width = pad_width.replace("(", "[")
pad_width = pad_width.replace(")", "]")
pad_width = json.loads(pad_width)
constant_values = attrs.get_float("constant_values", 0.0)
return _op.nn.pad(
data=inputs[0], pad_width=pad_width, pad_value=constant_values, pad_mode=pad_mode
)
def _mx_npi_concatenate(inputs, attrs):
axis = attrs.get_str("axis", "0")
if axis == "None":
return _op.reshape(_op.concatenate(tuple(inputs), axis=0), (-1,))
else:
return _op.concatenate(tuple(inputs), axis=int(axis))
def _mx_npi_stack(inputs, attrs):
axis = attrs.get_str("axis", "0")
if axis == "None":
return _op.reshape(_op.stack(tuple(inputs), axis=0), (-1,))
else:
return _op.stack(tuple(inputs), axis=int(axis))
def _mx_npx_reshape(inputs, attrs):
shape = attrs.get_int_tuple("newshape")
reverse = attrs.get_bool("reverse", False)
shape_list = list(shape)
old_shape = get_const_tuple(_infer_type(inputs[0]).checked_type.shape)
new_shape = []
if reverse:
old_shape = old_shape[::-1]
shape_list = shape_list[::-1]
ptr = 0
unknown_axis = None
src_ptr = 0
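    # Special values in the target shape follow MXNet's npx.reshape convention:
    # -1 infers the dimension, -2 copies the input dimension, -3 drops a singleton
    # dimension, -4 copies all remaining dimensions, -5 fuses two adjacent input
    # dimensions, and -6 splits one input dimension into two.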
while src_ptr < len(shape_list):
ele = shape_list[src_ptr]
src_ptr += 1
if ele > 0:
new_shape.append(ele)
ptr += 1
elif ele == -1:
new_shape.append(-1)
if unknown_axis is not None:
raise tvm.error.OpAttributeInvalid("Can only have one -1 in the input shape.")
unknown_axis = len(new_shape)
ptr += 1
elif ele == -2:
new_shape.append(old_shape[ptr])
ptr += 1
elif ele == -3:
if old_shape[ptr] != 1:
raise tvm.error.OpAttributeInvalid(
"Dimension of the original shape "
"that corresponds to -3 must be 1. Received"
" {}".format(old_shape[ptr])
)
ptr += 1
elif ele == -4:
new_shape += old_shape[ptr:]
break
elif ele == -5:
new_shape.append(old_shape[ptr] * old_shape[ptr + 1])
ptr += 2
elif ele == -6:
# Split axis
lhs = shape_list[src_ptr]
rhs = shape_list[src_ptr + 1]
src_ptr += 2
if lhs == -1 and rhs == -1:
raise tvm.error.OpAttributeInvalid("The lhs and rhs can not both be -1.")
if lhs == -1:
if old_shape[ptr] % rhs != 0:
raise tvm.error.OpAttributeInvalid(
"When splitting the axis, "
"the dimension of the split axis must "
"be divisible by the splitted values."
)
lhs = old_shape[ptr] // rhs
if rhs == -1:
if old_shape[ptr] % lhs != 0:
raise tvm.error.OpAttributeInvalid(
"When splitting the axis, "
"the dimension of the split axis must "
"be divisible by the splitted values."
)
rhs = old_shape[ptr] // lhs
new_shape.append(lhs)
new_shape.append(rhs)
ptr += 1
else:
raise tvm.error.OpAttributeInvalid("Shape dimension %d is not supported" % ele)
if reverse:
new_shape = new_shape[::-1]
return _op.reshape(inputs[0], newshape=new_shape)
def _mx_split_v2(inputs, attrs):
axis = attrs.get_int("axis")
indices = list(attrs.get_int_tuple("indices", []))
# remove the prefix '0'
if len(indices) != 0 and indices[0] == 0:
indices.remove(0)
sections = attrs.get_int("sections", 0)
indices_or_sections = list(indices) if len(indices) != 0 else sections
res = _op.split(inputs[0], indices_or_sections=indices_or_sections, axis=axis)
if attrs.get_bool("squeeze_axis", False):
res = tuple([_op.squeeze(x, axis=[axis]) for x in res])
return res
def _mx_npi_where_rscalar(inputs, attrs):
cond, dat = inputs
scalar = attrs.get_float("scalar")
cond_shape = get_const_tuple(_infer_type(cond).checked_type.shape)
dat_shape = get_const_tuple(_infer_type(dat).checked_type.shape)
dtype = _infer_type(dat).checked_type.dtype
# Check for broadcasting
out_shape = np.broadcast(np.empty(cond_shape), np.empty(dat_shape)).shape
if out_shape != cond_shape:
cond = _op.broadcast_to(cond, out_shape)
if out_shape != dat_shape:
dat = _op.broadcast_to(dat, out_shape)
scalar = _expr.const(scalar, dtype=dtype)
ones = _op.ones_like(dat)
scalar = _op.multiply(ones, scalar)
return _op.where(cond, dat, scalar)
# Note: due to attribute conversion constraint
# ops in the identity set must be attribute free
_identity_list = [
"abs",
"log",
"exp",
"erf",
"sqrt",
"floor",
"ceil",
"round",
"trunc",
"sign",
"sigmoid",
"negative",
"reshape_like",
"zeros_like",
"ones_like",
"cos",
"cosh",
"sin",
"sinh",
"tan",
"tanh",
"where",
]
_convert_map = {
"_copy": _rename(_op.copy),
"relu": _rename(_op.nn.relu),
"broadcast_add": _rename(_op.add),
"broadcast_plus": _rename(_op.add),
"broadcast_sub": _rename(_op.subtract),
"broadcast_minus": _rename(_op.subtract),
"broadcast_mul": _rename(_op.multiply),
"broadcast_div": _rename(_op.divide),
"broadcast_mod": _rename(_op.mod),
"broadcast_maximum": _rename(_op.maximum),
"broadcast_minimum": _rename(_op.minimum),
"broadcast_power": _rename(_op.power),
"arccos": _rename(_op.acos),
"arcsin": _rename(_op.asin),
"arctan": _rename(_op.atan),
"arccosh": _rename(_op.acosh),
"arcsinh": _rename(_op.asinh),
"arctanh": _rename(_op.atanh),
"broadcast_equal": _mx_compare(_op.equal, _rename),
"broadcast_not_equal": _mx_compare(_op.not_equal, _rename),
"broadcast_greater": _mx_compare(_op.greater, _rename),
"broadcast_greater_equal": _mx_compare(_op.greater_equal, _rename),
"broadcast_lesser": _mx_compare(_op.less, _rename),
"broadcast_lesser_equal": _mx_compare(_op.less_equal, _rename),
"broadcast_logical_or": _mx_broadcast_logical(_op.logical_or),
"broadcast_logical_and": _mx_broadcast_logical(_op.logical_and),
"broadcast_logical_xor": _mx_broadcast_logical(_op.logical_xor),
"broadcast_to": _mx_broadcast_to,
"broadcast_like": _mx_broadcast_like,
"logical_not": _mx_logical_not,
"_equal": _mx_compare(_op.equal, _rename),
"_not_equal": _mx_compare(_op.not_equal, _rename),
"_greater": _mx_compare(_op.greater, _rename),
"_greater_equal": _mx_compare(_op.greater_equal, _rename),
"_lesser": _mx_compare(_op.less, _rename),
"_lesser_equal": _mx_compare(_op.less_equal, _rename),
"elemwise_add": _rename(_op.add),
"elemwise_sub": _rename(_op.subtract),
"elemwise_mul": _rename(_op.multiply),
"elemwise_div": _rename(_op.divide),
"_maximum": _rename(_op.maximum),
"_minimum": _rename(_op.minimum),
"flatten": _rename(_op.nn.batch_flatten),
"Flatten": _rename(_op.nn.batch_flatten),
# scalar power
"square": _mx_make_power(2),
"rsqrt": _mx_make_power(-1 / 2),
"cbrt": _mx_make_power(1 / 3),
"rcbrt": _mx_make_power(-1 / 3),
"__pow_scalar__": _binop_scalar(_op.power),
"_power_scalar": _binop_scalar(_op.power),
"__rsub_scalar__": _rbinop_scalar(_op.subtract),
"_rminus_scalar": _rbinop_scalar(_op.subtract),
"__rdiv_scalar__": _rbinop_scalar(_op.divide),
"_rdiv_scalar": _rbinop_scalar(_op.divide),
"__rpow_scalar__": _rbinop_scalar(_op.power),
# scalar op
"__add_scalar__": _binop_scalar(_op.add),
"_plus_scalar": _binop_scalar(_op.add),
"__sub_scalar__": _binop_scalar(_op.subtract),
"_minus_scalar": _binop_scalar(_op.subtract),
"__mul_scalar__": _binop_scalar(_op.multiply),
"_mul_scalar": _binop_scalar(_op.multiply),
"__div_scalar__": _binop_scalar(_op.divide),
"_div_scalar": _binop_scalar(_op.divide),
"log2": _mx_make_logarithm(2),
"log10": _mx_make_logarithm(10),
"log1p": _mx_log1p,
"expm1": _mx_expm1,
"_equal_scalar": _mx_compare(_op.equal, _binop_scalar),
"_not_equal_scalar": _mx_compare(_op.not_equal, _binop_scalar),
"_greater_scalar": _mx_compare(_op.greater, _binop_scalar),
"_greater_equal_scalar": _mx_compare(_op.greater_equal, _binop_scalar),
"_lesser_scalar": _mx_compare(_op.less, _binop_scalar),
"_lesser_equal_scalar": _mx_compare(_op.less_equal, _binop_scalar),
"_maximum_scalar": _binop_scalar(_op.maximum),
"_minimum_scalar": _binop_scalar(_op.minimum),
# reduction ops
"mean": _reduce(_op.mean),
"max": _reduce(_op.max),
"min": _reduce(_op.min),
"sum": _reduce(_op.sum),
"max_axis": _reduce(_op.max),
"min_axis": _reduce(_op.min),
"sum_axis": _reduce(_op.sum),
"argmax": _arg_reduce(_op.argmax),
"argmin": _arg_reduce(_op.argmin),
# init ops
"_ones": _init_op(_op.ones),
# softmax
"softmax": _softmax_op(_op.nn.softmax),
"log_softmax": _softmax_op(_op.nn.log_softmax),
"Softmax": _softmax_op(_op.nn.softmax),
"softsign": _mx_softsign,
"softmin": _mx_softmin,
"hard_sigmoid": _mx_hard_sigmoid,
"reciprocal": _mx_reciprocal,
# per op specialization
"Reshape": _reshape,
"reshape": _reshape,
"Cast": _cast,
"amp_cast": _cast,
"amp_multicast": _mx_amp_multicast,
"clip": _clip,
"transpose": _transpose,
"UpSampling": _upsampling,
"add_n": _elemwise_sum,
# MXNet specific implementations
"_zeros": _mx_zeros,
"FullyConnected": _mx_fully_connected,
"Activation": _mx_activations,
"Convolution": _mx_conv,
"Convolution_v1": _mx_conv2d,
"Deconvolution": _mx_conv_transpose,
"Pooling": _mx_pooling,
"Pooling_v1": _mx_pooling,
"Dropout": _mx_dropout,
"BatchNorm": _mx_batch_norm,
"BatchNorm_v1": _mx_batch_norm,
"_contrib_SyncBatchNorm": _mx_batch_norm,
"InstanceNorm": _mx_instance_norm,
"LayerNorm": _mx_layer_norm,
"GroupNorm": _mx_group_norm,
"LRN": _mx_lrn,
"L2Normalization": _mx_l2_normalize,
"slice": _mx_slice,
"slice_like": _mx_slice_like,
"slice_axis": _mx_slice_axis,
"SliceChannel": _mx_split,
"split": _mx_split,
"_split_v2": _mx_split_v2,
"SwapAxis": _mx_swap_axis,
"expand_dims": _mx_expand_dims,
"Concat": _mx_concat,
"concat": _mx_concat,
"stack": _mx_stack,
"dot": _mx_dot,
"batch_dot": _mx_batch_dot,
"LeakyReLU": _mx_leaky_relu,
"_arange": _mx_arange,
"_full": _mx_full,
"repeat": _mx_repeat,
"tile": _mx_tile,
"pad": _mx_pad,
"Pad": _mx_pad,
"take": _mx_take,
"gather_nd": _mx_gather_nd,
"reverse": _mx_reverse,
"SequenceReverse": _mx_sequence_reverse,
"squeeze": _mx_squeeze,
"broadcast_axis": _mx_broadcast_axis,
"broadcast_axes": _mx_broadcast_axis,
"BlockGrad": _mx_BlockGrad,
"shape_array": _mx_shape_array,
"Embedding": _mx_embedding,
"argsort": _mx_argsort,
"topk": _mx_topk,
"_unravel_index": _mx_unravel_index,
"SequenceMask": _mx_sequence_mask,
"SoftmaxOutput": _mx_softmax_output,
"SoftmaxActivation": _mx_softmax_activation,
"LinearRegressionOutput": _mx_linear_regression_output,
"LogisticRegressionOutput": _mx_logistic_regression_output,
"smooth_l1": _mx_smooth_l1,
"make_loss": _mx_make_loss,
"_contrib_div_sqrt_dim": _mx_contrib_div_sqrt_dim,
"_contrib_arange_like": _mx_contrib_arange_like,
"one_hot": _mx_one_hot,
"depth_to_space": _mx_depth_to_space,
"space_to_depth": _mx_space_to_depth,
"Correlation": _mx_correlation,
# vision
"_contrib_BilinearResize2D": _mx_resize,
"_contrib_MultiBoxPrior": _mx_multibox_prior,
"_contrib_MultiBoxDetection": _mx_multibox_detection,
"_contrib_ROIAlign": _mx_roi_align,
"ROIPooling": _mx_roi_pooling,
"_contrib_Proposal": _mx_proposal,
"_contrib_MultiProposal": _mx_proposal,
"_contrib_box_nms": _mx_box_nms,
"_contrib_box_decode": _mx_box_decode,
"_contrib_DeformableConvolution": _mx_deformable_convolution,
"_contrib_AdaptiveAvgPooling2D": _mx_adaptive_avg_pooling,
"GridGenerator": _mx_grid_generator,
"BilinearSampler": _mx_bilinear_sampler,
# NLP
"RNN": _mx_rnn_layer,
"_rnn_param_concat": _mx_rnn_param_concat,
"_contrib_interleaved_matmul_selfatt_qk": _mx_contrib_interleaved_matmul_selfatt_qk,
"_contrib_interleaved_matmul_selfatt_valatt": _mx_contrib_interleaved_matmul_selfatt_valatt,
# control flow
"_cond": _mx_cond,
    # Deprecated:
"Crop": _mx_crop_like,
# List of missing operators that are present in NNVMv1
# TODO(tvm-tvm): support all operators.
#
# "contrib_fifo_buffer": _mx_contrib_fifo_buffer,
"ring_buffer": _mx_contrib_fifo_buffer,
# Qnn ops
"_contrib_quantize_v2": _qnn_quantize,
"_contrib_quantized_concat": _qnn_contrib_concat,
# "_contrib_quantized_fifo_buffer": _qnn_contrib_quantized_fifo_buffer,
"_contrib_quantized_ring_buffer": _qnn_contrib_quantized_fifo_buffer,
"_sg_mkldnn_conv": _qnn_conv,
"_contrib_quantized_flatten": _qnn_flatten,
"_contrib_dequantize": _qnn_dequantize,
"_contrib_quantized_act": _qnn_activation,
"_contrib_quantized_pooling": _qnn_pooling,
"_contrib_quantized_batch_norm": _qnn_batch_norm,
"_sg_mkldnn_fully_connected": _qnn_fully_connected,
# numpy
"_np_transpose": _mx_npi_transpose,
"_npi_transpose": _mx_npi_transpose,
"_npi_pad": _mx_npi_pad,
"_npi_concatenate": _mx_npi_concatenate,
"_npx_reshape": _mx_npx_reshape,
"_np_copy": _rename(_op.copy),
"_npi_copy": _rename(_op.copy),
"_npi_power": _rename(_op.power),
"_npi_power_scalar": _binop_scalar(_op.power),
"_npi_multiply": _rename(_op.multiply),
"_npi_multiply_scalar": _binop_scalar(_op.multiply),
"_npi_add": _rename(_op.add),
"_npi_add_scalar": _binop_scalar(_op.add),
"_npi_subtract": _rename(_op.subtract),
"_npi_subtract_scalar": _binop_scalar(_op.subtract),
"_npi_where_rscalar": _mx_npi_where_rscalar,
"_npi_less": _rename(_op.less),
"_npi_less_equal": _mx_compare(_op.less_equal, _rename),
"_npi_tanh": _rename(_op.tanh),
"_npi_true_divide_scalar": _binop_scalar(_op.divide),
"_npi_stack": _mx_npi_stack,
}
# set identity list
_convert_map.update({k: _rename(k) for k in _identity_list})
_control_flow_ops = ["_cond", "_foreach", "_while_loop"]
_qnn_subgraph_ops = ["_sg_mkldnn_conv", "_sg_mkldnn_fully_connected"]
_subgraph_ops = _control_flow_ops + _qnn_subgraph_ops
_params_ops = ["_contrib_quantized_ring_buffer"]
def _get_op_params(children, attrs, op_name, node, params):
op_params = [children, attrs]
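    # Subgraph ops also receive their nested subgraphs; QNN subgraph ops and params
    # ops additionally receive the params dict.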
if op_name in _subgraph_ops:
subgraphs = node["subgraphs"]
op_params.append(subgraphs)
if op_name in _qnn_subgraph_ops:
op_params.append(params)
if op_name in _params_ops:
op_params.append(params)
return op_params
def _from_mxnet_impl(symbol, shape_dict, dtype_info, params=None, mod=None):
# pylint: disable=unused-argument
"""Convert mxnet symbol to compatible relay Function.
Reconstruct a relay Function by traversing the mxnet symbol.
Parameters
----------
symbol : mxnet.sym.Symbol
Incompatible symbol from mxnet.
The op_name and attrs inside are not always compatible.
shape_dict : dict
Known parameter shapes
dtype_info : dict or str.
Known parameter dtypes
mod : tvm.IRModule
The module that contains global information. It will be used for
converting ops that need global information, e.g. control-flow ops.
    Returns
    -------
func : tvm.relay.Function
Converted relay Function
"""
assert symbol is not None
if isinstance(symbol, dict):
jgraph = symbol
else:
jgraph = json.loads(symbol.tojson())
jnodes = jgraph["nodes"]
node_map = {}
shape_idx = 0
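    # node_map maps a node id to the list of Relay expressions produced by that node;
    # graph edges index into it as node_map[node_id][output_index].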
    # Check if there are any unsupported ops
unsupported = {}
for node in jnodes:
op_name = node["op"]
if op_name != "null" and op_name not in _convert_map:
if op_name not in unsupported:
unsupported[op_name] = 0
unsupported[op_name] += 1
if unsupported:
msg = "\n".join(["{}: {}".format(op_name, cnt) for op_name, cnt in unsupported.items()])
raise tvm.error.OpNotImplemented(
"One or more operators are not supported in frontend MXNet:\n{}".format(msg)
)
for nid, node in enumerate(jnodes):
children = [node_map[e[0]][e[1]] for e in node["inputs"]]
attrs = StrAttrsDict(node.get("attrs", {}))
node_name = node["name"]
op_name = node["op"]
if op_name == "null":
if isinstance(shape_dict, dict):
shape = shape_dict[node_name] if node_name in shape_dict else None
elif isinstance(shape_dict, (list, tuple)):
shape = shape_dict[shape_idx]
else:
raise ValueError("Unknown type of shape_dict: %s" + type(shape_dict))
if isinstance(dtype_info, dict):
dtype = dtype_info[node_name] if node_name in dtype_info else "float32"
elif isinstance(dtype_info, (list, tuple)):
dtype = dtype_info[shape_idx]
else:
dtype = dtype_info
if isinstance(shape_dict, (list, tuple)):
shape_idx += 1
node_map[nid] = [_expr.var(node_name, shape=shape, dtype=dtype)]
else:
assert op_name in _convert_map
op_params = _get_op_params(children, attrs, op_name, node, params)
res = _convert_map[op_name](*op_params)
if res is None:
# defer conversion, used in RNN state initialization
res = [node]
elif isinstance(res, (_expr.TupleWrapper, tuple, list)):
pass
elif isinstance(res, _expr.Expr):
res = [res]
else:
raise RuntimeError("unexpected type %s" % type(res))
node_map[nid] = res
outputs = [node_map[e[0]][e[1]] for e in jgraph["heads"]]
outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
func = _function.Function(analysis.free_vars(outputs), outputs)
return func
def _update_shape_dtype(shape, dtype, params):
"""Update shape dtype given params information"""
shape = {} if shape is None else shape
if not params:
return shape, dtype
shape = shape.copy()
shape.update({k: v.shape for k, v in params.items()})
if isinstance(dtype, str):
for k, v in params.items():
if v.dtype != dtype:
raise ValueError("%s: dtype not expected %s vs %s" % (k, dtype, v.dtype))
else:
dtype = dtype.copy()
dtype.update({k: str(v.dtype) for k, v in params.items()})
return shape, dtype
def from_mxnet(symbol, shape=None, dtype="float32", arg_params=None, aux_params=None):
"""Convert from MXNet"s model into compatible relay Function.
Parameters
----------
symbol : mxnet.Symbol or mxnet.gluon.HybridBlock
MXNet symbol.
shape : dict of str to tuple, optional
The input shape to the graph
dtype : str or dict of str to str
The input types to the graph
arg_params : dict of str to mx.NDArray
The argument parameters in mxnet
aux_params : dict of str to mx.NDArray
The auxiliary parameters in mxnet
Returns
-------
mod : tvm.IRModule
The relay module for compilation
params : dict of str to tvm.nd.NDArray
        The parameter dict to be used by relay
"""
try:
import mxnet as mx # pylint: disable=import-outside-toplevel
except ImportError as e:
raise ImportError("{}. MXNet is required to parse symbols.".format(e))
mod = IRModule()
if isinstance(symbol, mx.sym.Symbol):
params = {}
arg_params = arg_params if arg_params else {}
aux_params = aux_params if aux_params else {}
for k, v in arg_params.items():
params[k] = _nd.array(v.asnumpy())
for k, v in aux_params.items():
params[k] = _nd.array(v.asnumpy())
shape, dtype = _update_shape_dtype(shape, dtype, params)
func = _from_mxnet_impl(symbol, shape, dtype, params, mod)
elif isinstance(symbol, mx.gluon.HybridBlock):
if arg_params is not None or aux_params is not None:
raise ValueError("arg_params and aux_params ae not used when importing HybridBlock")
params = {}
for k, v in symbol.collect_params().items():
params[k] = _nd.array(v.data().asnumpy())
inputs = []
for name in shape:
inputs.append(mx.sym.Variable(name))
sym = symbol(*inputs)
if isinstance(sym, (list, tuple)):
sym = mx.sym.Group(sym)
shape, dtype = _update_shape_dtype(shape, dtype, params)
func = _from_mxnet_impl(sym, shape, dtype, params, mod)
elif isinstance(symbol, mx.gluon.Block):
raise NotImplementedError("Only Hybrid Blocks are supported now.")
else:
msg = "mxnet.Symbol or gluon.HybridBlock expected, got {}".format(type(symbol))
raise ValueError(msg)
mod["main"] = func
return mod, params
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/mxnet_qnn_op_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition, no-else-return
"""MXNet qnn dialect helper methods for MXNet specific implementations of more
generic qnn supported ops.
"""
import numpy as np
from tvm import relay
from tvm.relay.qnn.op.qnn import quantize, dequantize
# The below values are taken from -
# https://github.com/apache/incubator-mxnet/blob/master/src/operator/quantization/quantization_utils.h#L38-L39
zero_centered_uint8_quantized_range = np.float32(255.5)
zero_centered_int8_quantized_range = np.float32(127.5)
def _get_mkldnn_scale(data_min, data_max, quantized_range):
"""Computes the scale as per MKLDNN specification mentioned here -
https://intel.github.io/mkl-dnn/ex_int8_simplenet.html
Parameters
----------
data_min : float32
A number representing the lower end of the tensor to be quantized.
data_max : float32
A number representing the upper end of the tensor to be quantized.
quantized_range : float32
255 for uint8 and 127 for int8. This is the data type range.
Returns
-------
scale : A floating point number which acts as the scale for quantization.
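    Examples
    --------
    For data_min = -2.0, data_max = 3.0 and an int8 quantized_range of 127.5,
    real_range is 3.0 and the returned value is 3.0 / 127.5 (about 0.0235).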
"""
real_range = np.max([np.abs(np.float32(data_min)), np.abs(np.float32(data_max))])
scale = np.divide(quantized_range, real_range)
scale_inverse = np.divide(1.0, scale)
return scale_inverse
def _quantize_scale_with_zero_centered(data, scale, zero_point, out_dtype):
quantized_output = quantize(
data, relay.const(scale, "float32"), relay.const(zero_point, "int32"), out_dtype=out_dtype
)
return quantized_output, scale, zero_point
def _quantize_with_zero_centered(data, data_min, data_max, quantized_range, out_dtype):
"""Quantizes the given data tensor by calculating the scale
using the MKLDNN formula `quantized_range / max(abs(data_min, data_max))`.
Where quantized_range is 255 for uint8 and 127 for int8. The `data_min`
and `data_max` are the min and max to use for the `data` tensor elements.
Parameters
----------
data : tvm.relay.Expr
The input tensor to be quantized. Can be of type float32.
data_min : float
The minimum to use data elements.
data_max : float
The maximum to use for data elements.
quantized_range : float
255 for uint8 and 127 for int8. This is the data type range.
out_dtype : str
The output data type. Can be int8 or uint8
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
scale = _get_mkldnn_scale(data_min, data_max, quantized_range)
zero_point = 0
return _quantize_scale_with_zero_centered(data, scale, zero_point, out_dtype)
def _quantize_mkldnn_min_max_uint8(data, data_min, data_max):
"""Quantizes the given `data` in float32 and the given
min and max ranges and the output data type is `uint8`.
The method of quantizing is described here - https://tinyurl.com/y5k6fz5w.
We use our default quantize implementation from src/relay/qnn/op/quantize.cc:72
but compute the `scale` and `zero_point` to fit our equation.
Unlike in TFLite where we get the scale and zero_point from the model, MKLDNN
stores the min and max from which we calculate the scale and zero_point.
Parameters
----------
data : tvm.relay.Expr
The input tensor to be quantized. Can be of type float32.
    data_min : float
        The minimum to use for data elements.
    data_max : float
        The maximum to use for data elements.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _quantize_with_zero_centered(
data, data_min, data_max, zero_centered_uint8_quantized_range, "uint8"
)
def _quantize_mkldnn_min_max_int8(data, data_min, data_max):
"""Quantizes the given `data` in float32 and the given
min and max ranges and the output data type is `int8`.
The method of quantizing is described here - https://tinyurl.com/y5k6fz5w.
We use our default quantize implementation from src/relay/qnn/op/quantize.cc:72
but compute the `scale` and `zero_point` to fit our equation.
Unlike in TFLite where we get the scale and zero_point from the model, MKLDNN
stores the min and max from which we calculate the scale and zero_point.
Parameters
----------
data : tvm.relay.Expr
The input tensor to be quantized. Can be of type float32.
data_min : float
The minimum to use data elements.
data_max : float
The maximum to use for data elements.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _quantize_with_zero_centered(
data, data_min, data_max, zero_centered_int8_quantized_range, "int8"
)
def get_mkldnn_int8_scale(range_min, range_max):
"""Computes the quantization scale using MKLDNN specifications
with the given range. The output datatype of tensor to be quantized should be
int8.
Parameters
----------
range_min : float32
A number representing the lower end of the tensor to be quantized.
range_max : float32
A number representing the upper end of the tensor to be quantized.
Returns
-------
scale : A float32 number which acts as the scale for quantization.
"""
scale = _get_mkldnn_scale(range_min, range_max, zero_centered_int8_quantized_range)
return np.float32(scale)
def get_mkldnn_uint8_scale(range_min, range_max):
"""Computes the quantization scale using MKLDNN specifications
with the given range. The output datatype of tensor to be quantized should be
uint8.
Parameters
----------
range_min : float32
A number representing the lower end of the tensor to be quantized.
range_max : float32
A number representing the upper end of the tensor to be quantized.
Returns
-------
scale : A float32 number which acts as the scale for quantization.
"""
scale = _get_mkldnn_scale(range_min, range_max, zero_centered_uint8_quantized_range)
return np.float32(scale)
def quantize_conv_weights_bias_channel_mkldnn_from_var(
weights_var, bias, min_vector_range, max_vector_range, data_scale
):
"""Helper method to quantize the convolution kernel in prequantized model
in MXNet with MKLDNN. The kernel is always quantized to int8 output datatype.
    The inputs are the raw weights, which are floating point numbers. The min and
    max ranges are taken from the weights themselves.
Parameters
----------
weights_var : tvm.relay.var
The float32 representation of the weights.
bias : np.array
The float32 np array for bias.
min_vector_range : array of float32
A number representing the minimum of the weights per channel.
max_vector_range : array of float32
A number representing the maximum of the weights per channel.
data_scale : float
The data scale value.
Returns
-------
result : tvm.relay.expr
The quantized representation of the weights.
"""
quantized_range = zero_centered_int8_quantized_range
real_vector_range = np.maximum(np.absolute(min_vector_range), np.absolute(max_vector_range))
# If real_vector_range is 0, then to avoid division by 0 in scaling,
# make real_vector INT32_max
vector_scale = np.where(
real_vector_range == 0,
1.0 / float(np.iinfo(np.int32).max),
np.divide(real_vector_range, quantized_range),
)
# Handle bias impact on scales as done by MxNet-MKLDNN.
if bias is not None:
common = 2.0 * bias.astype("float32") * (1 / data_scale)
vector_scale_min = np.where(
bias > 0, common / float(np.iinfo(np.int32).max), common / float(np.iinfo(np.int32).min)
)
vector_scale = np.maximum(vector_scale, vector_scale_min)
zero_point = 0
quantized_output = quantize(
weights_var,
relay.const(vector_scale),
relay.const(zero_point, "int32"),
axis=0,
out_dtype="int8",
)
return quantized_output, vector_scale, zero_point
def get_mkldnn_requantize_scale_outDtype(min_output_range, max_output_range, out_dtype):
"""Get the MKLDNN requantized scale."""
quantized_out_range = (
zero_centered_int8_quantized_range
if out_dtype == "int8"
else zero_centered_uint8_quantized_range
)
out_range = np.max([np.abs(np.float32(min_output_range)), np.abs(np.float32(max_output_range))])
output_scale = quantized_out_range / out_range
requantize_scale = np.float32(1 / output_scale)
return requantize_scale
def get_conv_mkldnn_requantized_scale_outDtype(min_output_range, max_output_range):
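    """Choose uint8 when the output range is non-negative, otherwise int8, and
    compute the matching MKLDNN requantize scale for that dtype."""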
out_dtype = "uint8" if min_output_range >= 0.0 else "int8"
requantize_scale = get_mkldnn_requantize_scale_outDtype(
min_output_range, max_output_range, out_dtype
)
return requantize_scale, out_dtype
def quantize_conv_bias_mkldnn_from_var(bias_var, bias_scale):
"""Quantized conv2d bias"""
zero_point = 0
quantized_bias = quantize(
data=bias_var,
output_scale=relay.const(bias_scale),
output_zero_point=relay.const(zero_point, "int32"),
axis=0,
out_dtype="int32",
)
return quantized_bias
def quantize_mxnet_min_max(data, min_range, max_range, out_dtype="int8"):
"""Quantizes the given `data` in float32 and the given
min and max ranges and the output data type.
Only `int8` and `uint8` is supported as output data types.
The input data type is expected to be `float32`.
    MXNet has two different flavors of quantization: 1) Default and 2) MKLDNN.
    To get the second one, MXNet must be built with MKLDNN at compile time.
    Users can choose either implementation for the TVM runtime.
    The main difference between the two implementations is that MKLDNN is centered
    around 0 and the default implementation for uint8 is not.
Parameters
----------
data : tvm.relay.Expr
The input tensor to be quantized. Can be of type float32.
min_range : float
The minimum to use data elements.
max_range : float
The maximum to use for data elements.
out_dtype: str, optional
The output data type, can be 'int8' or 'uint8'
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
if out_dtype == "uint8":
return _quantize_mkldnn_min_max_uint8(data, min_range, max_range)
elif out_dtype == "int8":
return _quantize_mkldnn_min_max_int8(data, min_range, max_range)
else:
raise ValueError("Expected out_dtype to be int8 or uint8 but was %s" % out_dtype)
def _dequantize_zero_centered(data, data_min, data_max, quantized_range):
"""Dequantizes the given data tensor by calculating the scale
using the MKLDNN formula `max(abs(data_min, data_max))/quantized_range`.
Where quantized_range is 255 for uint8 and 127 for int8. The `data_min`
and `data_max` are the min and max to use for the `data` tensor elements.
Parameters
----------
data : tvm.relay.Expr
        The input tensor to be dequantized. Can be of type {int8 or uint8}.
data_min : float
The minimum to use data elements.
data_max : float
The maximum to use for data elements.
quantized_range : float
255 for uint8 and 127 for int8. This is the data type range.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
real_range = np.max([np.abs(np.float32(data_min)), np.abs(np.float32(data_max))])
scale = relay.const(np.divide(real_range, quantized_range), "float32")
zero_point = relay.const(0, "int32")
return dequantize(data, scale, zero_point)
def _dequantize_mkldnn_min_max_int8(data, imin_range, imax_range):
"""Dequantizes the given `data` in {int8 or uint8} and the given
min and max ranges and the output data type is `float32`.
The method of dequantizing is described here - https://tinyurl.com/y5k6fz5w.
We use our default quantize implementation from src/relay/qnn/op/dequantize.cc:67
but compute the `scale` and `zero_point` to fit our equation.
Unlike in TFLite where we get the scale and zero_point from the model, MKLDNN
stores the min and max from which we calculate the scale and zero_point.
Parameters
----------
data : tvm.relay.Expr
        The input tensor to be dequantized. Can be of type {int8 or uint8}.
imin_range : float
The minimum to use data elements.
imax_range : float
The maximum to use for data elements.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _dequantize_zero_centered(
data,
data_min=imin_range,
data_max=imax_range,
quantized_range=zero_centered_int8_quantized_range,
)
def _dequantize_mkldnn_min_max_uint8(data, imin_range, imax_range):
"""Dequantizes the given `data` in {int8 or uint8} and the given
min and max ranges and the output data type is `float32`.
The method of dequantize is described here - https://tinyurl.com/y5k6fz5w.
We use our default quantize implementation from src/relay/qnn/op/dequantize.cc:67
but compute the `scale` and `zero_point` to fit our equation.
Unlike in TFLite where we get the scale and zero_point from the model, MKLDNN
stores the min and max from which we calculate the scale and zero_point.
Parameters
----------
data : tvm.relay.Expr
        The input tensor to be dequantized. Can be of type {int8 or uint8}.
imin_range : float
The minimum to use data elements.
imax_range : float
The maximum to use for data elements.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _dequantize_zero_centered(
data,
data_min=imin_range,
data_max=imax_range,
quantized_range=zero_centered_uint8_quantized_range,
)
def dequantize_mxnet_min_max(data, min_range, max_range, in_dtype="int8"):
"""Dequantizes the given `data` in {int8 or uint8} and the given
min and max ranges. The output data type is float32.
Only `float32` is supported as output data types.
The input data type is expected to be {int8 or uint8}.
    MXNet has two different flavors of dequantization: 1) Default and 2) MKLDNN.
    To get the second one, MXNet must be built with MKLDNN at compile time.
    Users can choose either implementation for the TVM runtime.
    The main difference between the two implementations is that MKLDNN is centered
    around 0 and the default implementation for uint8 is not.
Parameters
----------
data : tvm.relay.Expr
        The input tensor to be dequantized. Can be of type {int8 or uint8}.
min_range : float
The minimum to use data elements for the output.
max_range : float
The maximum to use for data elements for the output.
in_dtype: str, optional
The input data type, can be 'int8' or 'uint8'
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
if in_dtype == "uint8":
return _dequantize_mkldnn_min_max_uint8(data, min_range, max_range)
elif in_dtype == "int8":
return _dequantize_mkldnn_min_max_int8(data, min_range, max_range)
else:
raise ValueError("Expected out_dtype to be int8 or uint8 but was %s" % in_dtype)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/nnvm_common.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition
"""Utility functions common to NNVM and MxNet conversion."""
import warnings
from ... import error
from ...tir.op import min_value
from .. import expr as _expr
from .. import op as _op
from .common import get_relay_op
from .common import infer_type as _infer_type
from .common import infer_shape as _infer_shape
def _warn_not_used(attr, op="nnvm"):
err = "{} is ignored in {}.".format(attr, op)
warnings.warn(err)
def _rename(new_op):
if isinstance(new_op, str):
new_op = get_relay_op(new_op)
# attrs are ignored.
def impl(inputs, _, _dtype="float32"):
return new_op(*inputs)
return impl
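# Illustrative sketch of the factory above (hypothetical names): `_rename` builds a
# converter that drops all attributes and simply forwards the inputs to the target op, e.g.
#   _convert_copy = _rename("copy")        # resolved through get_relay_op
#   out = _convert_copy([data], attrs)     # same as calling the resolved relay op on data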
def _reshape(inputs, attrs):
shape = attrs.get_int_tuple("shape")
reverse = attrs.get_bool("reverse", False)
if reverse:
return _op.reverse_reshape(inputs[0], newshape=shape)
return _op.reshape(inputs[0], newshape=shape)
def _init_op(new_op):
"""Init ops like zeros/ones"""
def _impl(inputs, attrs):
assert len(inputs) == 0
shape = attrs.get_int_tuple("shape")
dtype = attrs.get_str("dtype", "float32")
return new_op(shape=shape, dtype=dtype)
return _impl
def _softmax_op(new_op):
"""softmax/log_softmax"""
def _impl(inputs, attrs, _dtype="float32"):
axis = attrs.get_int("axis", -1)
use_length = attrs.get_bool("use_length", False)
if use_length:
# The second arg is valid_length. We can use sequence mask to mask the input before
# computing softmax
assert len(inputs) == 2
data = inputs[0]
length = inputs[1]
data_shape = _infer_shape(data)
data_dtype = _infer_type(data).checked_type.dtype
length_shape = _infer_shape(length)
if axis < 0:
axis = len(data_shape) + axis
data_ndims = len(data_shape)
length_ndims = len(length_shape)
# Sequence_mask supports axis = 0 and 1 and requires data to be in specific format.
if axis == data_ndims - 1 and data_ndims > 2 and length_ndims == 2:
new_batch_size = 1
for dim in range(length_ndims):
assert data_shape[dim] == length_shape[dim]
new_batch_size *= data_shape[dim]
# Reshape the data and length to satisfy sequence mask
data = _op.reshape(data, newshape=(new_batch_size, -1))
length = _op.reshape(length, newshape=(new_batch_size,))
# Input data is now 2D, we can set the axis = 1
axis = 1
elif data_ndims > 2:
raise error.OpNotImplemented(
"Operator softmax with use_length=True is supported only for axis -1"
)
res = _op.sequence_mask(
data=data,
valid_length=length,
mask_value=float(min_value(data_dtype).value),
axis=axis,
)
# Apply softmax
res = new_op(res, axis=axis)
# Reshape back to input data shape
if len(data_shape) > 2:
return _op.reshape(res, newshape=data_shape)
return res
return new_op(inputs[0], axis=axis)
return _impl
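# Worked shape example for the use_length path above (illustrative only): with data of
# shape (2, 3, 4), length of shape (2, 3) and axis=-1, the data is reshaped to
# (6, -1) -> (6, 4), the length to (6,), sequence_mask fills the padded positions with
# the dtype minimum so they contribute ~0 to the softmax, softmax runs on axis=1, and
# the result is reshaped back to (2, 3, 4).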
def _reduce(new_op):
"""Reduction ops like sum/min/max"""
def _impl(inputs, attrs, _dtype="float32"):
assert len(inputs) == 1
axis = attrs.get_int_tuple("axis", [])
keepdims = attrs.get_bool("keepdims", False)
exclude = attrs.get_bool("exclude", False)
# use None for reduce over all axis.
axis = None if len(axis) == 0 else axis
return new_op(inputs[0], axis=axis, keepdims=keepdims, exclude=exclude)
return _impl
def _arg_reduce(new_op):
"""Arg Reduction ops like argmin/argmax"""
def _impl(inputs, attrs):
assert len(inputs) == 1
axis = attrs.get_int("axis", None)
keepdims = attrs.get_bool("keepdims", False)
res = new_op(inputs[0], axis=[axis], keepdims=keepdims)
# cast to dtype.
res = res.astype("float32")
return res
return _impl
def _cast(inputs, attrs):
"""Type cast"""
dtype = attrs.get_str("dtype")
return inputs[0].astype(dtype=dtype)
def _clip(inputs, attrs):
a_min = attrs.get_float("a_min")
a_max = attrs.get_float("a_max")
return _op.clip(inputs[0], a_min=a_min, a_max=a_max)
def _transpose(inputs, attrs):
axes = attrs.get_int_tuple("axes", None)
# translate default case
axes = None if len(axes) == 0 else axes
return _op.transpose(inputs[0], axes=axes)
def _upsampling(inputs, attrs):
scale = attrs.get_int("scale")
return _op.nn.upsampling(inputs[0], scale_h=scale, scale_w=scale)
def _elemwise_sum(inputs, _, _dtype="float32"):
assert len(inputs) > 0
res = inputs[0]
for x in inputs[1:]:
res = _op.add(res, x)
return res
def _binop_scalar(new_op):
def _impl(inputs, attrs, odtype=None):
assert len(inputs) == 1
scalar = attrs.get_float("scalar")
if odtype is None:
odtype = _infer_type(inputs[0]).checked_type.dtype
scalar = _expr.const(scalar, dtype=odtype)
return new_op(inputs[0], scalar)
return _impl
def _rbinop_scalar(new_op):
def _impl(inputs, attrs, odtype=None):
assert len(inputs) == 1
scalar = attrs.get_float("scalar")
if odtype is None:
odtype = _infer_type(inputs[0]).checked_type.dtype
scalar = _expr.const(scalar, dtype=odtype)
return new_op(scalar, inputs[0])
return _impl
def _compare(new_op):
"""Compare ops like greater/less"""
def _impl(inputs, _, odtype="float32"):
assert len(inputs) == 2
return new_op(inputs[0], inputs[1]).astype(odtype)
return _impl
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/oneflow.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines
# pylint: disable=import-outside-toplevel
"""OneFlow: OneFlow is a performance-centered and open-source deep learning framework."""
import os
import re
import copy
from collections import OrderedDict
import numpy as np
import tvm
from tvm.ir import IRModule
from tvm.topi.utils import get_const_tuple
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from .. import ty as _ty
from .common import (
AttrCvt,
Renamer,
fold_constant,
get_relay_op,
infer_shape,
infer_type,
new_var,
)
__all__ = ["from_oneflow"]
FLOW_2_STR_DTYPE = {
2: "float32",
3: "float64",
6: "int64",
5: "int32",
4: "int8",
7: "uint8",
9: "float16",
}
def is_input_op(node):
"""Return true when the node is the input of the graph."""
return node.WhichOneof("op_type") == "input_conf"
def is_user_op(node):
"""Return true when the node is the intermediate variables of graph."""
return node.WhichOneof("op_type") == "user_conf"
def is_output_op(node):
"""Return true when the node is the output of the graph."""
return node.WhichOneof("op_type") == "output_conf"
def is_param_op(node):
"""Return true when the node is the intermediate variables of model(saved)."""
return node.WhichOneof("op_type") == "variable_conf"
def get_node_info(node):
"""
Get basic information about nodes: shape, data_type
"""
# list->tuple
shape = tuple(node.input_conf.blob_conf.shape.dim)
# get data type
dtype = node.input_conf.blob_conf.data_type
if dtype in list(FLOW_2_STR_DTYPE.keys()):
data_type = FLOW_2_STR_DTYPE[dtype]
else:
raise IndexError("Please check the data type of your node: %s" % node.name)
return shape, data_type
def _dtype_shape_promotion(inputs):
"""Promote data type and shape for list of tensors."""
dtype_order = ["bool", "int8", "int16", "int32", "int64", "float32", "float64"]
ranks = [len(infer_shape(x)) for x in inputs]
if set(ranks) == set([1, 0]):
for i, r in enumerate(ranks):
if r == 0:
inputs[i] = _op.expand_dims(inputs[i], axis=0)
dtypes = set(dtype_order.index(infer_type(x).checked_type.dtype) for x in inputs)
if len(dtypes) == 1:
return inputs
max_dtype = dtype_order[max(dtypes)]
for i, input_op in enumerate(inputs):
if infer_type(input_op).checked_type.dtype != max_dtype:
inputs[i] = input_op.astype(max_dtype)
return inputs
def parse_attr(attr):
"""Parse attribute of user op in oneflow."""
attrs = {}
for a in attr:
attr_str = str(attr[a])
if attr_str[0:7] == "at_list":
attr_str_ = attr_str.split(" ")[0]
if attr_str_ == "at_list_float":
attrs[a] = tuple(attr[a].at_list_float.val)
elif attr_str_ == "at_list_int32":
attrs[a] = tuple(attr[a].at_list_int32.val)
elif attr_str_ == "at_list_int64":
attrs[a] = tuple(attr[a].at_list_int64.val)
elif attr_str.split(":")[0] == "at_string":
attrs[a] = attr[a].at_string
elif attr_str.split(" ")[0] == "at_shape":
attrs[a] = tuple(list(attr[a].at_shape.dim))
else:
attr_str_ = attr_str.split(":")[0]
if attr_str_ == "at_bool":
attrs[a] = attr[a].at_bool
elif attr_str_ == "at_double":
attrs[a] = attr[a].at_double
elif attr_str_ == "at_float":
attrs[a] = attr[a].at_float
elif attr_str_ == "at_int32":
attrs[a] = attr[a].at_int32
elif attr_str_ == "at_int64":
attrs[a] = attr[a].at_int64
return attrs
def shape_of(x, dtype="int64"):
ttype = infer_type(x).checked_type
if not _ty.is_dynamic(ttype):
shape = list(ttype.shape)
return _expr.const(shape, dtype)
return _op.shape_of(x, dtype)
def dimension_constraint():
def _dim_check(attrs):
if len(attrs["kernel_size"]) in [1, 2, 3]:
return True
return False
return _dim_check, "Only 1d, 2d and 3d kernel supported."
class OneFlowOpConverter(object):
"""A helper class for holding oneflow op converters."""
@classmethod
def get_converter(cls):
"""
Get converter matches given opset.
Parameters
----------
None
Returns
-------
converter, which should be `_impl_vx`.
"""
version = 1
if hasattr(cls, "_impl_v{}".format(version)):
return getattr(cls, "_impl_v{}".format(version))
raise NotImplementedError("version {} of {} not implemented".format(version, cls.__name__))
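# Illustrative sketch of the converter pattern (hypothetical subclass): each op defines
# an `_impl_v1` classmethod and `get_converter` resolves it, e.g.
#   class Identity(OneFlowOpConverter):
#       @classmethod
#       def _impl_v1(cls, inputs, attrs, params):
#           return inputs[0]
#   convert_fn = Identity.get_converter()   # returns Identity._impl_v1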
class Pool(OneFlowOpConverter):
"""A helper class for pool op converters."""
name = ""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
data = inputs[0]
attrs.pop("data_format")
out = AttrCvt(
op_name=cls.name,
transforms={
"kernel_size": "pool_size",
"stride": "strides",
"dilations": ("dilation", 1),
},
ignores=["return_indices", "divisor_override"],
custom_check=dimension_constraint(),
)([data], attrs, params)
return out
class AdaptiveAvgPool2d(OneFlowOpConverter):
"""Operator converter for AdaptiveAvgPool2d"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
return _op.nn.adaptive_avg_pool2d(inputs[0], output_size=attrs["output_size"])
class AdaptiveMaxPool2d(OneFlowOpConverter):
"""Operator converter for AdaptiveMaxPool2d"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
return _op.nn.adaptive_max_pool2d(inputs[0], output_size=attrs["output_size"])
class GlobalAveragePool(OneFlowOpConverter):
"""Operator converter for GlobalAveragePool"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
rank = len(infer_shape(inputs[0]))
if rank == 3:
return _op.nn.global_avg_pool1d(inputs[0])
if rank == 4:
return _op.nn.global_avg_pool2d(inputs[0])
if rank == 5:
return _op.nn.global_avg_pool3d(inputs[0])
raise NotImplementedError(
"Global average pooling is only implemented for 1D, 2D, and 3D kernels, got %dD."
% (rank - 2),
)
class GlobalMaxPool(OneFlowOpConverter):
"""Operator converter for GlobalMaxPool"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
rank = len(infer_shape(inputs[0]))
if rank == 3:
return _op.nn.global_max_pool1d(inputs[0])
if rank == 4:
return _op.nn.global_max_pool2d(inputs[0])
if rank == 5:
return _op.nn.global_max_pool3d(inputs[0])
raise NotImplementedError(
"Global max pooling is only implemented for 1D, 2D, and 3D kernels, got %dD."
% (rank - 2),
)
class Conv(OneFlowOpConverter):
"""A helper class for conv op converters."""
name = ""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
# The kernel is loaded from model_dir_path and its name carries the ".weight" suffix, etc.
# The data is obtained through the graph; its op name contains "_input."
in_names = ["_input."]
kernel_names = [".weight"]
for i in inputs:
IN_NAMES = any(x in str(i) for x in in_names)
KERNEL_NAMES = any(x in str(i) for x in kernel_names)
if IN_NAMES:
data = i
elif KERNEL_NAMES:
kernel = i
else:
data = i
# Use shape of input to determine convolution type.
kernel_type = infer_type(kernel)
kernel_shapes = [get_const_tuple(kernel_type.checked_type.shape)]
if "kernel_size" not in attrs:
attrs["kernel_size"] = kernel_shapes[0][2:]
if "dilation_rate" in attrs:
attrs["dilation"] = list(attrs["dilation_rate"])
attrs.pop("dilation_rate")
pad_v = attrs.get("padding_before", [0, 0])
attrs["padding"] = [pad_v[0], pad_v[1], pad_v[0], pad_v[1]]
group_conv1d = False
if cls.name == "conv1d" and attrs.get("groups") != 1:
group_conv1d = True
# Expand input from NCW to NCHW
data = _op.expand_dims(data, axis=2)
# Expand kernel from OIW to OIHW
kernel = _op.expand_dims(kernel, axis=2)
# Add new value to kernel_shape, strides, dilation, pads, if needed
attrs["kernel_size"] = [1] + list(attrs["kernel_size"])
if "strides" in attrs:
attrs["strides"] = [1] + list(attrs["strides"])
if "dilations" in attrs:
attrs["dilation"] = [1] + list(attrs["dilations"])
out = AttrCvt(
op_name=cls.name,
transforms={
"group": ("groups", 1),
},
ignores=["data_format", "filters", "padding_after", "padding_before"],
custom_check=dimension_constraint(),
)([data, kernel], attrs, params)
# If this was a group_conv1d, squish output back to NCW.
if group_conv1d:
out = _op.squeeze(out, axis=[2])
return out
class ConvTranspose(OneFlowOpConverter):
"""Operator converter for ConvTranspose."""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
in_names = ["_input."]
kernel_names = [".weight"]
for i in inputs:
IN_NAMES = any(x in str(i) for x in in_names)
KERNEL_NAMES = any(x in str(i) for x in kernel_names)
if IN_NAMES:
data = i
elif KERNEL_NAMES:
kernel = i
else:
data = i
# get number of channels
attrs["channels"] = attrs.get("filters", 1)
attrs["groups"] = attrs.get("group", 1)
kernel_type = infer_type(kernel)
kernel_shapes = [get_const_tuple(kernel_type.checked_type.shape)]
if "kernel_size" not in attrs:
attrs["kernel_size"] = kernel_shapes[0][2:]
if "dilation_rate" in attrs:
attrs["dilation"] = list(attrs["dilation_rate"])
attrs.pop("dilation_rate")
pad_v = attrs.get("padding_before", [0, 0])
attrs["padding"] = [pad_v[0], pad_v[1], pad_v[0], pad_v[1]]
out = AttrCvt(
op_name=cls.name,
transforms={
"group": ("groups", 1),
},
disables=["filters", "data_format", "padding_before"],
custom_check=dimension_constraint(),
)([data, kernel], attrs, params)
return out
class Upsample(OneFlowOpConverter):
"""A helper class for upsample op converters"""
name = ""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
data = inputs[0]
input_shape = infer_shape(data)
dims = len(input_shape)
width_scale = attrs.get("width_scale", 1.0)
height_scale = attrs.get("height_scale", 1.0)
align_corners = attrs.get("align_corners", False)
if "nearest" in cls.name:
method = "nearest_neighbor"
elif "trilinear" in cls.name:
method = "trilinear"
elif "bilinear" in cls.name:
method = "bilinear"
# in 3d case, we use the purely static op
if dims == 5:
if isinstance(scales, _expr.Expr):
scale_h = _op.take(scales, _op.const(3))
scale_w = _op.take(scales, _op.const(4))
scale_d = _op.take(scales, _op.const(1))
else:
assert len(scales) == 5
scale_h = scales[-2]
scale_w = scales[-1]
scale_d = scales[-3]
layout = "NCDHW"
out = _op.nn.upsampling3d(
data,
scale_d,
scale_h,
scale_w,
layout=layout,
method=method,
coordinate_transformation_mode="asymmetric",
)
# in 2d case, use dynamic op
else:
if isinstance(height_scale, _expr.Expr):
height_scale = _op.take(height_scale, _op.const(3))
width_scale = _op.take(width_scale, _op.const(4))
layout = "NCHW"
out = _op.nn.upsampling(
inputs[0],
height_scale,
width_scale,
layout=layout,
method=method,
align_corners=align_corners,
)
return out
class UpsampleNearest(Upsample):
"""Operator converter for Upsample Nearest"""
name = "upsample_nearest"
class UpsampleBiLinear(Upsample):
"""Operator converter for Upsample Bilinear"""
name = "upsample_bilinear"
class Conv2d(Conv):
"""Operator converter for Conv2d"""
name = "conv2d"
class ConvTranspose2d(ConvTranspose):
"""Operator converter for ConvTranspose2d"""
name = "conv2d_transpose"
class BatchNorm(OneFlowOpConverter):
"""Operator converter for BatchNorm"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
# sort the inputs
sorted_inputs = copy.deepcopy(inputs)
for i in inputs:
IN_NAMES = "_input." in str(i)
if IN_NAMES:
sorted_inputs[0] = i
elif "weight" in str(i) and not IN_NAMES:
sorted_inputs[1] = i
elif "bias" in str(i) and not IN_NAMES:
sorted_inputs[2] = i
elif "mean" in str(i) and not IN_NAMES:
sorted_inputs[3] = i
elif "var" in str(i) and not IN_NAMES:
sorted_inputs[4] = i
if "data_format" in attrs:
if attrs["data_format"] == "channel_first":
attrs["axis"] = 1
out = AttrCvt(op_name="batch_norm", ignores=["training"], disables=["momentum"])(
sorted_inputs, attrs, params
)
return out[0]
class Flatten(OneFlowOpConverter):
"""Operator converter for Flatten"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
x = inputs[0]
input_shape = list(infer_shape(x))
start = attrs["start_dim"]
end = attrs["end_dim"]
ndim = len(input_shape)
if end < 0:
end += ndim
new_shape = [0] * start
new_shape.append(-1)
squeeze_axes = []
for i in range(start + 1, end + 1):
new_shape.append(1)
squeeze_axes.append(i)
for _ in range(end + 1, ndim):
new_shape.append(0)
out = _op.reshape(x, new_shape)
if squeeze_axes:
out = _op.squeeze(out, axis=squeeze_axes)
return out
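# Worked example for the reshape/squeeze trick above (illustrative only): an input of
# shape (2, 3, 4, 5) with start_dim=1 and end_dim=2 yields new_shape=[0, -1, 1, 0] and
# squeeze_axes=[2]; the reshape gives (2, 12, 1, 5) and the squeeze gives (2, 12, 5),
# i.e. dimensions 1..2 are flattened together.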
class MatMul(OneFlowOpConverter):
"""Operator converter for MatMul"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
assert len(inputs) == 2, "MatMul op take 2 inputs, {} given".format(len(inputs))
dtype = infer_type(inputs[0]).checked_type.dtype
# Y = alpha * A * B
alpha = float(attrs.get("alpha", 1.0))
transA = bool(attrs.get("transpose_a", False))
transB = bool(attrs.get("transpose_b", False))
a_shape = infer_shape(inputs[0])
b_shape = infer_shape(inputs[1])
if (
(transA and transB and a_shape[-2] != b_shape[-1])
or (transA and not transB and a_shape[-2] != b_shape[-2])
or (transB and not transA and a_shape[-1] != b_shape[-1])
or (not transB and not transA and a_shape[-1] != b_shape[-2])
):
matmul_a = inputs[1]
matmul_b = inputs[0]
else:
matmul_a = inputs[0]
matmul_b = inputs[1]
if transA:
perm = list(range(len(a_shape)))
perm[-2] = len(a_shape) - 1
perm[-1] = len(a_shape) - 2
matmul_a = _op.transpose(matmul_a, axes=perm)
if transB:
perm = list(range(len(b_shape)))
perm[-2] = len(b_shape) - 1
perm[-1] = len(b_shape) - 2
matmul_b = _op.transpose(matmul_b, axes=perm)
# This implementation closely follows the ONNX frontend.
# Need to check the input shapes, as batch matmul may be required.
a_shape = shape_of(matmul_a, dtype="int32")
a_rank = infer_shape(a_shape)[0]
b_shape = shape_of(matmul_b, dtype="int32")
b_rank = infer_shape(b_shape)[0]
# When performing a batch matmul, we need to properly handle N-dim shapes.
if a_rank > 2 or b_rank > 2:
def flatten_to_nd(x, x_shape, nd=3):
ndims = infer_shape(x_shape)[0]
if ndims == nd:
return x
newshape = _op.concatenate(
[
_expr.const([-1], dtype=infer_type(x_shape).checked_type.dtype),
_op.strided_slice(x_shape, [ndims - nd + 1], [ndims]),
],
0,
)
out = _op.reshape(x, fold_constant(newshape))
return out
b_type = infer_type(matmul_b)
# Convert to dense if the second matrix is 2d and non-dynamic
if b_rank == 2 and not _ty.is_dynamic(b_type.checked_type):
a = flatten_to_nd(matmul_a, a_shape, 2)
b = _op.transpose(matmul_b)
output = _op.nn.dense(a, b)
else:
# Convert a and b into 3 dimensional tensors.
a = flatten_to_nd(matmul_a, a_shape, 3)
b = flatten_to_nd(matmul_b, b_shape, 3)
# Transpose matrix dimensions of b.
b = _op.transpose(b, [0, 2, 1])
# Perform a batch matmul.
output = _op.nn.batch_matmul(a, b)
# Determine the output batch dimension.
if a_rank > b_rank:
out_batch = _op.strided_slice(a_shape, [0], [a_rank - 2])
elif a_rank < b_rank:
out_batch = _op.strided_slice(b_shape, [0], [b_rank - 2])
# If its unclear how broadcasting should be applied, the output
# shape is determined by choosing the maximum value from each input.
else:
out_batch = _op.concatenate(
[
_op.maximum(
_op.strided_slice(a_shape, [i], [i + 1]),
_op.strided_slice(b_shape, [i], [i + 1]),
)
for i in range(a_rank - 2)
],
0,
)
# Reshape output to original dimensions.
final_shape = _op.concatenate(
[
out_batch,
_op.strided_slice(
a_shape, [infer_shape(a_shape)[0] - 2], [infer_shape(a_shape)[0] - 1]
),
_op.strided_slice(
b_shape, [infer_shape(b_shape)[0] - 1], [infer_shape(b_shape)[0]]
),
],
0,
)
out = _op.reshape(output, fold_constant(final_shape))
else:
if b_rank == 1:
matmul_b = _op.expand_dims(matmul_b, 1, 1)
# Otherwise a simple dense op will get the job done.
input_1_t = _op.transpose(matmul_b, axes=(1, 0))
out = _op.nn.dense(matmul_a, input_1_t)
if b_rank == 1:
out = _op.squeeze(out, axis=[-1])
if not np.isclose(alpha, 1.0):
out = out * _expr.const(alpha, dtype=dtype)
return out
class Reduce(OneFlowOpConverter):
"""Operator converter for reduce ops"""
name = ""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
attr = {"axis": attrs.get("axis", 0), "keepdims": attrs.get("keepdims", True)}
return AttrCvt(cls.name)(inputs, attr)
class ReduceMax(Reduce):
"""Operator converter for ReduceMax"""
name = "max"
class ReduceMin(Reduce):
"""Operator converter for ReduceMin"""
name = "min"
class ReduceSum(Reduce):
"""Operator converter for ReduceSum"""
name = "sum"
class ReduceMean(Reduce):
"""Operator converter for ReduceMean"""
name = "mean"
class Square(OneFlowOpConverter):
"""Operator converter for square"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
assert len(inputs) == 1, "Square op {} take 1 inputs, {} given".format(
cls.name, len(inputs)
)
return _op.multiply(inputs[0], inputs[0])
class Add(OneFlowOpConverter):
"""Operator converter for Add"""
name = "add"
@classmethod
def _impl_v1(cls, inputs, attrs, params):
assert len(inputs) == 2, "Math op {} take 2 inputs, {} given".format(cls.name, len(inputs))
axis = int(attrs.get("axis", 0))
true_names = ["weight", "bias"]
false_names = ["_input."]
for i in inputs:
T_NAMES = any(x in str(i) for x in true_names)
F_NAMES = any(x in str(i) for x in false_names)
if T_NAMES and not F_NAMES:
add_b = i
else:
add_a = i
# fix the shape
add_shape = infer_shape(add_a)
if len(add_shape) > 2:
add_b = _op.expand_dims(add_b, axis=axis, num_newaxis=len(add_shape) - 2)
add_b_shape = list(infer_shape(add_b))
add_b_shape.insert(0, add_shape[0])
add_b = _op.reshape(add_b, tuple(add_b_shape))
out = get_relay_op(cls.name)(add_a, add_b)
return out
class Expand(OneFlowOpConverter):
"""Operator converter for Expand"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
data_in = inputs[0]
shape = list(infer_shape(data_in))
ndims = len(shape)
sizes = attrs["logical_expand_shape"]
out = data_in
out_dims = len(sizes)
if ndims < out_dims:
num_newaxis = out_dims - ndims
out = _op.expand_dims(out, axis=0, num_newaxis=num_newaxis)
shape = [1] * num_newaxis + shape
for i in range(out_dims):
if sizes[i] != -1 and shape[i] == 1:
out = _op.repeat(out, sizes[i], axis=i)
return out
class Transpose(OneFlowOpConverter):
"""Operator converter for transpose."""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
perm = attrs["perm"]
return _op.transpose(inputs[0], axes=perm)
class ExpandDim(OneFlowOpConverter):
"""Operator converter for ExpandDim"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
return _op.expand_dims(inputs[0], axis=attrs.get("axis", 0))
class BroadcastMath(OneFlowOpConverter):
"""Operator converter for broadcast math ops"""
name = ""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
assert len(inputs) == 2, "Math op {} take 2 inputs, {} given".format(cls.name, len(inputs))
beta_names = ["weight", "bias", "mean", "var", "Constant"]
for i in inputs:
T_NAMES = any([x in str(i) for x in beta_names])
if T_NAMES and "_input." not in str(i):
input_b = i
else:
input_a = i
if cls.name == "divide":
length = []
for i in inputs:
length.append(len(str(i)))
for i in inputs:
if len(str(i)) == max(length):
input_a = i
else:
input_b = i
if cls.name == "subtract":
length = []
for i in inputs:
length.append(len(str(i)))
for i in inputs:
if len(str(i)) == max(length):
input_b = i
else:
input_a = i
try:
return get_relay_op(cls.name)(input_a, input_b)
except UnboundLocalError:
return get_relay_op(cls.name)(*inputs)
class BroadcastMul(BroadcastMath):
"""Operator converter for Mul broadcast"""
name = "multiply"
class BroadcastAdd(BroadcastMath):
"""Operator converter for Add broadcast"""
name = "add"
class BroadcastSub(BroadcastMath):
"""Operator converter for Sub broadcast"""
name = "subtract"
class BroadcastDiv(BroadcastMath):
"""Operator converter for Div broadcast"""
name = "divide"
class LogicalGreater(OneFlowOpConverter):
"""Operator converter for greater"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
res = None
if attrs.get("has_int_operand", True):
value = attrs.get("int_operand", 0.0)
res = _op.greater(inputs[0], _op.full_like(inputs[0], fill_value=_expr.const(value)))
elif attrs.get("has_float_operand", True):
value = float(attrs.get("float_operand", 0.0))
res = _op.greater(
inputs[0], _op.full_like(inputs[0], fill_value=_expr.const(value)).astype("float32")
)
else:
raise AttributeError(
"please check if has_int_operand or has_float_operand in your attrs"
)
return res
class Log1p(OneFlowOpConverter):
"""Operator converter for Log1p"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
return _op.log(inputs[0] + _expr.const(1.0))
class Pow(OneFlowOpConverter):
"""Operator converter for Power"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
inputs = _dtype_shape_promotion(inputs)
return get_relay_op(cls.name)(inputs[0], inputs[1])
class Expm1(OneFlowOpConverter):
"""Operator converter for Expm1"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
return _op.exp(inputs[0]) - _expr.const(1.0)
class Unary(OneFlowOpConverter):
"""A helper class for unary op converters"""
name = ""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
assert len(inputs) == 1, "Unary math op {} takes 1 input, {} given".format(
cls.name, len(inputs)
)
return get_relay_op(cls.name)(*inputs)
class Absolute(Unary):
"""Operator converter for Absolute."""
name = "abs"
class AddN(OneFlowOpConverter):
"""Operator converter for Add_n"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
assert len(inputs) > 0, "add_n take >=1 inputs, but 0 given."
res = inputs[0]
for each in inputs[1:]:
res = _op.add(res, each)
return res
class ScalarAdd(OneFlowOpConverter):
"""Operator convert for Add_scalar"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
assert len(inputs) == 1, "add_scalar take == 1 inputs, but {} given.".format(len(inputs))
if attrs.get("has_int_operand", True):
res = inputs[0] + _expr.const(attrs["int_operand"])
elif attrs.get("has_float_operand", True):
res = inputs[0] + _expr.const(attrs["float_operand"])
else:
raise AttributeError(
"please check if has_int_operand or has_float_operand in your attrs"
)
return res
class ScalarMul(OneFlowOpConverter):
"""Operator convert for Mul_scalar"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
assert len(inputs) == 1, "add_scalar take == 1 inputs, but {} given.".format(len(inputs))
if attrs.get("has_int_operand", True):
res = inputs[0] * _expr.const(attrs["int_operand"], dtype="float32")
elif attrs.get("has_float_operand", True):
res = inputs[0] * _expr.const(attrs["float_operand"])
else:
raise AttributeError(
"please check if has_int_operand or has_float_operand in your attrs"
)
return res
class ScalarDiv(OneFlowOpConverter):
"""Operator convert for Div_scalar"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
assert len(inputs) == 1, "div_scalar take == 1 inputs, but {} given.".format(len(inputs))
if attrs.get("has_int_operand", True):
res = inputs[0] / _expr.const(attrs["int_operand"], dtype="float32")
elif attrs.get("has_float_operand", True):
res = inputs[0] / _expr.const(attrs["float_operand"])
else:
raise AttributeError(
"please check if has_int_operand or has_float_operand in your attrs"
)
return res
class ScalarPow(OneFlowOpConverter):
"""Operator convert for Pow_scalar"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
if attrs.get("has_int_operand", True):
coeff = _expr.const(attrs["int_operand"])
elif attrs.get("has_float_operand", True):
coeff = _expr.const(attrs["float_operand"])
return _op.power(inputs[0], coeff)
class MaxPool2d(Pool):
"""Operator converter for MaxPool"""
name = "max_pool2d"
class AveragePool2d(Pool):
"""Operator converter for AveragePool."""
name = "avg_pool2d"
class Affine(OneFlowOpConverter):
"""Operator converter for Affine transformation."""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
alpha = _expr.const(attrs.get("alpha", 1.0))
beta = _expr.const(attrs.get("beta", 0.0))
return (alpha * inputs[0]) + beta
class Reshape(OneFlowOpConverter):
"""Operator converter for Reshape."""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
return _op.reshape(inputs[0], attrs["shape"])
class Softmax(OneFlowOpConverter):
"""Operator converter for Softmax."""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
axis = attrs.get("axis", -1)
data = inputs[0]
if isinstance(axis, str):
axis = int(axis)
return _op.nn.softmax(data, axis=axis)
class LogSoftmax(OneFlowOpConverter):
"""Operator converter for LogSoftmax."""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
axis = attrs.get("axis", 1)
ndim = len(infer_shape(inputs[0]))
if axis < 0:
axis += ndim
axes = list(range(axis, ndim))
x = inputs[0]
m = _op.max(x, axes, keepdims=True)
e = _op.exp(x - m)
s = _op.sum(e, axes, keepdims=True)
return x - m - _op.log(s)
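# The sequence above is the numerically stable form of
#   log_softmax(x)_i = x_i - max(x) - log(sum_j(exp(x_j - max(x))))
# with the max/sum taken over the axes from `axis` through the last dimension.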
class Dropout(OneFlowOpConverter):
"""Operator converter for Dropout."""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
out = AttrCvt("dropout", {"ratio": "rate"}, ignores=["is_test"])
return out
class ThresholdedRelu(OneFlowOpConverter):
"""Operator converter for ThresholdedRelu."""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
alpha = float(attrs.get("alpha", 1.0))
alpha_tensor = _op.full_like(inputs[0], fill_value=_expr.const(alpha))
mask = _op.greater(inputs[0], alpha_tensor).astype("float32")
return inputs[0] * mask
class Elu(OneFlowOpConverter):
"""Operator converter for Elu"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
alpha = float(attrs.get("alpha", 1.0))
return _expr.const(-alpha) * _op.nn.relu(
_expr.const(1.0) - _op.exp(inputs[0])
) + _op.nn.relu(inputs[0])
class PReLU(OneFlowOpConverter):
"""Operator converter for PReLU"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
assert len(inputs) == 2, "PReLU need 2 inputs, but {} given".format(len(inputs))
for i in inputs:
if "_input." in str(i):
prelu_a = i
else:
prelu_b = i
input_shape = shape_of(prelu_a)
alpha = _op.broadcast_to_like(prelu_b, prelu_a)
alpha = _op.reshape(alpha, [-1])
output = _op.nn.prelu(_op.reshape(prelu_a, [-1]), alpha, axis=0)
out = _op.reshape(output, input_shape)
return out
class Selu(OneFlowOpConverter):
"""Operator converter for Selu"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
alpha = float(attrs.get("alpha", 1.67326319217681884765625))
gamma = float(attrs.get("gamma", 1.05070102214813232421875))
return _expr.const(gamma) * (
_expr.const(-alpha) * _op.nn.relu(_expr.const(1.0) - _op.exp(inputs[0]))
+ _op.nn.relu(inputs[0])
)
class Silu(OneFlowOpConverter):
"""Operator converter for Silu"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
a = inputs[0]
b = _op.sigmoid(inputs[0])
return _op.multiply(a, b)
class Gelu(OneFlowOpConverter):
"""Operator converter for Gelu"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
data = inputs[0]
return data * (
_expr.const(0.5) + _op.erf(data * _expr.const(0.5**0.5)) * _expr.const(0.5)
)
class HardTanh(OneFlowOpConverter):
"""Operator converter for HardTanh"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
tanh_min = attrs.get("min_val", 0.0)
tanh_max = attrs.get("max_val", 0.0)
return _op.tensor.clip(inputs[0], tanh_min, tanh_max)
class Softplus(OneFlowOpConverter):
"""Operator converter for Softplus"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
data = inputs[0]
data_dtype = infer_type(data).checked_type.dtype
data = _op.exp(data) + _expr.const(1, dtype=data_dtype)
return _op.log(data)
class Softsign(OneFlowOpConverter):
"""Operator converter for Softsign"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
return inputs[0] / (_expr.const(1.0) + Absolute.get_converter()(inputs, attrs, params))
class Variance(OneFlowOpConverter):
"""Operator converter for Variance"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
axis = attrs["dim"]
keepdims = attrs["keepdim"]
unbiased = bool(attrs["unbiased"])
return _op.reduce.variance(inputs[0], axis=axis, keepdims=keepdims, unbiased=unbiased)
class Concat(OneFlowOpConverter):
"""Operator converter for Concat"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
attrs.pop("max_dim_size")
inputs = _dtype_shape_promotion(inputs)
return _op.concatenate(inputs, axis=attrs["axis"])
class Clip(OneFlowOpConverter):
"""Operator converter for Clip"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
attr = {}
dtype = infer_type(inputs[0])
if "float" in str(dtype):
attr["a_min"] = attrs["floating_min"]
attr["a_max"] = attrs["floating_max"]
elif "int" in str(dtype):
attr["a_min"] = attrs["integral_min"]
attr["a_max"] = attrs["integral_max"]
else:
attr["a_min"] = -np.inf
attr["a_max"] = np.inf
out = AttrCvt("clip")(inputs, attr, params)
return out
class Slice(OneFlowOpConverter):
"""Operator converter for Slice"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
starts = list(attrs["start"])
ends = list(attrs["stop"])
steps = list(attrs["step"])
return _op.strided_slice(inputs[0], starts, ends, steps)
class Split(OneFlowOpConverter):
"""Operator converter for Split"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
splits = attrs.get("split", None)
if splits is not None:
indices = []
attrs["indices_or_sections"] = []
index = 0
for i in splits[:-1]:
index += i
indices.append(index)
output = _op.split(inputs[0], indices, attrs.get("axis", 0))
# If the output of split is a single value, unpack it from the TupleWrapper
if len(output) == 1:
output = output[0]
return output
class Scatter(OneFlowOpConverter):
"""Operator converter for Scatter"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
axis = attrs.get("axis", 0)
return _op.scatter(inputs[0], inputs[1], inputs[2], axis)
class Unsqueeze(OneFlowOpConverter):
"""Operator converter for Unsqueeze"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
axes = sorted(attrs["axes"])
for axis in axes:
inputs[0] = _op.expand_dims(inputs[0], axis=axis, num_newaxis=1)
return inputs[0]
class Sign(OneFlowOpConverter):
"""Operator converter for Sign"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
return _op.sign(inputs[0])
class Reciprocal(OneFlowOpConverter):
"""Operator converter for Reciprocal"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
dtype = infer_type(inputs[0]).checked_type.dtype
return _expr.const(1.0, dtype=dtype) / inputs[0]
class Erf(OneFlowOpConverter):
"""Operator converter for Erf"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
return _op.erf(inputs[0])
class Erfc(OneFlowOpConverter):
"""Operator converter for Erfs"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
return _expr.const(1.0) - _op.erf(inputs[0])
class HardSigmoid(OneFlowOpConverter):
"""Operator converter for HardSigmoid"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
alpha = attrs.get("alpha", 0.2)
beta = attrs.get("beta", 0.5)
transformX = (inputs[0] * _expr.const(alpha)) + _expr.const(beta)
attr = {"a_min": 0, "a_max": 1}
return AttrCvt("clip")([transformX], attr)
class OneHot(OneFlowOpConverter):
"""Operator converter for OneHot"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
# Extract relay one_hot inputs.
indices, depth, values = inputs
ndim = len(infer_shape(indices))
# Split the ONNX on/off values into two separate expressions.
off_value, on_value = _op.take(values, _op.const(0)), _op.take(values, _op.const(1))
# Extract the datatype of the output from on_value.
dtype = infer_type(on_value).checked_type.dtype
ind_dtype = infer_type(indices).checked_type.dtype
# Normalize the indices to a positive range
indices = _op.where(
indices < _op.const(0, ind_dtype), indices + _op.cast(depth, ind_dtype), indices
)
# set default value when axis is not set in the model
axis = attrs.get("axis", -1)
if axis < 0:
axis += ndim + 1
return _op.one_hot(indices, on_value, off_value, depth, axis, dtype=dtype)
class Where(OneFlowOpConverter):
"""Operator converter for Where"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
condition_rank = len(infer_shape(inputs[0]))
x_rank = len(infer_shape(inputs[1]))
y_rank = len(infer_shape(inputs[2]))
ranks = [condition_rank, x_rank, y_rank]
# If one rank is longer than others, then we can broadcast
# to that shape.
max_rank = max(ranks)
max_rank_idxs = [i for i, x in enumerate(ranks) if x == max_rank]
broadcast_shape = shape_of(inputs[max_rank_idxs[0]])
# If two or more inputs have the same rank, compute the broadcast
# shape by taking the maximum value of each dimensions.
if len(max_rank_idxs) > 1:
for idx in max_rank_idxs:
broadcast_shape = _op.maximum(broadcast_shape, shape_of(inputs[idx]))
broadcast_shape = fold_constant(broadcast_shape)
condition = _op.broadcast_to(inputs[0], broadcast_shape)
x = _op.broadcast_to(inputs[1], broadcast_shape)
y = _op.broadcast_to(inputs[2], broadcast_shape)
return _op.where(condition, x, y)
class Constant(OneFlowOpConverter):
"""Operator converter for Constant"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
is_float = attrs.get("is_floating_value", True)
shape = attrs.get("shape", (1,))
if is_float:
dtype = "float32"
value = attrs.pop("floating_value")
else:
dtype = "int8"
value = attrs.pop("integer_value")
np_array = np.zeros(shape)
np_array.fill(value)
value = _expr.const(np_array, dtype)
return value
class Range(OneFlowOpConverter):
"""Operator converter for Range"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
if len(inputs) != 0:
raise ValueError("Expect no inputs but get {}".format(len(inputs)))
start = attrs.get("start", 0.0)
limit = attrs.get("limit", 1.0)
delta = attrs.get("delta", 1.0)
return _op.arange(
_expr.const(start, dtype="float32"),
_expr.const(limit, dtype="float32"),
_expr.const(delta, dtype="float32"),
)
class Cast(OneFlowOpConverter):
"""Operator converter for Cast"""
@classmethod
def _impl_v1(cls, inputs, attrs, params):
attrs["dtype"] = infer_type(inputs[0]).checked_type.dtype
return AttrCvt(op_name="cast")(inputs, attrs)
def get_convert_map():
# supported oneflow2relay op
return {
# defs/math
"bias_add": Add.get_converter(),
"scalar_add": ScalarAdd.get_converter(),
"scalar_mul": ScalarMul.get_converter(),
"scalar_div": ScalarDiv.get_converter(),
"scalar_pow": ScalarPow.get_converter(),
"reduce_sum": ReduceSum.get_converter(),
"reduce_max": ReduceMax.get_converter(),
"reduce_min": ReduceMin.get_converter(),
"reduce_mean": ReduceMean.get_converter(),
"broadcast_add": BroadcastAdd.get_converter(),
"broadcast_mul": BroadcastMul.get_converter(),
"broadcast_sub": BroadcastSub.get_converter(),
"broadcast_div": BroadcastDiv.get_converter(),
"scalar_logical_greater": LogicalGreater.get_converter(),
"log": Renamer("log"),
"log1p": Log1p.get_converter(),
"acos": Renamer("acos"),
"acosh": Renamer("acosh"),
"asin": Renamer("asin"),
"asinh": Renamer("asinh"),
"atan": Renamer("atan"),
"atanh": Renamer("atanh"),
"cos": Renamer("cos"),
"cosh": Renamer("cosh"),
"sin": Renamer("sin"),
"sinh": Renamer("sinh"),
"tan": Renamer("tan"),
"tanh": Renamer("tanh"),
"pow": Pow.get_converter(),
"exp": Renamer("exp"),
"expm1": Expm1.get_converter(),
"floor": Renamer("floor"),
"ceil": Renamer("ceil"),
"round": Renamer("round"),
"add_n": AddN.get_converter(),
"sqrt": Renamer("sqrt"),
"rsqrt": Renamer("rsqrt"),
"square": Square.get_converter(),
"sign": Sign.get_converter(),
"erf": Erf.get_converter(),
"erfc": Erfc.get_converter(),
"reciprocal": Reciprocal.get_converter(),
# defs/activation
"softmax": Softmax.get_converter(),
"softsign": Softsign.get_converter(),
"hardtanh": HardTanh.get_converter(),
"relu": Renamer("relu"),
"leaky_relu": Renamer("leaky_relu"),
"prelu": PReLU.get_converter(),
"selu": Selu.get_converter(),
"silu": Silu.get_converter(),
"gelu": Gelu.get_converter(),
# defs/nn
"conv2d": Conv2d.get_converter(),
"deconv2d": ConvTranspose2d.get_converter(),
"maxpool_2d": MaxPool2d.get_converter(),
"avgpool_2d": AveragePool2d.get_converter(),
"adaptive_avg_pool2d": AdaptiveAvgPool2d.get_converter(),
"adaptive_max_pool2d": AdaptiveMaxPool2d.get_converter(),
"dropout": Dropout.get_converter(),
"normalization": BatchNorm.get_converter(),
"upsample_nearest_2d": UpsampleNearest.get_converter(),
"upsample_bilinear_2d": UpsampleBiLinear.get_converter(),
# defs/tensor
"matmul": MatMul.get_converter(),
"batch_matmul": MatMul.get_converter(),
"broadcast_matmul": MatMul.get_converter(),
"concat": Concat.get_converter(),
"clip_by_scalar": Clip.get_converter(),
"slice": Slice.get_converter(),
"expand": Expand.get_converter(),
"transpose": Transpose.get_converter(),
"expand_dims": ExpandDim.get_converter(),
"range": Range.get_converter(),
"cast": Cast.get_converter(),
# defs/others
"reshape": Reshape.get_converter(),
"constant": Constant.get_converter(),
"where": Where.get_converter(),
"flatten": Flatten.get_converter(),
"sigmoid": Renamer("sigmoid"),
"sigmoid_v2": Renamer("sigmoid"),
"hardsigmoid": HardSigmoid.get_converter(),
"softplus": Softplus.get_converter(),
"squeeze": AttrCvt("squeeze", {"axes": "axis"}),
"unsqueeze": Unsqueeze.get_converter(),
"identity": Renamer("copy"),
"var": Variance.get_converter(),
}
class oneflow_input(object):
"""
Dual purpose list or dictionary access object
"""
def __init__(self):
self.input_keys = []
self.input_dict = {}
self.n = 0
def __getitem__(self, item):
if isinstance(item, int):
if item > (len(self.input_keys) - 1):
return None
return self.input_dict[self.input_keys[item]]
if isinstance(item, str):
if item not in self.input_keys:
return None
return self.input_dict[item]
if isinstance(item, slice):
keys = self.input_keys[item]
return [self.input_dict[key] for key in keys]
raise ValueError("Only integer, string, and slice accesses allowed.")
def __setitem__(self, item, value):
if isinstance(item, int):
self.input_dict[self.input_keys[item]] = value
elif isinstance(item, str):
self.input_keys.append(item)
self.input_dict[item] = value
else:
raise ValueError("Only integer and string indexed writes allowed.")
def keys(self):
return self.input_keys
def __len__(self):
return len(self.input_keys)
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n < len(self.input_keys):
output = self.input_dict[self.input_keys[self.n]]
self.n += 1
return output
raise StopIteration
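# Illustrative usage sketch (hypothetical names): oneflow_input preserves insertion
# order and supports both keyed and positional reads,
#   ins = oneflow_input()
#   ins["conv.weight"] = weight_expr
#   ins["conv_input.0"] = data_expr
#   ins[0] is ins["conv.weight"]   # True
#   ins[5] is None                 # out-of-range integer access returns None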
def deal_with_input_convert(
node_input, node_input_shape, node_input_dtype, node_path, _nodes, _input_path_2_name
):
"""deal with input convert in oneflow."""
if node_input not in _nodes:
if (
node_path not in _input_path_2_name
or "_input." in node_input
or "FreeEagerTensor" in node_input
):
_nodes[node_input] = new_var(
node_input,
shape=node_input_shape,
dtype=node_input_dtype,
)
else:
names = _input_path_2_name[node_path]
node_replace = None
for k in names:
if k in _nodes:
node_replace = k
if node_replace is not None:
op_replace = copy.deepcopy(_nodes[node_replace])
_nodes[node_input] = op_replace
else:
print("{} will not be in _nodes".format(node_input))
def deal_parameter_convert(
node_input_paths, model_dir_path, _input_path_2_name, _model_array, _params, _nodes
):
"""deal with parameter(weight) convert in oneflow."""
for node_input_path in node_input_paths:
node_path = os.path.join(model_dir_path, node_input_path.replace("m.", "", 1))
node_input_name = node_input_path.split("/")[0]
_input_path_2_name[node_path] = node_input_name
for param_name in _model_array:
node_p = _model_array[param_name]
if node_path == node_p["path"]:
node_array = node_p["params"]
_params[node_input_name] = node_array
_nodes[node_input_name] = new_var(
node_input_name, shape=node_array.shape, dtype=str(node_array.dtype)
)
break
class OneflowGraph(object):
"""
A helper class for handling Relay expression
Parameters
----------
shape : dict of str to tuple, optional
The input shape to the graph
dtype : dict of str to str
The input types to the graph
node name:
1. param: m.layer4.1.bn1.weight / ...
2. buffer: m.layer4.1.bn1.running_mean / ...
3. node inputs: m.layer4.1.bn1_input.0
4. node outputs: m.layer4.1.bn1_output.0
"""
def __init__(self, shape, dtype, nodes, model_dir_path):
self._nodes = {}
self._params = {}
self._inputs = {}
self._num_input = 0
self._num_param = 0
self._input_names = []
self._model_array = {}
self._input_path_2_name = {}
self._output_path_2_name = {}
self._init_variable_node = []
self._shape = shape
self._dtype = dtype
self._identity_list = []
self._sort_inputs = {}
import oneflow
model = oneflow.load(model_dir_path)
# model_array: keys: layer_name, values: dict('path', 'params')
for layer_name in model:
layer = model[layer_name]
layer_node = {}
layer_node["path"] = os.path.join(model_dir_path, layer_name, "out") # get path
if "System-Train" in layer_name:
continue
node_name = "m." + layer_name
shape = self._shape[node_name]
dtype = self._dtype[node_name]
array = layer.detach().cpu().numpy()
layer_node["params"] = array.reshape(shape)
self._model_array[layer_name] = layer_node
for node_name in nodes:
node = nodes[node_name]
if is_user_op(node):
for input_name in node.user_conf.input:
node_input_paths = getattr(node.user_conf.input[input_name], "s")
deal_parameter_convert(
node_input_paths,
model_dir_path,
self._input_path_2_name,
self._model_array,
self._params,
self._nodes,
)
for output_name in node.user_conf.output:
node_output_paths = getattr(node.user_conf.output[output_name], "s")
for node_output_path in node_output_paths:
node_path = os.path.join(model_dir_path, node_output_path.replace("m.", ""))
node_output_name = node_output_path.split("/")[0]
self._output_path_2_name[node_path] = node_output_name
elif is_output_op(node):
node_output_path = getattr(node.output_conf, "in")
output_path = os.path.join(
model_dir_path, getattr(node.output_conf, "in").replace("m.", "")
)
self._output_path_2_name[output_path] = node_name
elif is_param_op(node):
if "FreeEagerTensor" in node.name:
shape = tuple(node.variable_conf.shape.dim)
dtype = FLOW_2_STR_DTYPE[node.variable_conf.data_type]
self._shape[node.name] = shape
self._dtype[node.name] = dtype
self._init_variable_node.append(node.name)
if self._init_variable_node != []:
print("{} should be defined by user".format(self._init_variable_node))
def _parse_input(self, node, model_dir_path):
input_user_conf_list = []
for input_name in node.user_conf.input:
input_user_conf_list.append(input_name)
input_user_conf_list.sort()
for input_name in input_user_conf_list:
node_input_paths = getattr(node.user_conf.input[input_name], "s")
for i in node_input_paths:
node_input = i.split("/")[0]
node_input_shape = self._shape[node_input]
node_input_dtype = self._dtype[node_input]
node_path = os.path.join(model_dir_path, i.replace("m.", ""))
deal_with_input_convert(
node_input,
node_input_shape,
node_input_dtype,
node_path,
self._nodes,
self._input_path_2_name,
)
def _parse_output(self, op_name, outputs, cnt_init=0):
"""
o: m.classifier.1_output.xxx
new_o: m.classifier.1-conv2d_0
"_"+new_o_xxx is in self._shape
"""
for o in outputs:
if "_output." not in o:
new_o = o.replace("-" + op_name, "_output")
new_o = new_o.replace("-" + new_o.split("-")[-1], ".0")
for k in self._shape.keys():
if new_o in k:
self._shape[o] = self._shape[k]
self._dtype[o] = self._dtype[k]
break
elif len(outputs) > 1:
outputs.remove(o)
if op_name.lower() == "dropout":
if len(outputs) == 1:
return outputs
outputs = outputs[:-1]
elif op_name.lower() == "constant":
outputs = [self._init_variable_node[cnt_init]]
if len(outputs) > 1:
outputs = list(set(outputs))
return outputs
def from_oneflow(self, nodes, model_dir_path):
"""
Convert the OneFlow model into an equivalent Relay Function.
"""
# step 1: find out if unsupported ops are used
convert_map = get_convert_map()
unsupported_ops = set()
for node_name in nodes:
node = nodes[node_name]
if is_user_op(node):
# op names, not the layer names
op_name = node.user_conf.op_type_name
if (
op_name not in convert_map
and "constant" not in op_name
and op_name not in self._identity_list
):
unsupported_ops.add(op_name)
# find out the unsupported op
if unsupported_ops:
msg = "The following operators are not supported for frontend OneFlow: "
msg += ", ".join(unsupported_ops)
raise tvm.error.OpNotImplemented(msg)
# step 2: convert op
for node_name in nodes:
node = nodes[node_name]
if is_user_op(node):
# If there is a user-defined node, skip the following steps
if node_name in self._inputs:
continue
op_name = node.user_conf.op_type_name
op_attr = parse_attr(node.user_conf.attr)
self._parse_input(node, model_dir_path=model_dir_path)
node_inputs = oneflow_input()
input_user_conf_list = []
for input_name in node.user_conf.input:
input_user_conf_list.append(input_name)
input_user_conf_list.sort()
for input_name in input_user_conf_list:
node_input_paths = getattr(node.user_conf.input[input_name], "s")
for i in node_input_paths:
node_input = i.split("/")[0]
node_inputs[node_input] = self._nodes[node_input]
node_outputs = []
for output_name in node.user_conf.output:
node_output_paths = getattr(node.user_conf.output[output_name], "s")
for i in node_output_paths:
node_output_path = os.path.join(model_dir_path, i.replace("m.", ""))
if node_output_path in self._input_path_2_name:
node_outputs.append(self._input_path_2_name[node_output_path])
elif node_output_path in self._output_path_2_name:
node_outputs.append(self._output_path_2_name[node_output_path])
node_outputs = self._parse_output(op_name, node_outputs)
# convert
op = self._convert_operator(op_name, node_inputs, op_attr)
if not isinstance(op, _expr.TupleWrapper):
outputs_num = 1
else:
outputs_num = len(op)
assert (
len(node_outputs) == outputs_num
), "Number of output mismatch {} vs {} in {}.".format(
len(node_outputs), outputs_num, op_name
)
if outputs_num == 1:
op = fold_constant(op)
else:
op = _expr.TupleWrapper(fold_constant(op.astuple()), len(op))
op_temp = []
op_temp.append(op)
for i, _ in enumerate(node_outputs):
if isinstance(node_outputs[i], list):
for k in node_outputs[i]:
self._nodes[k] = op_temp[i]
else:
self._nodes[node_outputs[i]] = op_temp[i]
# step 3: get the outputs
outputs = []
for node_name, node in nodes.items():
if is_output_op(node):
node_name_v2 = getattr(node.output_conf, "in").split("/")[0]
if node_name in self._nodes:
outputs.append(self._nodes[node_name])
elif node_name_v2 in self._nodes:
outputs.append(self._nodes[node_name_v2])
outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
# step 4: get the relay IR
free_vars = analysis.free_vars(outputs)
nodes = {v: k for k, v in self._nodes.items()}
free_vars = [nodes[var] for var in free_vars]
free_vars_inputs = []
free_vars_parameters = []
for x in free_vars:
if "_input.0" in x:
free_vars_inputs.append(x)
else:
free_vars_parameters.append(x)
free_vars = free_vars_inputs + free_vars_parameters
# step 5: make sure the '_input.0' is the first in self._inputs
for free_var in free_vars:
if free_var not in self._inputs:
self._inputs[free_var] = self._nodes[free_var]
input_names = list(self._inputs.keys())
for input_name in input_names:
if input_name in self._inputs:
self._sort_inputs[input_name] = self._inputs[input_name]
else:
raise IndexError("{} is not in self._inputs".format(input_name))
# step 6: create a function from our output expression and all input variables.
func = _function.Function([v for _, v in self._sort_inputs.items()], outputs)
return IRModule.from_expr(func), self._params
def _convert_operator(self, op_name, node_inputs, op_attr):
"""
Parameters
----------
op_name : str
Operator name, such as conv2d and relu
node_inputs : list of tvm.relay.function.Function
List of inputs.
op_attr : dict
Dict of operator attributes
Returns
-------
sym : tvm.relay.function.Function
Converted relay function
"""
convert_map = get_convert_map()
if op_name in self._identity_list:
sym = get_relay_op(op_name)(*node_inputs, **op_attr)
elif op_name in convert_map:
sym = convert_map[op_name](node_inputs, op_attr, self._params)
else:
raise NotImplementedError("Operator {} not implemented.".format(op_name))
return sym
def from_oneflow(graph, model_dir_path):
"""Convert a OneFlow model into an equivalent Relay Function.
At present, there are two ways to run models in a deep learning framework:
Dynamic Graph and Static Graph, which are also called Eager Mode and Graph
Mode in OneFlow.
In general, dynamic graphs are easier to use and static graphs have better performance.
OneFlow offers nn.Graph, so that users can use an eager-like programming style to build
static graphs and train the models.
We utilize the intermediate representation of nn.Graph to convert the OneFlow model to Relay.
Parameters
----------
graph : oneflow.nn.Graph
The OneFlow graph containing the model's nodes
model_dir_path: str
The path to the directory of saved model weights
Returns
-------
mod : tvm.IRModule
The returned relay module
params : dict
A dict of name: tvm.nd.array pairs, used as pretrained weights
"""
try:
import oneflow as flow
except ImportError:
raise ImportError("please check that OneFlow is installed")
# get info of nodes
shape = {}
dtype = {}
graph_str = repr(graph)
size_where = 2
if "cuda" in graph_str:
size_where = 3
p_size = re.compile(r"size=\(.*?\)", re.S)
p_type = re.compile(r"dtype=.*?\)", re.S)
types = ["INPUT", "PARAMETER", "BUFFER", "OUTPUT"]
for t in types:
data = re.finditer(t + ":.*", graph_str)
for i in data:
attrs = i.group().split(":")
size_str = re.findall(p_size, attrs[size_where])
type_str = re.findall(p_type, attrs[size_where])
assert size_str != [], "size should not be None, please check your repr(graph)"
size_attr = size_str[0].replace("size=", "")
if size_attr[-2] == ",":
size_attr = size_attr.replace(",", "")
data_size = tuple(map(int, size_attr[1:-1].split(", ")))
node_name = attrs[1]
shape[node_name] = data_size
dtype[node_name] = "float32"
if type_str != []:
type_attr = type_str[0].replace("dtype=", "").replace(")", "")
if type_attr[-1] == ",":
type_attr = type_attr.replace(",", "")
dtype[node_name] = type_attr.replace("oneflow.", "")
# get graph proto, if you don't _compile the graph, the _graph_proto will be None
graph_input = re.search(r"INPUT:.*", graph_str).group().split(":")
shape_input = tuple(
map(
int,
re.findall(p_size, graph_input[size_where])[0].replace("size=", "")[1:-1].split(", "),
)
)
if not graph._is_compiled:
graph._compile(flow.rand(shape_input))
graph_proto = graph._graph_proto
# get all nodes
nodes = OrderedDict()
for op in graph_proto.net.op:
nodes[op.name] = op
g = OneflowGraph(shape, dtype, nodes, model_dir_path)
# Use the graph proto as a scope so that ops can access other nodes if needed.
mod, params = g.from_oneflow(nodes=nodes, model_dir_path=model_dir_path)
return mod, params
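# Illustrative usage sketch (paths and names are hypothetical):
#   graph = MyGraph()   # a user-defined, already-built oneflow.nn.Graph
#   mod, params = from_oneflow(graph, "/path/to/saved/model")
#   # mod is a tvm.IRModule, params is a dict of named weights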
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/onnx.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines
# pylint: disable=import-outside-toplevel
"""ONNX: Open Neural Network Exchange frontend for Relay."""
import copy
import math
import warnings
from typing import Optional
import numpy as np
import tvm
from tvm import relay
from tvm.ir import IRModule
from tvm.topi.utils import get_const_tuple
from ... import nd as _nd
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import loops as _loops
from .. import op as _op
from .. import qnn as _qnn
from .. import random as _random
from .. import ty as _ty
from .. import vision as _vision
from .common import (
AttrCvt,
Renamer,
autopad,
ensure_scalar_shape,
fold_constant,
get_name,
get_relay_op,
gru_cell,
infer_channels,
infer_shape,
infer_type,
infer_value,
lstm_cell,
new_var,
rnn_cell,
shape_of,
try_resolve_var_to_const,
unbind,
)
__all__ = ["from_onnx"]
# The default configurations of Relay ONNX frontend.
ONNX_DEFAULT_CONFIGS = {
# By default, TVM converts qualified onnx `matmul` to `transpose(weight) + nn.batch_matmul_NT`.
# Change this flag to False to directly convert to `nn.batch_matmul`.
# Note that `nn.batch_matmul` with a format other than NT is experimental and may have
# performance issues.
"use_nt_batch_matmul": True,
}
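# Hedged usage sketch (added for illustration, not part of the upstream API surface):
# since ONNX_DEFAULT_CONFIGS is a module-level dict read at conversion time, a caller
# who prefers plain NN-format batch_matmul could, as an assumption, flip the flag
# before importing a model:
#
#   from tvm.relay.frontend import onnx as onnx_frontend
#   onnx_frontend.ONNX_DEFAULT_CONFIGS["use_nt_batch_matmul"] = False
#   mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)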
class onnx_input(list):
"""A helper extension to list that returns None for out of bound indices."""
def __getitem__(self, item):
if isinstance(item, slice):
if item.stop is None:
stop = len(self)
else:
stop = item.stop
indices = list(range(stop)[item])
return [self[i] for i in indices]
if isinstance(item, int):
return list(self)[item] if item < len(self) else None
raise TypeError("list indices must be integers or slices, not %s" % type(item).__name__)
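# Hedged usage sketch (hand-written example, not from the original source):
# out-of-range integer indices yield None and slices are padded with None,
# which lets converters unpack optional ONNX inputs without bounds checks.
#
#   >>> vals = onnx_input(["a", "b"])
#   >>> vals[1]
#   'b'
#   >>> vals[5] is None
#   True
#   >>> vals[0:4]
#   ['a', 'b', None, None]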
def get_numpy(tensor_proto):
"""Grab data in TensorProto and convert to numpy array."""
try:
from onnx.numpy_helper import to_array
except ImportError as e:
raise ImportError("Unable to import onnx which is required {}".format(e))
return to_array(tensor_proto)
def get_type(elem_type):
"""Converts onnx integer datatype to numpy datatype"""
# If a string was passed instead of a tensor type, it does not need
# conversion and can be returned.
if isinstance(elem_type, str):
return elem_type
try:
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
except ImportError as e:
raise ImportError("Unable to import onnx which is required {}".format(e))
try:
from onnx import TensorProto
except ImportError as e:
raise ImportError("Unable to import TensorProto from onnx {}".format(e))
# Onnx mapping converts bfloat16 to float16 because
# numpy does not have a bfloat16 data type. However,
# tvm has one, so we force the return type to be bfloat16
if elem_type == int(TensorProto.BFLOAT16):
return "bfloat16"
return str(TENSOR_TYPE_TO_NP_TYPE[elem_type])
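# Illustrative sketch (values assumed from the standard ONNX TensorProto enum):
# integer element types map to numpy dtype strings, except BFLOAT16, which is
# kept as TVM's "bfloat16" instead of onnx.mapping's float16 fallback.
#
#   >>> get_type(1)    # TensorProto.FLOAT
#   'float32'
#   >>> get_type(7)    # TensorProto.INT64
#   'int64'
#   >>> get_type(16)   # TensorProto.BFLOAT16
#   'bfloat16'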
def get_info(info_proto):
"""Extract the shape from a ValueInfoProto."""
shape = []
shape_name = []
for dim in info_proto.type.tensor_type.shape.dim:
name = dim.dim_param
value = dim.dim_value
if value is None or value == 0:
value = _ty.Any()
shape_name.append(name)
else:
shape_name.append(value)
shape.append(value)
name = info_proto.name
if info_proto.type.tensor_type.elem_type:
dtype = get_type(info_proto.type.tensor_type.elem_type)
else:
dtype = None
return name, shape, dtype, shape_name
def dimension_picker(prefix, suffix=""):
"""Check that dimensions are supported."""
def _impl(attr):
kernel = attr["kernel_shape"]
if len(kernel) == 1:
return prefix + "1d" + suffix
if len(kernel) == 2:
return prefix + "2d" + suffix
if len(kernel) == 3:
return prefix + "3d" + suffix
msg = "Only 1D, 2D, and 3D kernels are supported for operator {}."
op_name = prefix + "1d/2d/3d"
raise tvm.error.OpAttributeInvalid(msg.format(op_name))
return _impl
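# Hedged example (hand-computed from the helper above, not in the original):
# the returned closure inspects kernel_shape and appends the dimensionality.
#
#   >>> picker = dimension_picker("max_pool")
#   >>> picker({"kernel_shape": [3, 3]})
#   'max_pool2d'
#   >>> picker({"kernel_shape": [2]})
#   'max_pool1d'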
def revert_caffe2_pad(pads):
"""Caffe2 requires two times the normal padding."""
if len(pads) == 4:
pads = pads[:2]
elif len(pads) == 2:
pass
else:
raise tvm.error.OpAttributeInvalid("Number of pads must be either 2 or 4.")
return pads
def get_pad_pair(input1d, kernel1d, stride1d, mode):
"""infer pad size"""
if input1d % stride1d == 0:
pad = max(kernel1d - stride1d, 0)
else:
pad = max(kernel1d - (input1d % stride1d), 0)
pad_before = pad // 2
pad_after = pad - pad_before
if "LOWER" in mode:
return [pad_after, pad_before]
return [pad_before, pad_after]
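# Hedged example (hand-computed, not part of the original source): the total
# SAME padding is split in half, with the odd element going after the data for
# SAME_UPPER and before it for SAME_LOWER.
#
#   >>> get_pad_pair(5, 2, 2, "SAME_UPPER")
#   [0, 1]
#   >>> get_pad_pair(5, 2, 2, "SAME_LOWER")
#   [1, 0]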
def onnx_default_layout(dims, op_name):
if dims == 1:
return "NCW"
if dims == 2:
return "NCHW"
if dims == 3:
return "NCDHW"
msg = "Only 1D, 2D and 3D layouts are currently supported for operator {}."
raise tvm.error.OpAttributeInvalid(msg.format(op_name))
def onnx_storage_order2layout(storage_order, dims, op_name):
"""converter of onnx storage order parameter to tvm storage order format"""
if storage_order not in (0, 1):
raise tvm.error.OpAttributeInvalid("Mode of storage_order must be either 0 or 1")
if dims == 1:
return "NCW" if storage_order == 0 else "NWC"
if dims == 2:
return "NCHW" if storage_order == 0 else "NHWC"
if dims == 3:
return "NCDHW" if storage_order == 0 else "NDHWC"
msg = "Only 1D, 2D and 3D layouts are currently supported for operator {}."
raise tvm.error.OpAttributeInvalid(msg.format(op_name))
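# Hedged example (hand-computed from the mapping above): storage_order 0 keeps
# the channel-first layout, storage_order 1 selects the channel-last variant.
#
#   >>> onnx_storage_order2layout(0, dims=2, op_name="MaxPool")
#   'NCHW'
#   >>> onnx_storage_order2layout(1, dims=2, op_name="MaxPool")
#   'NHWC'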
def dimension_constraint():
def _dim_check(attrs):
if len(attrs["kernel_shape"]) in [1, 2, 3]:
return True
return False
return _dim_check, "Only 1d, 2d and 3d kernel supported."
def get_scalar(x, params, dtype="float32"):
"""Helper to get a scalar value for Quantized operators."""
if isinstance(x, _expr.Var) and x.name_hint in params:
return _op.const(params[x.name_hint].numpy(), dtype)
rank = len(infer_shape(x))
assert rank <= 1, "scale and zero_point input must be scalars"
if rank == 1:
x = _op.squeeze(x, [0])
return _op.cast(x, dtype)
def get_scalar_or_1d_tensor(x, params, dtype="float32"):
"""Helper to get a scalar value or 1D tensor for Quantized operators."""
if isinstance(x, _expr.Var) and x.name_hint in params:
return _op.const(params[x.name_hint].numpy(), dtype)
rank = len(infer_shape(x))
assert rank <= 1, "scale and zero_point input must be scalars or 1D tensors"
return _op.cast(x, dtype)
def flatten_to_nd(x, x_shape, nd=3):
"""Flatten input tensor to nd rank"""
ndims = infer_shape(x_shape)[0]
if ndims == nd:
return x
newshape = _op.concatenate(
[
_expr.const([-1], dtype=infer_type(x_shape).checked_type.dtype),
_op.strided_slice(x_shape, [ndims - nd + 1], [ndims]),
],
0,
)
out = _op.reshape(x, fold_constant(newshape))
return out
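# Hedged example (shapes assumed for illustration): the leading batch
# dimensions are collapsed into one while the trailing nd-1 dimensions are
# kept, so a (2, 3, 4, 5) tensor flattened to nd=3 becomes (6, 4, 5) and
# nd=2 gives (24, 5). x_shape is expected to be the relay shape expression
# of x, e.g. shape_of(x).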
def matmul_out_dtype(inputs, out_dtype):
"""Common function to handle MatMul and MatMulInteger16"""
a_shape = shape_of(inputs[0])
a_rank = infer_shape(a_shape)[0]
b_shape = shape_of(inputs[1])
b_rank = infer_shape(b_shape)[0]
if a_rank > 2 or b_rank > 2:
# Determine the output batch dimension.
new_a_shape = a_shape
new_b_shape = b_shape
if a_rank > b_rank:
rank_diff = a_rank - b_rank
new_b_shape = _op.concatenate(
[
_expr.const([1] * rank_diff, dtype=infer_type(b_shape).checked_type.dtype),
b_shape,
],
0,
)
elif a_rank < b_rank:
rank_diff = b_rank - a_rank
new_a_shape = _op.concatenate(
[
_expr.const([1] * rank_diff, dtype=infer_type(a_shape).checked_type.dtype),
a_shape,
],
0,
)
else:
pass
out_batch = _op.concatenate(
[
_op.maximum(
_op.strided_slice(new_b_shape, [i], [i + 1]),
_op.strided_slice(new_a_shape, [i], [i + 1]),
)
for i in range(max(a_rank, b_rank) - 2)
],
0,
)
b_type = infer_type(inputs[1])
# Convert to dense if the second matrix is 2d and non-dynamic
if b_rank == 2 and not _ty.is_dynamic(b_type.checked_type):
a = flatten_to_nd(inputs[0], a_shape, 2)
b = _op.transpose(inputs[1])
output = _op.nn.dense(a, b, out_dtype=out_dtype)
else:
a = inputs[0]
b = inputs[1]
# broadcast a and b
a_broadcasted_shape = fold_constant(
_op.concatenate(
[
out_batch,
_op.strided_slice(a_shape, [a_rank - 2], [a_rank]),
],
0,
)
)
b_broadcasted_shape = fold_constant(
_op.concatenate(
[
out_batch,
_op.strided_slice(b_shape, [b_rank - 2], [b_rank]),
],
0,
)
)
if not tvm.ir.structural_equal(a_shape, a_broadcasted_shape):
a = _op.transform.broadcast_to(a, a_broadcasted_shape)
if not tvm.ir.structural_equal(b_shape, b_broadcasted_shape):
b = _op.transform.broadcast_to(b, b_broadcasted_shape)
# Convert a and b into 3 dimensional tensors.
a = flatten_to_nd(a, shape_of(a), 3)
b = flatten_to_nd(b, shape_of(b), 3)
if ONNX_DEFAULT_CONFIGS["use_nt_batch_matmul"]:
# Transpose matrix dimensions of b.
bt = _op.transpose(b, [0, 2, 1])
# Perform a NT batch matmul.
output = _op.nn.batch_matmul(a, bt, out_dtype=out_dtype)
else:
# Perform a NN batch matmul.
output = _op.nn.batch_matmul(a, b, out_dtype=out_dtype, transpose_b=False)
# Reshape output to original dimensions.
final_shape = _op.concatenate(
[
out_batch,
_op.strided_slice(
a_shape, [infer_shape(a_shape)[0] - 2], [infer_shape(a_shape)[0] - 1]
),
_op.strided_slice(
b_shape, [infer_shape(b_shape)[0] - 1], [infer_shape(b_shape)[0]]
),
],
0,
)
return _op.reshape(output, fold_constant(final_shape))
if a_rank == 1:
return _op.squeeze(_op.nn.matmul(_op.expand_dims(inputs[0], axis=0), inputs[1]), axis=[0])
# Otherwise a simple dense op will get the job done.
input_1_t = _op.transpose(inputs[1], axes=(1, 0))
return _op.nn.dense(inputs[0], input_1_t, out_dtype=out_dtype)
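# Hedged walk-through (shapes assumed for illustration, not from the original
# source): for a of shape (2, 3, 4, 5) and a static b of shape (5, 6), the
# second operand is 2-D, so `a` is flattened to (24, 5), `b` is transposed to
# (6, 5), a single nn.dense produces (24, 6), and the result is reshaped back
# to the broadcast batch shape (2, 3, 4, 6). When b is higher-rank or dynamic,
# both operands are broadcast, flattened to rank 3, and fed to nn.batch_matmul
# instead.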
def qmatmul(
a,
b,
a_zp_scalar,
b_zp_scalar,
a_scale_scalar,
b_scale_scalar,
transform_num_hidden_units,
matmul_result_dtype,
):
"""
Helper function to handle QLinearMatMul
It is very close to 'matmul_out_dtype' but separated due to
differences in signatures of dense, matmul, batch_matmul of nn and qnn.
They require scaling and zero point arguments
"""
a_shape = shape_of(a)
a_rank = infer_shape(a_shape)[0]
b_shape = shape_of(b)
b_rank = infer_shape(b_shape)[0]
if a_rank > 2 or b_rank > 2:
# Determine the output batch dimension.
new_a_shape = a_shape
new_b_shape = b_shape
if a_rank > b_rank:
rank_diff = a_rank - b_rank
new_b_shape = _op.concatenate(
[
_expr.const([1] * rank_diff, dtype=infer_type(b_shape).checked_type.dtype),
b_shape,
],
0,
)
elif a_rank < b_rank:
rank_diff = b_rank - a_rank
new_a_shape = _op.concatenate(
[
_expr.const([1] * rank_diff, dtype=infer_type(a_shape).checked_type.dtype),
a_shape,
],
0,
)
else:
pass
out_batch = _op.concatenate(
[
_op.maximum(
_op.strided_slice(new_b_shape, [i], [i + 1]),
_op.strided_slice(new_a_shape, [i], [i + 1]),
)
for i in range(max(a_rank, b_rank) - 2)
],
0,
)
b_type = infer_type(b)
# Convert to dense if the second matrix is 2d and non-dynamic
if b_rank == 2 and not _ty.is_dynamic(b_type.checked_type):
a = flatten_to_nd(a, a_shape, 2)
b = _op.transpose(b)
output = _qnn.op.dense(
a,
b,
a_zp_scalar,
b_zp_scalar,
a_scale_scalar,
b_scale_scalar,
transform_num_hidden_units,
matmul_result_dtype,
)
else:
# broadcast a and b
a_broadcasted_shape = fold_constant(
_op.concatenate(
[
out_batch,
_op.strided_slice(a_shape, [a_rank - 2], [a_rank]),
],
0,
)
)
b_broadcasted_shape = fold_constant(
_op.concatenate(
[
out_batch,
_op.strided_slice(b_shape, [b_rank - 2], [b_rank]),
],
0,
)
)
if not tvm.ir.structural_equal(a_shape, a_broadcasted_shape):
a = _op.transform.broadcast_to(a, a_broadcasted_shape)
if not tvm.ir.structural_equal(b_shape, b_broadcasted_shape):
b = _op.transform.broadcast_to(b, b_broadcasted_shape)
# Convert a and b into 3 dimensional tensors.
a = flatten_to_nd(a, shape_of(a), 3)
b = flatten_to_nd(b, shape_of(b), 3)
# Transpose matrix dimensions of b.
bt = _op.transpose(b, [0, 2, 1])
# Perform a NT batch matmul.
output = _qnn.op.batch_matmul(
a,
bt,
a_zp_scalar,
b_zp_scalar,
a_scale_scalar,
b_scale_scalar,
matmul_result_dtype,
)
# Reshape output to original dimensions.
final_shape = _op.concatenate(
[
out_batch,
_op.strided_slice(a_shape, [a_rank - 2], [a_rank - 1]),
_op.strided_slice(b_shape, [b_rank - 1], [b_rank]),
],
0,
)
return _op.reshape(output, fold_constant(final_shape))
if a_rank == 1:
# TODO(vvchernov): There should be qnn.matmul but it is not implemented
# return _op.squeeze(_qnn.op.matmul(_op.expand_dims(a, axis=0),
# b,
# a_zp_scalar,
# b_zp_scalar,
# a_scale_scalar,
# b_scale_scalar,
# transform_num_hidden_units,
# matmul_result_dtype,
# ),
# axis=[0]
# )
return _op.squeeze(
_qnn.op.dense(
_op.expand_dims(a, axis=0),
_op.transpose(b),
a_zp_scalar,
b_zp_scalar,
a_scale_scalar,
b_scale_scalar,
transform_num_hidden_units,
matmul_result_dtype,
),
axis=[0],
)
# Otherwise a simple dense op will get the job done.
return _qnn.op.dense(
a,
_op.transpose(b),
a_zp_scalar,
b_zp_scalar,
a_scale_scalar,
b_scale_scalar,
transform_num_hidden_units,
matmul_result_dtype,
)
def layer_norm(x, eps, gamma, beta):
"""A common function to handle layer norm.
Use LayerNormalization for the actual onnx op.
"""
eps_dtype = infer_type(x).checked_type.dtype
u, s = _op.mean_variance(x, axis=-1, keepdims=True)
output = _op.divide(
_op.subtract(x, u),
_op.sqrt(_op.add(s, _op.const(eps, dtype=eps_dtype))),
)
output = _op.multiply(output, gamma)
if beta is not None:
output = _op.add(output, beta)
return output
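# Hedged note (added for clarity): the helper computes the standard
# normalization over the last axis,
#
#   layer_norm(x) = gamma * (x - mean(x, -1)) / sqrt(var(x, -1) + eps) + beta
#
# where beta is optional; the LayerNormalization converter below implements
# the full ONNX op with an arbitrary starting axis.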
class OnnxOpConverter(object):
"""A helper class for holding onnx op converters."""
@classmethod
def get_converter(cls, opset):
"""Get converter matches given opset.
Parameters
----------
opset: int
opset from model.
Returns
-------
converter, which should be `_impl_vx`. Number x is the largest
version number that is smaller than or equal to the requested opset among all supported versions.
"""
versions = [int(d.replace("_impl_v", "")) for d in dir(cls) if "_impl_v" in d]
versions = sorted(versions + [opset])
version = versions[max([i for i, v in enumerate(versions) if v == opset]) - 1]
if hasattr(cls, "_impl_v{}".format(version)):
return getattr(cls, "_impl_v{}".format(version))
raise NotImplementedError(
"opset version {} of {} not implemented".format(version, cls.__name__)
)
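# Hedged dispatch example (hypothetical converter, hand-computed from the
# version-selection logic above): a converter defining _impl_v1, _impl_v9 and
# _impl_v13 resolves opset 11 to _impl_v9 (the highest version <= 11) and
# opset 13 to _impl_v13.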
class Unary(OnnxOpConverter):
"""A helper class for unary op converters."""
name = ""
@classmethod
def _impl_v1(cls, inputs, attr, params):
assert len(inputs) == 1, "Unary math op {} takes 1 input, {} given".format(
cls.name, len(inputs)
)
op_name = cls.name
return get_relay_op(op_name)(*inputs)
class Elemwise(OnnxOpConverter):
"""A helper class for elemwise op converters."""
name = ""
@classmethod
def _impl_v1(cls, inputs, attr, params):
assert len(inputs) == 2, "Math op {} take 2 inputs, {} given".format(cls.name, len(inputs))
op_name = cls.name
conv_ops = ["conv2d", "conv2d_transpose"]
if attr.get("broadcast", 0) and any(x in str(inputs[0]) for x in conv_ops):
# TODO(zhreshold): remove hard coded infershape
axis = int(attr.get("axis", 0))
inputs[1] = _op.expand_dims(inputs[1], axis=axis, num_newaxis=2)
return get_relay_op(op_name)(*inputs)
class Pool(OnnxOpConverter):
"""A helper class for pool op converters."""
name = ""
@classmethod
def _impl_v1(cls, inputs, attr, params):
data = inputs[0]
input_shape = infer_shape(data)
ndim = len(input_shape)
attr_cvt, data = cls._run_calculation(inputs, attr, params)
out = attr_cvt([data], attr, params)
if ndim - len(attr["kernel_shape"]) == 1:
out = _op.squeeze(out, axis=[0])
return out
@classmethod
def _run_calculation(cls, inputs, attr, params):
"""Helper method to return the processed input data and AttrCvt object"""
data = inputs[0]
input_shape = infer_shape(data)
input_dtype = infer_type(data).checked_type.dtype
ndim = len(input_shape)
if "auto_pad" in attr:
attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
if cls.name == "avg_pool":
pad_tuple = []
for axis in range(len(input_shape) - 2):
axis_shape = input_shape[2 + axis]
stride = attr.get("strides", [1] * ndim)[axis]
kernel = attr["kernel_shape"][axis]
pad = get_pad_pair(axis_shape, kernel, stride, attr["auto_pad"])
pad_tuple.append(pad)
pad_tuple = tuple([val for pair in zip(*pad_tuple) for val in pair])
attr["pads"] = pad_tuple
else:
# Warning: Pool does not yet support dynamic shapes,
# one will need to run dynamic_to_static on this model after import
if "int" in input_dtype:
pad_val = np.iinfo(np.dtype(input_dtype)).min
else:
pad_val = np.finfo(np.dtype(input_dtype)).min
data = autopad(
data,
attr.get("strides", [1] * (ndim - 2)),
attr["kernel_shape"],
[1] * ndim,
pad_value=pad_val,
mode=attr["auto_pad"],
)
elif attr["auto_pad"] == "VALID":
attr["pads"] = tuple([0 for i in range(ndim - 2)])
elif attr["auto_pad"] == "NOTSET":
pass
else:
msg = 'Value {} in attribute "auto_pad" of operator {} is invalid.'
raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"], cls.name))
attr.pop("auto_pad")
if "storage_order" in attr:
attr["layout"] = onnx_storage_order2layout(
attr["storage_order"], dims=(len(input_shape) - 2), op_name=cls.name
)
else:
if ndim - len(attr["kernel_shape"]) == 1:
data = _op.expand_dims(data, axis=0)
input_shape = [1] + list(input_shape)
attr["layout"] = onnx_default_layout(dims=(len(input_shape) - 2), op_name=cls.name)
return (
AttrCvt(
op_name=dimension_picker(cls.name),
transforms={
"kernel_shape": "pool_size",
"pads": ("padding", 0),
"dilations": ("dilation", 1),
},
ignores=["storage_order"],
custom_check=dimension_constraint(),
),
data,
)
class Absolute(Unary):
"""Operator converter for Absolute."""
name = "abs"
class Add(Elemwise):
"""Operator converter for Add."""
name = "add"
class AveragePool(Pool):
"""Operator converter for AveragePool."""
name = "avg_pool"
class QLinearAveragePool(Pool):
"""Operator converter for QLinearAveragePool from Microsoft onnxruntime contrib opset."""
name = "avg_pool"
@classmethod
def _impl_v1(cls, inputs, attr, params):
x_scale = get_scalar(inputs[1], params)
x_zero_point = get_scalar(inputs[2], params, dtype="int32")
y_scale = fold_constant(get_scalar(inputs[3], params))
y_zero_point = get_scalar(inputs[4], params, dtype="int32")
attr_cvt, data = cls._run_calculation(inputs, attr, params)
input_dtype = infer_type(data).checked_type.dtype
# Onnxruntime doesn't actually do this op in integer; it dequantizes to fp32
# and then requantizes after (according to the documentation below)
# https://github.com/microsoft/onnxruntime/blob/master/docs/ContribOperators.md#com.microsoft.QLinearAveragePool
float_node = _qnn.op.dequantize(data, x_scale, x_zero_point)
out = attr_cvt([float_node], attr, params)
return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=input_dtype)
class BatchNorm(OnnxOpConverter):
"""Operator converter for BatchNorm."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
# TODO(zhreshold): 'spatial' is not properly handled here.
# TODO(vvchernov): 'training_mode' (onnx tag) is not correctly handled, ignore for now
out = AttrCvt(
op_name="batch_norm",
ignores=["spatial", "is_test", "consumed_inputs", "momentum", "training_mode"],
)(inputs, attr, params)
# We only support test mode, so we return data, moving_mean, moving_var,
# and then moving_mean and moving_var again as placeholders for
# the expected "saved_mean", "saved_var".
return _expr.TupleWrapper(_expr.Tuple((*out, out[1], out[2])), 5)
class InstanceNorm(OnnxOpConverter):
"""Operator converter for BatchNorm."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return AttrCvt(op_name="instance_norm")(inputs, attr, params)
class Conv(OnnxOpConverter):
"""Operator converter for Conv."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
# Use shape of input to determine convolution type.
data = inputs[0]
kernel = inputs[1]
input_shape = infer_shape(data)
ndim = len(input_shape)
kernel_type = infer_type(inputs[1])
kernel_shapes = [get_const_tuple(kernel_type.checked_type.shape)]
if "kernel_shape" not in attr:
attr["kernel_shape"] = kernel_shapes[0][2:]
if "auto_pad" in attr:
attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
# Warning: Convolution does not yet support dynamic shapes,
# one will need to run dynamic_to_static on this model after import
data = autopad(
data,
attr.get("strides", [1] * (ndim - 2)),
attr["kernel_shape"],
attr.get("dilations", [1] * (ndim - 2)),
mode=attr["auto_pad"],
)
elif attr["auto_pad"] == "VALID":
attr["pads"] = [0 for i in range(ndim - 2)]
elif attr["auto_pad"] == "NOTSET":
pass
else:
msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.'
raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"]))
attr.pop("auto_pad")
attr["channels"] = kernel_shapes[0][0]
out = AttrCvt(
op_name=dimension_picker("conv"),
transforms={
"kernel_shape": "kernel_size",
"dilations": ("dilation", 1),
"pads": ("padding", 0),
"group": ("groups", 1),
},
custom_check=dimension_constraint(),
)([data, kernel], attr, params)
use_bias = len(inputs) == 3
if use_bias:
out = _op.nn.bias_add(out, inputs[2])
return out
class ConvTranspose(OnnxOpConverter):
"""Operator converter for ConvTranspose."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
# get number of channels
out_type = infer_type(inputs[1])
kernel_shape = [get_const_tuple(out_type.checked_type.shape)]
out_channels = kernel_shape[0][1] * attr.get("group", 1)
attr["channels"] = out_channels
groups = attr.get("group", 1)
if "kernel_shape" not in attr:
attr["kernel_shape"] = kernel_shape[0][2:]
attr["groups"] = groups
# infer pads for auto_pad
data = inputs[0]
input_shape = infer_shape(data)
ndim = len(input_shape)
if "auto_pad" in attr or "output_shape" in attr:
if "auto_pad" in attr:
attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
if "output_shape" in attr or attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
# Warning: Convolution does not yet support dynamic shapes,
# one will need to run dynamic_to_static on this model after import
kernel_shape = attr["kernel_shape"]
kndim = len(kernel_shape)
dilations = attr.get("dilations", [1] * kndim)
output_padding = attr.get("output_padding", [0] * kndim)
strides = attr["strides"]
total_pad = [0] * kndim
# https://github.com/onnx/onnx/blob/main/docs/Operators.md#ConvTranspose
if "output_shape" in attr:
for i in range(kndim):
total_pad[i] = (
strides[i] * (input_shape[ndim - kndim + i] - 1)
+ output_padding[i]
+ ((kernel_shape[i] - 1) * dilations[i] + 1)
- attr["output_shape"][i]
)
left = [p // 2 for p in total_pad]
right = [total_pad[i] - left[i] for i in range(kndim)]
if "output_shape" in attr and "auto_pad" not in attr:
pad = right + left
elif "LOWER" in attr["auto_pad"]:
pad = left + right
else:
pad = right + left
attr["pads"] = pad
else:
data = autopad(
data,
attr.get("strides", [1] * (ndim - 2)),
attr["kernel_shape"],
attr.get("dilations", [1] * (ndim - 2)),
deconv=True,
mode=attr["auto_pad"],
)
elif attr["auto_pad"] == "VALID":
attr["pads"] = tuple([0 for i in range(ndim - 2)])
elif attr["auto_pad"] == "NOTSET":
pass
else:
msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.'
raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"]))
if "auto_pad" in attr:
attr.pop("auto_pad")
out = AttrCvt(
op_name=dimension_picker("conv", "_transpose"),
transforms={
"kernel_shape": "kernel_size",
"dilations": ("dilation", 1),
"pads": ("padding", 0),
"group": ("groups", 1),
},
disables=["output_shape"],
custom_check=dimension_constraint(),
)([data, inputs[1]], attr, params)
use_bias = len(inputs) == 3
if use_bias:
out = _op.nn.bias_add(out, inputs[2])
return out
@classmethod
def _impl_v11(cls, inputs, attr, params):
# get number of channels
out_type = infer_type(inputs[1])
kernel_shape = [get_const_tuple(out_type.checked_type.shape)]
out_channels = kernel_shape[0][1] * attr.get("group", 1)
attr["channels"] = out_channels
groups = attr.get("group", 1)
if "kernel_shape" not in attr:
attr["kernel_shape"] = kernel_shape[0][2:]
attr["groups"] = groups
# infer pads for auto_pad
data = inputs[0]
input_shape = infer_shape(data)
ndim = len(input_shape)
if "auto_pad" in attr or "output_shape" in attr:
if "auto_pad" in attr:
attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
if "output_shape" in attr or attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
# Warning: Convolution does not yet support dynamic shapes,
# one will need to run dynamic_to_static on this model after import
kernel_shape = attr["kernel_shape"]
kndim = len(kernel_shape)
dilations = attr.get("dilations", [1] * kndim)
output_padding = attr.get("output_padding", [0] * kndim)
strides = attr["strides"]
total_pad = [0] * kndim
# https://github.com/onnx/onnx/blob/main/docs/Operators.md#ConvTranspose
if "output_shape" in attr:
for i in range(kndim):
total_pad[i] = (
strides[i] * (input_shape[ndim - kndim + i] - 1)
+ output_padding[i]
+ ((kernel_shape[i] - 1) * dilations[i] + 1)
- attr["output_shape"][i]
)
else:
for i in range(kndim):
total_pad[i] = (
output_padding[i]
+ ((kernel_shape[i] - 1) * dilations[i] + 1)
- strides[i]
)
left = [p // 2 for p in total_pad]
right = [total_pad[i] - left[i] for i in range(kndim)]
if "output_shape" in attr and "auto_pad" not in attr:
pad = right + left
elif "LOWER" in attr["auto_pad"]:
pad = left + right
else:
pad = right + left
attr["pads"] = pad
elif attr["auto_pad"] == "VALID":
attr["pads"] = tuple([0 for i in range(ndim - 2)])
elif attr["auto_pad"] == "NOTSET":
pass
else:
msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.'
raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"]))
if "auto_pad" in attr:
attr.pop("auto_pad")
out = AttrCvt(
op_name=dimension_picker("conv", "_transpose"),
transforms={
"kernel_shape": "kernel_size",
"dilations": ("dilation", 1),
"pads": ("padding", 0),
"group": ("groups", 1),
},
disables=["output_shape"],
custom_check=dimension_constraint(),
)([data, inputs[1]], attr, params)
use_bias = len(inputs) == 3
if use_bias:
out = _op.nn.bias_add(out, inputs[2])
return out
class GlobalAveragePool(OnnxOpConverter):
"""Operator converter for GlobalAveragePool"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
rank = len(infer_shape(inputs[0]))
if rank == 3:
return _op.nn.global_avg_pool1d(inputs[0])
if rank == 4:
return _op.nn.global_avg_pool2d(inputs[0])
if rank == 5:
return _op.nn.global_avg_pool3d(inputs[0])
raise NotImplementedError(
"Global average pooling is only implemented for 1D, 2D, and 3D kernels, got %dD."
% (rank - 2),
)
class QLinearGlobalAveragePool(OnnxOpConverter):
"Operator converter for QLinearGlobalAveragePool from Microsoft onnxruntime contrib opset."
@classmethod
def _impl_v1(cls, inputs, attr, params):
rank = len(infer_shape(inputs[0]))
x_scale = get_scalar(inputs[1], params)
x_zero_point = get_scalar(inputs[2], params, dtype="int32")
y_scale = fold_constant(get_scalar(inputs[3], params))
y_zero_point = get_scalar(inputs[4], params, dtype="int32")
input_dtype = infer_type(inputs[0]).checked_type.dtype
# Onnxruntime documentation does not mention that this global avg_pool should follow the
# sequence dequantize -> float op -> quantize, but that is how QLinearAveragePool is done.
#
# This op also follows the same pattern since qnn op is not available right now.
# TODO: Generate QNN op to perform quantized operation instead of dequant -> op -> quant
x = _qnn.op.dequantize(inputs[0], x_scale, x_zero_point)
if rank == 3:
out = _op.nn.global_avg_pool1d(x)
elif rank == 4:
out = _op.nn.global_avg_pool2d(x)
elif rank == 5:
out = _op.nn.global_avg_pool3d(x)
else:
raise NotImplementedError(
"Global average pooling is only implemented for 1D, 2D, and 3D kernels, got %dD."
% (rank - 2),
)
return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=input_dtype)
class GlobalMaxPool(OnnxOpConverter):
"""Operator converter for GlobalMaxPool"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
rank = len(infer_shape(inputs[0]))
if rank == 3:
return _op.nn.global_max_pool1d(inputs[0])
if rank == 4:
return _op.nn.global_max_pool2d(inputs[0])
if rank == 5:
return _op.nn.global_max_pool3d(inputs[0])
raise NotImplementedError(
"Global max pooling is only implemented for 1D, 2D, and 3D kernels, got %dD."
% (rank - 2),
)
class Div(Elemwise):
"""Operator converter for Divide."""
name = "divide"
class Elu(OnnxOpConverter):
"""Operator converter for Elu."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = float(attr.get("alpha", 1.0))
return _expr.const(-alpha) * _op.nn.relu(
_expr.const(1.0) - _op.exp(inputs[0])
) + _op.nn.relu(inputs[0])
class Gelu(OnnxOpConverter):
"""Operator converter for Gelu from Microsoft onnxruntime contrib opset.
gelu(x) = 0.5x(1 + erf(x/sqrt(2)))
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
x = inputs[0]
# Declare consts
const_dtype = infer_type(x).checked_type.dtype
half = _expr.const(0.5, dtype=const_dtype)
one = _expr.const(1.0, dtype=const_dtype)
sqrt2 = _expr.const(math.sqrt(2), dtype=const_dtype)
# Compute gelu
term1 = _op.multiply(half, x)
erf = _op.erf(_op.divide(x, sqrt2))
term2 = _op.add(one, erf)
return _op.multiply(term1, term2)
class FastGelu(OnnxOpConverter):
"""Operator converter for FastGelu from Microsoft onnxruntime contrib opset.
fast_gelu(x) = 0.5x(1 + tanh(sqrt(2/pi)(x + 0.044715x^3)))
= 0.5x(1 + tanh(sqrt(2/pi)x + 0.044715 * sqrt(2/pi) * x^3))
= 0.5x(1 + tanh(c1 * x + c2 * x^3))
, where
c1 = sqrt(2/pi)
c2 = 0.044715 * sqrt(2/pi)
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
x = inputs[0]
if inputs[1]:
bias = inputs[1]
bias_shape = infer_shape(bias)
assert len(bias_shape) == 1, "bias term must be a 1D tensor"
x += bias
# Declare consts
const_dtype = infer_type(x).checked_type.dtype
half = _expr.const(0.5, dtype=const_dtype)
one = _expr.const(1.0, dtype=const_dtype)
const1 = _expr.const(math.sqrt(2 / math.pi), dtype=const_dtype)
const2 = _expr.const(0.044715 * math.sqrt(2 / math.pi), dtype=const_dtype)
# Compute FastGelu
term1 = _op.multiply(half, x)
term2 = _op.multiply(const1, x)
term3 = _op.multiply(const2, _op.power(x, _expr.const(3, const_dtype)))
tanh = _op.tanh(_op.add(term2, term3))
return _op.multiply(term1, _op.add(one, tanh))
class BiasGelu(OnnxOpConverter):
"""Operator converter for BiasGelu from Microsoft onnxruntime contrib opset.
bias_gelu(x, b) = 0.5(x + b)(1 + erf((x + b)/sqrt(2)))
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
x = inputs[0]
b = inputs[1]
b_shape = infer_shape(b)
assert len(b_shape) == 1, "BiasGelu bias term must be a 1D tensor"
inp = _op.add(x, b)
return Gelu._impl_v1([inp], attr, params)
class LayerNormalization(OnnxOpConverter):
"""Operator converter for LayerNormalization from Microsoft onnxruntime contrib opset."""
@classmethod
def _impl_v17(cls, inputs, attr, params):
x = inputs[0]
gamma = inputs[1]
beta = inputs[2]
axis = attr.get("axis", -1)
eps = attr.get("epsilon", 1e-5)
# according to the onnx doc, given the int axis (default -1)
# to compute the mean and inv_stdev which are of dim [d[0], ..., d[axis-1], 1, ..., 1]
# the actual computation is over (axis, ..., rank(x) - 1) axes
# see https://github.com/onnx/onnx/blob/main/docs/Changelog.md#layernormalization-17
rank = len(infer_shape(x))
axis = tuple(range(axis, rank)) if axis >= 0 else tuple(range(rank + axis, rank))
dtype = infer_type(x).checked_type.dtype
mean = _op.mean(x, axis, keepdims=True)
var = _op.variance(x, axis, keepdims=True, with_mean=mean)
inv_stdev = _op.divide(
_op.const(1, dtype=dtype), _op.sqrt(_op.add(var, _op.const(eps, dtype=dtype)))
)
x_norm = _op.multiply(_op.subtract(x, mean), inv_stdev)
ln = _op.multiply(x_norm, gamma)
if beta is not None:
ln = _op.add(ln, beta)
return _expr.TupleWrapper(_expr.Tuple([ln, mean, inv_stdev]), 3)
class EmbedLayerNormalization(OnnxOpConverter):
"""Operator converter for EmbedLayerNormalization from Microsoft onnxruntime contrib opset.
This layer embeds the input tokens, sums them, and applies layer normalization.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
input_ids = inputs[0]
segment_ids = inputs[1]
word_emb = inputs[2]
pos_emb = inputs[3]
segment_emb = inputs[4]
gamma = inputs[5]
beta = inputs[6]
mask = inputs[7]
pos_ids = inputs[8]
eps = attr.get("epsilon", 1e-12)
(batch_size, seq_len) = infer_shape(input_ids)
if segment_ids:
assert segment_emb
if pos_ids is None:
pos_ids = _op.const([list(range(seq_len))] * batch_size, dtype="int32")
word_vec = _op.take(word_emb, input_ids, axis=0)
segment_vec = _op.take(segment_emb, segment_ids, axis=0)
pos_vec = _op.take(pos_emb, pos_ids, axis=0)
vec_sum = _op.add(word_vec, pos_vec)
if segment_ids:
vec_sum = _op.add(vec_sum, segment_vec)
ln = layer_norm(vec_sum, eps, gamma, beta)
mask_index = _op.const(np.zeros((batch_size,), dtype="int32"))
if mask:
# calculate number of words per sentence
mask_index = _op.sum(mask, axis=1)
# TODO(@anwang2009): onnxruntime v1.10.0 requires a third output of vec_sum
return _expr.TupleWrapper(_expr.Tuple([ln, mask_index]), 2)
class SkipLayerNormalization(OnnxOpConverter):
"""Operator converter for SkipLayerNormalization from Microsoft onnxruntime contrib opset.
This layer sums the two input tensors (along with optional bias), and applies layer
normalization.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
data = inputs[0]
skip = inputs[1]
gamma = inputs[2]
beta = inputs[3]
bias = inputs[4]
assert (
beta is not None and bias is not None
), "SkipLayerNormalization import currently only supports required beta and bias"
eps = attr.get("epsilon", 1e-12)
x = _op.add(data, skip)
if bias is not None:
x = _op.add(x, bias)
output = layer_norm(x, eps, gamma, beta)
# onnxruntime doesn't compute the other outputs, despite the documentation
placeholder = _op.const(0, dtype="float32")
return _expr.TupleWrapper(_expr.Tuple([output, placeholder, placeholder]), 3)
class Attention(OnnxOpConverter):
"""Operator converter for Attention from Microsoft onnxruntime contrib opset.
This is the self-attention mechanism used in transformer models.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
num_heads = attr["num_heads"]
assert (
"qkv_hidden_sizes" not in attr
), "different hidden sizes for Q, K, V are not currently supported"
assert "unidirectional" not in attr, "unidirectional attention not current supported"
# (batch, seq, in_hidden)
input_emb = inputs[0]
# (in_hidden, 3 * out_hidden), where out_hidden = num_heads * head_size
weight = inputs[1]
# (3 * out_hidden,)
bias = inputs[2]
# 1. ( batch, 1, max_seq, max_seq)
# 2. ( batch, past_seq + seq,)
# 3. ( batch, seq, past_seq + seq,)
# 4. ( batch,)
# 5. (2 * batch,)
# For now, we only support case 2.
mask_index = inputs[3]
# (2, batch, num_heads, past_seq, head_size)
past = inputs[4]
# (batch, num_heads, seq, seq)
extra_add = inputs[5]
(batch_size, seq_len, _) = infer_shape(input_emb)
(out_hidden_x3,) = infer_shape(bias)
assert out_hidden_x3 % 3 == 0, "bias shape should be divisible by 3"
out_hidden = out_hidden_x3 // 3
assert (
out_hidden % num_heads == 0
), "output hidden size should be divisible by number of attention heads"
head_size = out_hidden // num_heads
assert (
mask_index is not None
), "Attention import currently only supports required mask_index"
mask_index_shape = infer_shape(mask_index)
assert (
len(mask_index_shape) == 2
and mask_index_shape[0] == batch_size
and mask_index_shape[1] == seq_len
), "currently only support (batch_size, sequence_length) mask index"
assert past is None, "past K, V state is not currently supported"
assert extra_add is None, "extra add to QxK not currently supported"
# split weight and biases and do the matmuls
w_Q, w_K, w_V = _op.split(weight, 3, axis=1)
b_Q, b_K, b_V = _op.split(bias, 3, axis=0)
# need to merge batch dimensions since TVM matmul is 2D
input_emb = _op.reverse_reshape(input_emb, (-1, 0))
Q = _op.add(_op.nn.matmul(input_emb, w_Q), b_Q)
K = _op.add(_op.nn.matmul(input_emb, w_K), b_K)
V = _op.add(_op.nn.matmul(input_emb, w_V), b_V)
# massage tensors in preparation for batched matmul
def massage(tensor):
tensor = _op.reshape(tensor, (batch_size, seq_len, num_heads, head_size))
# (batch_size, num_heads, seq_len, head_size)
tensor = _op.transpose(tensor, axes=[0, 2, 1, 3])
# (batch_size * num_heads, seq_len, head_size)
return _op.reverse_reshape(tensor, (-1, 0, 0))
Q = massage(Q)
K = massage(K)
V = massage(V)
K_present = _op.reshape(K, (batch_size, num_heads, seq_len, head_size))
V_present = _op.reshape(V, (batch_size, num_heads, seq_len, head_size))
present = _op.stack([K_present, V_present], axis=0)
att_scores = _op.nn.batch_matmul(Q, K, transpose_a=False, transpose_b=True)
score_dtype = infer_type(att_scores).checked_type.dtype
att_scores = _op.divide(
att_scores,
_op.const(np.sqrt(head_size), dtype=infer_type(att_scores).checked_type.dtype),
)
att_scores = _op.reshape(att_scores, (batch_size, num_heads, seq_len, seq_len))
# build the attention mask
att_mask = _op.cast(mask_index, score_dtype)
att_mask = _op.expand_dims(att_mask, 1, num_newaxis=2)
att_mask = _op.subtract(_op.const(1, dtype=score_dtype), att_mask)
att_mask = _op.multiply(att_mask, _op.const(-10000, dtype=score_dtype))
# apply the mask
att_scores = _op.add(att_scores, att_mask)
att_scores = _op.reshape(att_scores, (batch_size * num_heads, seq_len, seq_len))
att_probs = _op.nn.softmax(att_scores, axis=-1)
output = _op.nn.batch_matmul(att_probs, V, transpose_a=False, transpose_b=False)
output = _op.reverse_reshape(output, (-1, num_heads, 0, 0))
output = _op.transpose(output, axes=[0, 2, 1, 3])
output = _op.reshape(output, (0, 0, out_hidden))
return _expr.TupleWrapper(_expr.Tuple([output, present]), 2)
class Gemm(OnnxOpConverter):
"""Operator converter for Gemm."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
assert len(inputs) == 3 or len(inputs) == 2, "Gemm op takes 2 or 3 inputs, {} given".format(
len(inputs)
)
input0_state = infer_type(inputs[0])
dtype = input0_state.checked_type.dtype
# Y = alpha * A * B + beta * C
alpha = float(attr.get("alpha", 1.0))
beta = float(attr.get("beta", 1.0))
transA = int(attr.get("transA", 0))
transB = int(attr.get("transB", 0))
# get number of channels
channels = infer_channels(inputs[1], not transB)
if transA:
inputs[0] = _op.transpose(inputs[0], axes=(1, 0))
if not transB:
inputs[1] = _op.transpose(inputs[1], axes=(1, 0))
if len(input0_state.checked_type.shape) != 2:
inputs[0] = _op.nn.batch_flatten(inputs[0])
if alpha != 1.0:
inputs[0] *= _expr.const(alpha, dtype=dtype)
out = _op.nn.dense(inputs[0], inputs[1], units=channels)
if len(inputs) == 3:
out = out + _expr.const(beta, dtype=dtype) * inputs[2]
return out
class MatMul(OnnxOpConverter):
"""Operator converter for MatMul."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
assert len(inputs) == 2, "MatMul op take 2 inputs, {} given".format(len(inputs))
# Need to check input shape as batch matmul must be supported.
return matmul_out_dtype(inputs, out_dtype=infer_type(inputs[0]).checked_type.dtype)
class MatMulInteger16(OnnxOpConverter):
"""Operator converter for MatMulInteger16 from Microsoft onnxruntime contrib opset."""
@classmethod
def _impl_v10(cls, inputs, attr, params):
assert len(inputs) == 2, "MatMulInteger16 op take 2 inputs, {} given".format(len(inputs))
a_dtype = infer_type(inputs[0]).checked_type.dtype
b_dtype = infer_type(inputs[1]).checked_type.dtype
# Check input data types
assert a_dtype in ("int16", "uint16"), "MatMulInteger16: invalid dtype for first input"
assert b_dtype in ("int16", "uint16"), "MatMulInteger16: invalid dtype for second input"
out_dtype = "int32"
if a_dtype == "uint16" and b_dtype == "uint16":
out_dtype = "uint32"
return matmul_out_dtype(inputs, out_dtype)
class Mod(OnnxOpConverter):
"""Operator converter for Mod."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
assert len(inputs) == 2, "Mod op take 2 inputs, {} given".format(len(inputs))
# Note: attr['fmod'] determines whether the operator should behave like np.fmod or np.mod.
# attr['fmod'] == 0 will behave as np.mod and attr['fmod'] == 1 will force fmod treatment.
# The relay equivalent of np.fmod is relay.mod and np.mod is relay.floor_mod
if attr.get("fmod", 0) == 0:
op_name = "floor_mod"
else:
op_name = "mod"
return AttrCvt(op_name)(inputs, {}, params)
class MaxPool(Pool):
"""Operator converter for MaxPool"""
name = "max_pool"
class MaxUnpool(OnnxOpConverter):
"""Operator converter for MaxUnpool"""
@classmethod
def _impl_v11(cls, inputs, attr, params):
# Unpack inputs and attributes
data = inputs[0]
data_type = infer_type(data).checked_type.dtype
indices = inputs[1]
output_shape = inputs[2]
kernel_shape = attr.get("kernel_shape")
pads = attr.get("pads", None)
strides = attr.get("strides", [1] * len(kernel_shape))
# Compute the proper output shape before padding.
multiplier = _op.concatenate(
[_expr.const([1, 1], dtype="int64"), _expr.const(list(strides), dtype="int64")], axis=0
)
total_output_shape = multiplier * shape_of(data, dtype="int64")
# Add extra dimensions from kernel size and stride mismatch
total_output_shape += _op.concatenate(
[_expr.const([0, 0], "int64"), _expr.const(list(kernel_shape), "int64")], axis=0
) - _op.concatenate(
[_expr.const([0, 0], "int64"), _expr.const(list(strides), "int64")], axis=0
)
# Compute padding amount if output shape is specified.
if output_shape is not None:
total_output_shape = output_shape
elif pads is not None:
# Get pads in the proper format for relay.
pads = _op.concatenate(
[_expr.const([0, 0, 0, 0], "int64"), _expr.const(list(pads), "int64")], axis=0
)
pads = _op.reshape(pads, [-1, 2])
# Compute the total padding per axis.
total_pad = _op.sum(pads, axis=-1)
# Reversing maxpool means that padding actually makes our output smaller.
total_output_shape = total_output_shape - total_pad
# Create a tensor of zeros then scatter our data through it.
zeros_tensor = _op.zeros(total_output_shape, data_type)
# We need to flatten all our tensors before scattering.
flat_tensor = _op.scatter(
_op.reshape(zeros_tensor, [-1]),
_op.reshape(indices, [-1]),
_op.reshape(data, [-1]),
axis=0,
)
# Now reshape back to prepadded shape.
output_tensor = _op.reshape(flat_tensor, total_output_shape)
return output_tensor
class LpPool(OnnxOpConverter):
"""A helper class for lppool op converters."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
dtype = infer_type(inputs[0]).checked_type.dtype
data = inputs[0]
input_shape = infer_shape(data)
ndim = len(input_shape)
if "auto_pad" in attr:
attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
# Warning: LpPool does not yet support dynamic shapes,
# one will need to run dynamic_to_static on this model after import
data = autopad(
data,
attr["strides"],
attr["kernel_shape"],
[1] * ndim,
mode=attr["auto_pad"],
)
elif attr["auto_pad"] == "VALID":
attr["pads"] = tuple([0 for i in range(ndim - 2)])
elif attr["auto_pad"] == "NOTSET":
pass
else:
msg = 'Value {} in attribute "auto_pad" of operator {} is invalid.'
raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"], "LpPool"))
attr.pop("auto_pad")
if "storage_order" in attr:
attr["layout"] = onnx_storage_order2layout(
attr["storage_order"], dims=(len(input_shape) - 2), op_name="LpPool"
)
else:
attr["layout"] = onnx_default_layout(dims=(len(input_shape) - 2), op_name="LpPool")
p_value = attr.get("p", 2)
p = _expr.const(p_value, dtype)
reci_p = _expr.const(1.0 / p_value, dtype)
data = _op.power(data, p)
out = AttrCvt(
op_name=dimension_picker("avg_pool"),
transforms={"kernel_shape": "pool_size", "pads": ("padding", 0)},
extras={"count_include_pad": True},
ignores=["p"],
custom_check=dimension_constraint(),
)([data], attr, params)
kernels = attr["kernel_shape"]
out = _op.abs(out) * _expr.const(np.prod(kernels).astype(dtype))
return _op.power(out, reci_p)
class GlobalLpPool(OnnxOpConverter):
"""Operator converter for GlobalLpPool."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
# TODO: GlobalLpPool does not yet support dynamic shapes
in_shape = infer_shape(inputs[0])
attr["kernel_shape"] = in_shape[2:]
return LpPool._impl_v1(inputs, attr, params)
class Mul(Elemwise):
"""Operator converter for Multiply."""
name = "multiply"
class Pad(OnnxOpConverter):
"""Operator converter for Pad."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
pad_width = []
pads = attr.pop("paddings")
dims = int(len(pads) / 2)
for i in range(dims):
pad_width.append((pads[i], pads[i + dims]))
attr["pad_width"] = pad_width
pad_mode = attr.get("mode", b"constant").decode("utf-8")
if pad_mode in ["constant", "edge", "reflect"]:
attr["pad_mode"] = pad_mode
attr.pop("mode", None)
else:
raise tvm.error.OpAttributeInvalid(
"Value " + pad_mode + ' in attribute "mode" is invalid for operator Pad.'
)
return AttrCvt(
_op.nn.pad,
transforms={
"value": "pad_value",
},
)(inputs, attr, params)
@classmethod
def _impl_v2(cls, inputs, attr, params):
pad_width = []
pads = attr.pop("pads")
dims = int(len(pads) / 2)
for i in range(dims):
pad_width.append((pads[i], pads[i + dims]))
attr["pad_width"] = pad_width
pad_mode = attr.get("mode", b"constant").decode("utf-8")
if pad_mode in ["constant", "edge", "reflect"]:
attr["pad_mode"] = pad_mode
attr.pop("mode", None)
else:
raise tvm.error.OpAttributeInvalid(
"Value " + pad_mode + ' in attribute "mode" is invalid for operator Pad.'
)
return AttrCvt(
"pad",
transforms={
"value": "pad_value",
},
)(inputs, attr, params)
@classmethod
def _impl_v11(cls, inputs, attr, params):
pads = inputs[1]
if len(inputs) == 3:
value = fold_constant(_op.take(inputs[2], _op.const(0)))
else:
value = 0.0
pad_width_expr = fold_constant(_op.transpose(_op.reshape(pads, (2, -1))))
pad_mode = attr.get("mode", b"constant").decode("utf-8")
if not pad_mode in ["constant", "edge", "reflect"]:
raise tvm.error.OpAttributeInvalid(
"Value " + pad_mode + ' in attribute "mode" is invalid for operator Pad.'
)
return _op.nn.pad(inputs[0], pad_width_expr, value, pad_mode=pad_mode)
class ParametricSoftPlus(OnnxOpConverter):
"""Operator converter for ParametricSoftPlus."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = _expr.const(float(attr.get("alpha", 1.0)))
beta = _expr.const(float(attr.get("beta", 1.0)))
return _op.log(_op.exp(beta * inputs[0]) + _expr.const(1.0)) * alpha
class Pow(OnnxOpConverter):
"""Operator converter for Pow."""
@classmethod
def _impl_v13(cls, inputs, attr, params):
x = inputs[0]
y = inputs[1]
x_type = infer_type(x).checked_type.dtype
output_type = x_type
y_type = infer_type(y).checked_type.dtype
if not x_type.startswith("float"):
x_type = "float32"
x = _op.cast(x, x_type)
if x_type != y_type:
y = _op.cast(y, x_type)
# TODO: come up with good default integer pow() func for common backends
result = _op.power(x, y)
if x_type != output_type:
return _op.cast(result, output_type)
return result
class Prelu(OnnxOpConverter):
"""Operator converter for Prelu."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
assert len(inputs) == 2, "Prelu need 2 inputs, {} given".format(len(inputs))
input_shape = shape_of(inputs[0])
alpha = _op.broadcast_to_like(inputs[1], inputs[0])
alpha = _op.reshape(alpha, [-1])
output = _op.nn.prelu(_op.reshape(inputs[0], [-1]), alpha, axis=0)
return _op.reshape(output, input_shape)
class Reciprocal(OnnxOpConverter):
"""Operator converter for Reciprocal."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
dtype = infer_type(inputs[0]).checked_type.dtype
return _expr.const(1.0, dtype=dtype) / inputs[0]
class Flatten(OnnxOpConverter):
"""Operator converter for Flatten."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
axis = attr.get("axis", 1)
ishape = shape_of(inputs[0])
ndim = infer_shape(ishape)[0]
if axis < 0:
axis = axis + ndim
if axis == 1:
out = _op.nn.batch_flatten(inputs[0])
else:
pre_shape = _op.prod(_op.strided_slice(ishape, [0], [axis], [1]), keepdims=True)
post_shape = _op.prod(_op.strided_slice(ishape, [axis], [ndim], [1]), keepdims=True)
newshape = fold_constant(_op.concatenate([pre_shape, post_shape], axis=0))
out = _op.reshape(inputs[0], newshape)
return out
class Reshape(OnnxOpConverter):
"""Operator converter for Reshape."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return _op.reshape(inputs[0], attr["shape"])
@classmethod
def _impl_v5(cls, inputs, attr, params):
allowzero = attr.get("allowzero", False)
if get_name(inputs[1]) in params:
shape = tuple(params[inputs[1].name_hint].numpy().astype("int32"))
out = _op.reshape(inputs[0], shape, allowzero=allowzero)
else:
out = _op.reshape(*inputs, allowzero=allowzero)
return out
class DepthToSpace(OnnxOpConverter):
"""Operator converter for DepthToSpace."""
@classmethod
def _impl_v11(cls, inputs, attr, params):
block_size = int(attr["blocksize"])
mode = attr.get("mode", b"DCR").decode("utf-8")
return _op.nn.depth_to_space(inputs[0], block_size, mode=mode)
class SpaceToDepth(OnnxOpConverter):
"""Operator converter for SpaceToDepth."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
block_size = int(attr["blocksize"])
return _op.nn.space_to_depth(inputs[0], block_size)
class Concat(OnnxOpConverter):
"""Operator converter for Concat."""
@classmethod
def _impl_v1(cls, inputs, args, params):
return AttrCvt(op_name="concatenate")((inputs,), args)
class Scale(OnnxOpConverter):
"""Operator converter for Scale."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
scale = float(attr.get("scale", 1.0))
return inputs[0] * _expr.const(scale)
class Selu(OnnxOpConverter):
"""Operator converter for Selu."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = float(attr.get("alpha", 1.67326319217681884765625))
gamma = float(attr.get("gamma", 1.05070102214813232421875))
return _expr.const(gamma) * (
_expr.const(-alpha) * _op.nn.relu(_expr.const(1.0) - _op.exp(inputs[0]))
+ _op.nn.relu(inputs[0])
)
class ScaledTanh(OnnxOpConverter):
"""Operator converter for ScaledTanh."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = float(attr.get("alpha", 1.0))
beta = float(attr.get("beta", 1.0))
return _op.tanh(_expr.const(beta) * inputs[0]) * _expr.const(alpha)
class Shrink(OnnxOpConverter):
"""Operator converter for Shrink."""
@classmethod
def _impl_v9(cls, inputs, attr, params):
x = inputs[0]
dtype = infer_type(x).checked_type.dtype
lambd = _op.const(attr.get("lambd", 0.5), dtype=dtype)
bias = _op.const(attr.get("bias", 0.0), dtype=dtype)
zeros = _op.zeros_like(x)
return _op.where(x < -lambd, x + bias, zeros) + _op.where(x > lambd, x - bias, zeros)
class Softsign(OnnxOpConverter):
"""Operator converter for Softsign."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return inputs[0] / (_expr.const(1.0) + Absolute.get_converter(1)(inputs, attr, params))
class Sub(Elemwise):
"""Operator converter for Subtract."""
name = "subtract"
class Sum(OnnxOpConverter):
"""Operator converter for Sum."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
# Onnx Sum Operator
for in_index in range(len(inputs) - 1):
inputs[in_index + 1] = _op.add(inputs[in_index], inputs[in_index + 1])
return inputs[len(inputs) - 1]
class Optional_(OnnxOpConverter):
"""Operator converter for Optional based on sequence construction op."""
@classmethod
def _impl_v15(cls, inputs, attr, params):
return SequenceConstruct._impl_v11(inputs, attr, params)
class OptionalHasElement(OnnxOpConverter):
"""Operator converter for OptionalHasElement."""
@classmethod
def _impl_v15(cls, inputs, attr, params):
shape = infer_shape(inputs[0])
return _op.const(True) if shape else _op.const(False)
class OptionalGetElement(OnnxOpConverter):
"""Operator converter for OptionalGetElement based on sequence construction op."""
@classmethod
def _impl_v15(cls, inputs, attr, params):
opt_as_seq = Optional_._impl_v15(inputs, attr, params)
return _expr.TupleGetItem(opt_as_seq, 0)
class Affine(OnnxOpConverter):
"""Operator converter for Affine transformation."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = _expr.const(attr.get("alpha", 1.0))
beta = _expr.const(attr.get("beta", 0.0))
return (alpha * inputs[0]) + beta
class ThresholdedRelu(OnnxOpConverter):
"""Operator converter for ThresholdedRelu."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = float(attr.get("alpha", 1.0))
alpha_tensor = _op.full_like(inputs[0], fill_value=_expr.const(alpha))
mask = _op.greater(inputs[0], alpha_tensor).astype("float32")
return inputs[0] * mask
def _broadcast_constraint():
def _broadcast_check(attrs):
if attrs.get("axis", None):
return False
return True
return _broadcast_check, "Specifying broadcast axis not allowed."
def _fully_connected(opset):
def _impl(inputs, attr, params):
# get number of channels
channels = infer_channels(inputs[1], params)
attr["units"] = channels
return AttrCvt("dense", ignores=["axis", "axis_w"])(inputs, attr)
return _impl
class Upsample(OnnxOpConverter):
"""Operator converter for Upsample (nearest mode)."""
@classmethod
def _impl_v9(cls, inputs, attr, params):
scales = attr.get("scales")
input_shape = infer_shape(inputs[0])
dims = len(input_shape)
if not scales:
# Here we are going to higher OPSET version.
assert len(inputs) == 2, "Upsample op takes 2 inputs, {} given".format(len(inputs))
if get_name(inputs[1]) in params:
scales = params[inputs[1].name_hint].numpy()
else:
scales = inputs[1]
if isinstance(scales, _expr.Constant):
scales = list(scales.data.numpy())
if not isinstance(scales, _expr.Expr):
assert scales[0] == 1.0 and scales[1] == 1.0
mode = attr.get("mode")
if mode == b"nearest":
method = "nearest_neighbor"
elif mode == b"linear":
method = "trilinear" if dims == 5 else "bilinear"
else:
raise tvm.error.OpAttributeInvalid(
'Value {} in attribute "mode" of operator Upsample is not valid.'.format(mode)
)
# in 3d case, we use the purely static op
if dims == 5:
if isinstance(scales, _expr.Expr):
scale_h = _op.take(scales, _op.const(3))
scale_w = _op.take(scales, _op.const(4))
scale_d = _op.take(scales, _op.const(1))
else:
assert len(scales) == 5
scale_h = scales[-2]
scale_w = scales[-1]
scale_d = scales[-3]
layout = "NCDHW"
out = _op.nn.upsampling3d(
inputs[0],
scale_d,
scale_h,
scale_w,
layout=layout,
method=method,
coordinate_transformation_mode="asymmetric",
)
# in 2d case, use dynamic op
else:
if isinstance(scales, _expr.Expr):
scale_h = _op.take(scales, _op.const(3))
scale_w = _op.take(scales, _op.const(4))
else:
assert len(scales) == 4
scale_h = scales[-2]
scale_w = scales[-1]
layout = "NCHW"
out = _op.nn.upsampling(
inputs[0],
scale_h,
scale_w,
layout=layout,
method=method,
align_corners=False,
)
return out
class Shape(OnnxOpConverter):
"""Operator converter for Shape."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return shape_of(inputs[0], "int64")
@classmethod
def _impl_v15(cls, inputs, attr, params):
start = attr.get("start")
end = attr.get("end")
return shape_of(inputs[0], dtype="int64", start=start, end=end)
class CumSum(OnnxOpConverter):
"""Operator converter for CumSum."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
data = inputs[0]
dim = inputs[1]
if dim is not None:
dim = int(infer_value(dim, params).numpy())
exclusive = attr.get("exclusive", 0)
reverse = attr.get("reverse", 0)
if reverse != 0:
out = _op.reverse(data, axis=dim)
out = _op.cumsum(out, axis=dim, exclusive=exclusive)
return _op.reverse(out, axis=dim)
return _op.cumsum(data, axis=dim, exclusive=exclusive)
class Cast(OnnxOpConverter):
"""Operator converter for Cast."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return AttrCvt(op_name="cast", transforms={"to": "dtype"})(inputs, attr)
@classmethod
def _impl_v6(cls, inputs, attr, params):
try:
from onnx import TensorProto
except ImportError as e:
raise ImportError("Unable to import TensorProto from onnx {}".format(e))
# If onnx mapping is used, bfloat16 gets converted to float16
# which is not the desired behavior
if attr["to"] == int(TensorProto.BFLOAT16):
attr["to"] = "bfloat16"
else:
try:
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
attr["to"] = str(TENSOR_TYPE_TO_NP_TYPE[attr["to"]])
except ImportError as e:
raise ImportError("Unable to import onnx.mapping which is required {}".format(e))
return AttrCvt(op_name="cast", transforms={"to": "dtype"})(inputs, attr)
class CastLike(OnnxOpConverter):
"""Operator converter for CastLike."""
@classmethod
def _impl_v15(cls, inputs, attr, params):
return AttrCvt(op_name="cast_like")(inputs, attr)
class Unsqueeze(OnnxOpConverter):
"""Operator converter for Unsqueeze."""
@classmethod
def run_calculation(cls, tensor, axes):
axes = sorted(axes)
for axis in axes:
tensor = _op.expand_dims(tensor, axis=axis, num_newaxis=1)
return tensor
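# Hedged example (shapes assumed for illustration): axes are applied in sorted
# order, so run_calculation on a (3, 4) tensor with axes=[2, 0] inserts the new
# axes at positions 0 and 2 and yields shape (1, 3, 1, 4).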
@classmethod
def _impl_v1(cls, inputs, attr, params):
return cls.run_calculation(inputs[0], attr["axes"])
@classmethod
def _impl_v13(cls, inputs, attr, params):
if isinstance(inputs[1], _expr.Constant):
constant_axes = list(inputs[1].data.numpy())
constant_axes = list(map(int, constant_axes))
return cls.run_calculation(inputs[0], constant_axes)
rank_input = len(infer_type(inputs[0]).checked_type.shape)
num_new_axis = int(infer_type(inputs[1]).checked_type.shape[0])
axes = relay.sort(inputs[1])
axes = relay.split(axes, num_new_axis).astuple()
result = inputs[0]
# TODO (AndrewZhaoLuo): investigate performance issues with consecutive
# dynamic expand_dims on non-llvm targets.
for i in range(num_new_axis):
axis = relay.TupleGetItem(axes, i)
# Unpack scalar
axis = relay.reshape(axis, [])
axis = relay.where(
axis >= relay.const(0, "int64"), axis, axis + relay.const(rank_input, "int64")
)
result = _op.expand_dims(result, axis)
return result
class Squeeze(OnnxOpConverter):
"""Operator converter for Squeeze."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
axis = attr.get("axes", None)
return _op.squeeze(inputs[0], axis)
@classmethod
def _impl_v13(cls, inputs, attr, params):
ishape = infer_shape(inputs[0])
axis = inputs[1]
if axis is None:
# If axes is not provided, all the single dimensions will be removed from the shape.
if not ishape: # scalar
return inputs[0]
axis = [i for i in range(len(ishape)) if ishape[i] == 1]
axis = _op.const(axis)
dtype = infer_type(axis).checked_type.dtype
if isinstance(axis, _expr.Constant):
constant_axes = list(axis.data.numpy())
constant_axes = list(map(int, constant_axes))
return _op.squeeze(inputs[0], constant_axes)
rank = _op.shape_of(_op.shape_of(inputs[0], dtype), dtype)
axis = _op.where(axis < _op.const(0, dtype), axis + rank, axis)
return _op.squeeze(inputs[0], fold_constant(axis))
class Split(OnnxOpConverter):
"""Operator converter for Split."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
splits = attr.get("split", None)
if splits is not None and len(splits) > 1:
indices = []
index = 0
for i in splits[:-1]:
index += i
indices.append(index)
        # When splits isn't specified, divide evenly over the axis.
else:
indices = attr["tvm_custom"]["num_outputs"]
output = _op.split(inputs[0], indices, attr.get("axis", 0))
        # If the output of split is a single value, unpack it from the TupleWrapper
if len(output) == 1:
output = output[0]
return output
@classmethod
def _impl_v13(cls, inputs, attr, params):
splits = inputs[1]
splits_rank = None
if splits is not None:
splits_rank = len(infer_shape(splits))
if splits is not None and splits_rank > 0:
if isinstance(splits, _expr.Constant):
splits = splits.data.asnumpy()
indices = []
index = 0
for i in splits[:-1]:
index += i
indices.append(index)
else:
raise ValueError("Dynamic Split not yet supported")
        # When splits isn't specified, divide evenly over the axis.
else:
indices = attr["tvm_custom"]["num_outputs"]
output = _op.split(inputs[0], indices, attr.get("axis", 0))
        # If the output of split is a single value, unpack it from the TupleWrapper
if len(output) == 1:
output = output[0]
return output
class Slice(OnnxOpConverter):
"""Operator converter for Slice."""
@classmethod
def _common(cls, starts, ends, axes):
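        """Expand per-axis starts/ends into dense begin/end lists that cover every
        axis up to the largest one referenced; unspecified axes keep a full slice
        (start 0, end INT32_MAX)."""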
N = max(axes) + 1
new_axes = list(range(N))
new_starts = [0] * N
new_ends = [np.iinfo(np.int32).max] * N
for i, axis in enumerate(axes):
new_starts[axis] = starts[i]
new_ends[axis] = ends[i]
return new_starts, new_ends, new_axes
@classmethod
def _impl_v1(cls, inputs, attr, params):
if isinstance(attr["starts"], int):
attr["starts"] = (attr["starts"],)
attr["ends"] = (attr["ends"],)
try:
# Update the starts and ends according to axes if required.
if isinstance(attr["axes"], int):
attr["axes"] = (attr["axes"],)
new_starts, new_ends, new_axes = cls._common(attr["starts"], attr["ends"], attr["axes"])
attr["axes"] = new_axes
attr["starts"] = new_starts
attr["ends"] = new_ends
except KeyError:
pass
begin = list(attr["starts"])
end = list(attr["ends"])
return _op.strided_slice(inputs[0], begin=begin, end=end)
@classmethod
def _impl_v10(cls, inputs, attr, params):
starts = inputs[1]
ends = inputs[2]
axes = inputs[3]
steps = inputs[4]
ishape = infer_shape(inputs[0])
data_rank = len(ishape)
if axes is not None:
# Normalize for negative axes
axes_dtype = infer_type(axes).checked_type.dtype
axes = fold_constant(
_op.where(
axes < _op.const(0, axes_dtype), axes + _op.const(data_rank, axes_dtype), axes
)
)
def has_static_axes():
return (
isinstance(axes, _expr.Constant)
and isinstance(starts, _expr.Constant)
and isinstance(ends, _expr.Constant)
and (steps is None or isinstance(steps, _expr.Constant))
)
if axes is not None and has_static_axes():
axes_np = axes.data.numpy().astype("int64")
begin_np = starts.data.numpy().astype("int64")
end_np = ends.data.numpy().astype("int64")
if steps is None:
strides_np = np.ones_like(begin_np).astype("int64")
else:
strides_np = steps.data.numpy().astype("int64")
if all([isinstance(ishape[i], int) for i in axes_np]):
return _op.strided_slice(
inputs[0], list(begin_np), list(end_np), list(strides_np), axes=list(axes_np)
)
# Update the starts and ends according to axes if required.
if axes is not None:
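            # Scatter the provided starts/ends/steps into full-rank vectors indexed
            # by `axes`; unspecified axes default to start 0, end = dim size, step 1.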
data_shape = shape_of(inputs[0], dtype=infer_type(ends).checked_type.dtype)
starts = _op.scatter(
_op.const([0] * data_rank, dtype=infer_type(starts).checked_type.dtype),
axes,
starts,
axis=0,
)
ends = _op.scatter(data_shape, axes, ends, axis=0)
if steps is not None:
steps = _op.scatter(
_op.const([1] * data_rank, dtype=infer_type(steps).checked_type.dtype),
axes,
steps,
axis=0,
)
if steps is None:
steps = _op.const([1] * data_rank, dtype=infer_type(starts).checked_type.dtype)
return _op.strided_slice(
inputs[0], fold_constant(starts), fold_constant(ends), fold_constant(steps)
)
def normalize_gather_indices(data, indices, axis):
"""Make sure gather indices aren't negative"""
ind_dtype = infer_type(indices).checked_type.dtype
# Normalize the indices to a positive range
s = _op.take(_op.shape_of(data, dtype=ind_dtype), _op.const(axis, dtype="int64"))
cond = fold_constant(indices < _op.const(0, ind_dtype))
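    # If the comparison folds to a compile-time constant, resolve the branch here
    # instead of emitting a runtime where().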
if isinstance(cond, _expr.Constant):
val = cond.data.numpy()
if val.size == 1:
cond = val.item()
if cond:
indices = indices + s
return indices
indices = _op.where(cond, indices + s, indices)
return indices
class Gather(OnnxOpConverter):
"""Operator converter for Gather."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
axis = attr.get("axis", 0)
data = inputs[0]
indices = inputs[1]
indices = normalize_gather_indices(data, indices, axis)
return _op.take(data, indices, axis)
class GatherElements(OnnxOpConverter):
"""Operator converter for GatherElements."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
data = inputs[0]
indices = inputs[1]
axis = attr.get("axis", 0)
indices = normalize_gather_indices(data, indices, axis)
return _op.gather(data, axis, indices)
class GatherND(OnnxOpConverter):
"""Operator converter for GatherND."""
@classmethod
def _impl_common(cls, data, indices, batch_dims=0):
indices_dims = len(infer_shape(indices))
indices_shape = infer_shape(indices)
indices = _op.transpose(indices, axes=[-1] + list(range(indices_dims - 1)))
index_rank = indices_shape[-1]
return _op.gather_nd(
data,
indices,
batch_dims=batch_dims,
index_rank=index_rank,
)
@classmethod
def _impl_v1(cls, inputs, attr, params):
return cls._impl_common(inputs[0], inputs[1])
@classmethod
def _impl_v12(cls, inputs, attr, params):
batch_dims = attr.get("batch_dims", 0)
return cls._impl_common(inputs[0], inputs[1], batch_dims)
class Compress(OnnxOpConverter):
"""Operator converter for compress"""
@classmethod
def _impl_v11(cls, inputs, attr, params):
input_tensor, condition_tensor = inputs
axis = attr.get("axis", None)
        # Convert the boolean condition tensor to indices, e.g. [0, 1, 1, 0, 1] -> [1, 2, 4]
condition_tensor = _op.reshape(_op.argwhere(condition_tensor), (-1,))
if axis is not None:
return _op.take(input_tensor, condition_tensor, axis=axis)
# if axis is None, flatten input tensor before selection
input_tensor = _op.reshape(input_tensor, (-1,))
return _op.take(input_tensor, condition_tensor, axis=0)
class Scatter(OnnxOpConverter):
"""Operator converter for Scatter."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
axis = attr.get("axis", 0)
return _op.scatter(inputs[0], inputs[1], inputs[2], axis)
class ScatterND(OnnxOpConverter):
"""Operator converter for ScatterND."""
@classmethod
def _impl_v11(cls, inputs, attr, params):
indices_dim = len(infer_shape(inputs[1]))
axes = list(range(indices_dim))
return _op.scatter_nd(
inputs[0], _op.transpose(inputs[1], axes[-1:] + axes[:-1]), inputs[2], "update"
)
class EyeLike(OnnxOpConverter):
"""Operator converter for EyeLike."""
@classmethod
def _impl_v9(cls, inputs, attr, params):
dtype = attr.get("dtype", None)
if dtype is None:
in_checked_type = infer_type(inputs[0]).checked_type
in_dtype = in_checked_type.dtype
dtype = in_dtype
else:
dtype = get_type(dtype)
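        # Build the identity-like tensor dynamically: start from zeros shaped like
        # the input and scatter ones at positions (i, i + k) along the offset diagonal.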
in_shape = _op.shape_of(inputs[0])
zeros = _op.zeros(in_shape, dtype)
dim = _op.take(in_shape, _op.const(0))
indices = _op.arange(_op.const(0), dim, dtype="int32")
ones = _op.full(_op.const(1), _op.reshape(dim, (1,)), dtype=dtype)
k = _op.const(attr.get("k", 0), dtype="int32")
return _op.scatter_nd(zeros, _op.stack([indices, indices + k], axis=0), ones, "update")
class LRN(OnnxOpConverter):
"""Operator converter for Local Response Normalization."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
"""LRN support only NCHW format
https://github.com/onnx/onnx/blob/main/docs/Operators.md#LRN
"""
axis = 1
alpha = attr.get("alpha", 0.0001)
beta = attr.get("beta", 0.75)
bias = attr.get("bias", 1.0)
nsize = attr.get("size")
attr = {"size": nsize, "axis": axis, "alpha": alpha, "beta": beta, "bias": bias}
return AttrCvt("lrn")(inputs, attr)
class Maximum(OnnxOpConverter):
"""Operator converter for Maximum."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if len(inputs) == 1:
return inputs[0]
_max = inputs[0]
for i in range(1, len(inputs)):
_max = AttrCvt("maximum")([_max, inputs[i]], {})
return _max
class Minimum(OnnxOpConverter):
"""Operator converter for Minimum."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if len(inputs) == 1:
return inputs[0]
_min = inputs[0]
for i in range(1, len(inputs)):
_min = AttrCvt("minimum")([_min, inputs[i]], {})
return _min
class Mean(OnnxOpConverter):
"""Operator converter for Mean."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if len(inputs) == 1:
return inputs[0]
# avoid overflow
concat = _op.concatenate([_op.expand_dims(x, axis=0) for x in inputs], axis=0)
return _op.mean(concat, axis=0, keepdims=False)
class MeanVarianceNormalization(OnnxOpConverter):
"""Operator converter for MeanVarianceNormalization."""
@classmethod
def _impl_v13(cls, inputs, attr, params):
axis = attr.get("axes", (0, 2, 3))
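        # Normalize as (x - mean) / sqrt(E[x^2] - mean^2), i.e. zero mean and unit
        # variance over the reduction axes (default N, H, W for NCHW inputs).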
data_mean = _op.mean(inputs[0], axis=axis, keepdims=True)
data_mean_squared = _op.power(data_mean, _expr.const(2, "float32"))
data_squared = _op.power(inputs[0], _expr.const(2, "float32"))
data_squared_mean = _op.mean(data_squared, axis=axis, keepdims=True)
return (inputs[0] - data_mean) / _op.sqrt(data_squared_mean - data_mean_squared)
class HardSigmoid(OnnxOpConverter):
"""Operator converter for HardSigmoid."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = attr.get("alpha", 0.2)
beta = attr.get("beta", 0.5)
transformX = (inputs[0] * _expr.const(alpha)) + _expr.const(beta)
attr = {"a_min": 0, "a_max": 1}
return AttrCvt("clip")([transformX], attr)
class HardSwish(OnnxOpConverter):
"""Operator converter for HardSwish."""
@classmethod
def _impl_v14(cls, inputs, attr, params):
alpha = attr.get("alpha", 1 / 6)
beta = attr.get("beta", 0.5)
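        # HardSwish(x) = x * clip(alpha * x + beta, 0, 1), with defaults
        # alpha = 1/6 and beta = 0.5.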
transformX = inputs[0] * _expr.const(alpha) + _expr.const(beta)
attr = {"a_min": 0, "a_max": 1}
return inputs[0] * AttrCvt("clip")([transformX], attr)
class Reduce(OnnxOpConverter):
"""Operator converter for reduce ops."""
name = ""
@classmethod
def run_calculation(cls, inputs, axis, keepdims):
attr = {"axis": axis, "keepdims": keepdims}
return AttrCvt(cls.name)(inputs, attr)
@classmethod
def _impl_v1(cls, inputs, attr, params):
if not infer_shape(inputs[0]): # promote scalar to 1-D tensor
inputs[0] = _op.expand_dims(inputs[0], axis=0)
if "axes" in attr:
axis = attr.get("axes", 0)
else:
axis_len = len(infer_shape(inputs[0]))
axis = list(range(axis_len))
return cls.run_calculation(inputs, axis, attr.get("keepdims", True))
@classmethod
def _impl_v12(cls, inputs, attr, params):
if not infer_shape(inputs[0]): # promote scalar to 1-D tensor
inputs[0] = _op.expand_dims(inputs[0], axis=0)
if len(inputs) == 2:
if isinstance(inputs[1], _expr.Constant):
# Get axis and unpack scalar
constant_axis = int(inputs[1].data.numpy()[0])
return cls.run_calculation([inputs[0]], constant_axis, attr.get("keepdims", True))
raise ValueError("Dynamic Reduce is not supported yet!")
return cls._impl_v1(inputs, attr, params)
@classmethod
def _impl_v13(cls, inputs, attr, params):
if not infer_shape(inputs[0]): # promote scalar to 1-D tensor
inputs[0] = _op.expand_dims(inputs[0], axis=0)
noop_with_empty_axes = attr.get("noop_with_empty_axes", 0)
num_axis = int(infer_type(inputs[1]).checked_type.shape[0]) if inputs[1] is not None else 0
if noop_with_empty_axes and num_axis == 0:
return inputs[0]
if len(inputs) == 2:
if isinstance(inputs[1], _expr.Constant):
# Get axis and unpack scalar
constant_axis = int(inputs[1].data.numpy()[0])
return cls.run_calculation([inputs[0]], constant_axis, attr.get("keepdims", True))
if num_axis > 0:
raise ValueError("Dynamic Reduce is not supported yet!")
axis_len = len(infer_shape(inputs[0]))
axis = list(range(axis_len))
return cls.run_calculation([inputs[0]], axis, attr.get("keepdims", True))
return cls._impl_v1(inputs, attr, params)
class ReduceMax(Reduce):
"""Operator converter for ReduceMax."""
name = "max"
class ReduceMin(Reduce):
"""Operator converter for ReduceMin."""
name = "min"
class ReduceSum(Reduce):
"""Operator converter for ReduceSum."""
name = "sum"
class ReduceMean(Reduce):
"""Operator converter for ReduceMean."""
name = "mean"
class ReduceProd(Reduce):
"""Operator converter for ReduceProd."""
name = "prod"
class ReduceLogSumExp(Reduce):
"""Operator converter for ReduceLogSumExp."""
name = "logsumexp"
class ReduceSumSquare(OnnxOpConverter):
"""Operator converter for ReduceSumSquare."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if not infer_shape(inputs[0]): # promote scalar to 1-D tensor
inputs[0] = _op.expand_dims(inputs[0], axis=0)
if "axes" in attr:
axis = attr.get("axes", 0)
else:
axis_len = len(infer_shape(inputs[0]))
axis = list(range(axis_len))
attr = {"axis": axis, "keepdims": attr.get("keepdims", True)}
inputs[0] = inputs[0] * inputs[0]
return AttrCvt("sum")(inputs, attr)
class ReduceL1(OnnxOpConverter):
"""Operator converter for ReduceL1."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if not infer_shape(inputs[0]): # promote scalar to 1-D tensor
inputs[0] = _op.expand_dims(inputs[0], axis=0)
if "axes" in attr:
axis = attr.get("axes", 0)
else:
axis_len = len(infer_shape(inputs[0]))
axis = list(range(axis_len))
attr = {"axis": axis, "keepdims": attr.get("keepdims", True)}
inputs[0] = _op.abs(inputs[0])
return AttrCvt("sum")(inputs, attr)
class ReduceL2(OnnxOpConverter):
"""Operator converter for ReduceL2."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if not infer_shape(inputs[0]): # promote scalar to 1-D tensor
inputs[0] = _op.expand_dims(inputs[0], axis=0)
if "axes" in attr:
axis = attr.get("axes", 0)
else:
axis_len = len(infer_shape(inputs[0]))
axis = list(range(axis_len))
attr = {"axis": axis, "keepdims": attr.get("keepdims", True)}
inputs[0] = inputs[0] * inputs[0]
out = AttrCvt("sum")(inputs, attr)
return _op.sqrt(out)
class ReduceLogSum(OnnxOpConverter):
"""Operator converter for ReduceLogSum."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if not infer_shape(inputs[0]): # promote scalar to 1-D tensor
inputs[0] = _op.expand_dims(inputs[0], axis=0)
if "axes" in attr:
axis = attr.get("axes", 0)
else:
axis_len = len(infer_shape(inputs[0]))
axis = list(range(axis_len))
attr = {"axis": axis, "keepdims": attr.get("keepdims", True)}
out = AttrCvt("sum")(inputs, attr)
return _op.log(out)
class ArgMax(OnnxOpConverter):
"""Operator converter for ArgMax."""
@classmethod
def _impl_v13(cls, inputs, attr, params):
axis = attr.get("axis", 0)
keepdims = attr.get("keepdims", True)
select_last_index = attr.get("select_last_index", False)
attr = {"axis": axis, "keepdims": keepdims, "select_last_index": select_last_index}
return _op.cast(AttrCvt("argmax")(inputs, attr), "int64")
class ArgMin(OnnxOpConverter):
"""Operator converter for ArgMin."""
@classmethod
def _impl_v13(cls, inputs, attr, params):
axis = attr.get("axis", 0)
keepdims = attr.get("keepdims", True)
select_last_index = attr.get("select_last_index", False)
attr = {"axis": axis, "keepdims": keepdims, "select_last_index": select_last_index}
return _op.cast(AttrCvt("argmin")(inputs, attr), "int64")
class Softmax(OnnxOpConverter):
"""Operator converter for Softmax."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
axis = attr.get("axis", 1)
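        # Before opset 13, ONNX Softmax coerces the input to 2-D at `axis` and
        # normalizes over everything from `axis` onwards, so flatten the trailing
        # dimensions, apply softmax, and reshape back.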
in_shape = infer_shape(inputs[0])
ndim = len(in_shape)
if axis < 0:
axis += ndim
if axis == 0:
reshape_shape = [-1]
elif axis == ndim - 1:
return _op.nn.softmax(inputs[0], axis=axis)
else:
axis_val = [in_shape[i] for i in range(axis)]
reshape_shape = [np.prod(axis_val)] + [-1]
data_reshape = _op.reshape(inputs[0], newshape=reshape_shape)
out = _op.nn.softmax(data_reshape, axis=-1)
out = _op.reshape(out, newshape=in_shape)
return out
@classmethod
def _impl_v13(cls, inputs, attr, _):
axis = attr.get("axis", -1)
ndim = len(infer_shape(inputs[0]))
if axis < 0:
axis += ndim
return _op.nn.softmax(inputs[0], axis=axis)
class LogSoftmax(OnnxOpConverter):
"""Operator converter for Softmax."""
@classmethod
def run_calculation(cls, inputs, attr, params, opset):
"""Run the calculation for Log Softmax calculation."""
res = Softmax.get_converter(opset)(inputs, attr, params)
return _op.log(res)
@classmethod
def _impl_v1(cls, inputs, attr, params):
return cls.run_calculation(inputs, attr, params, opset=1)
@classmethod
def _impl_v13(cls, inputs, attr, params):
return cls.run_calculation(inputs, attr, params, opset=13)
class Hardmax(OnnxOpConverter):
"""Operator converter for Hardmax."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
axis = attr.get("axis", 1)
ndim = len(infer_shape(inputs[0]))
if axis < 0:
axis += ndim
dtype = infer_type(inputs[0]).checked_type.dtype
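        # Hardmax coerces the input to 2-D: dimensions before `axis` become rows,
        # the rest become columns; a one-hot of the per-row argmax is then reshaped
        # back to the original shape.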
if axis == 0:
pre = _op.const([1], "int64")
else:
pre = _op.prod(
_op.strided_slice(shape_of(inputs[0]), [0], [axis], [1]), axis=0, keepdims=True
)
post = _op.prod(
_op.strided_slice(shape_of(inputs[0]), [axis], [2147483647], [1]), axis=0, keepdims=True
)
newshape = _op.concatenate([pre, post], axis=0)
x = _op.reshape(inputs[0], fold_constant(newshape))
argmax = _op.argmax(x, axis=1)
onehot = _op.one_hot(
argmax,
_op.const(1.0, dtype),
_op.const(0.0, dtype),
fold_constant(_op.take(shape_of(x), _op.const([1], "int64"))),
1,
dtype,
)
return _op.reshape(onehot, shape_of(inputs[0]))
@classmethod
def _impl_v13(cls, inputs, attr, params) -> relay.Expr:
inferred_type = infer_type(inputs[0])
dtype = inferred_type.checked_type.dtype
ndim = len(inferred_type.checked_type.shape)
axis = attr.get("axis", -1) % ndim
argmax = _op.argmax(inputs[0], axis=axis)
return _op.one_hot(
argmax,
_op.const(1.0, dtype),
_op.const(0.0, dtype),
fold_constant(_op.take(shape_of(inputs[0]), _op.const([axis], "int64"))),
axis,
dtype,
)
class OneHot(OnnxOpConverter):
"""Operator converter for OneHot."""
@classmethod
def _impl_v9(cls, inputs, attr, params):
# Extract relay one_hot inputs.
indices, depth, values = inputs
ndim = len(infer_shape(indices))
# Split onnx on off values into two separate expressions.
off_value, on_value = _op.take(values, _op.const(0)), _op.take(values, _op.const(1))
# Extract the datatype of the output from on_value.
dtype = infer_type(on_value).checked_type.dtype
ind_dtype = infer_type(indices).checked_type.dtype
# Normalize the indices to a positive range
indices = _op.where(
indices < _op.const(0, ind_dtype), indices + _op.cast(depth, ind_dtype), indices
)
# set default value when axis is not set in the model
if "axis" not in attr:
attr["axis"] = -1
axis = attr["axis"]
if axis < 0:
axis += ndim + 1
return _op.one_hot(indices, on_value, off_value, depth, axis, dtype=dtype)
class ConstantOfShape(OnnxOpConverter):
"""Operator converter for ConstantOfShape."""
@classmethod
def _impl_v9(cls, inputs, attr, params):
if "value" in attr:
np_value = get_numpy(attr.pop("value"))[0]
value = _expr.const(np_value)
dtype = np_value.dtype.name
else:
value = _expr.const(0)
dtype = "float32"
output = _op.full(value, inputs[0], dtype=dtype)
return output
class Constant(OnnxOpConverter):
"""Operator converter for ConstantOfShape."""
@classmethod
def _impl_v9(cls, inputs, attr, params):
if "value" not in attr:
raise tvm.errors.OpAttributeRequired("no value in Constant")
value = attr.pop("value")
# Constants may rarely have string types. These are likely exported
# from other frameworks and not actually used in TVM. We'll just use
# a zero valued constant for compatibility.
if isinstance(value, bytes):
np_value = np.asarray([0]).astype("int64")
else:
np_value = get_numpy(value)
dtype = np_value.dtype.name
value = _expr.const(np_value, dtype)
return value
class Sign(OnnxOpConverter):
"""Operator converter for Sign."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return _op.sign(inputs[0])
class Equal(Elemwise):
"""Operator converter for Equal."""
name = "equal"
class Not(Elemwise):
"""Operator converter for Not."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return _op.logical_not(inputs[0])
class And(Elemwise):
"""Operator converter for And."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return _op.logical_and(inputs[0], inputs[1])
class Tile(Elemwise):
"""Operator converter for Tile"""
@classmethod
def _impl_v6(cls, inputs, attr, params):
return _op.tile(inputs[0], inputs[1])
class Erf(OnnxOpConverter):
"""Operator converter for Erf"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return _op.erf(inputs[0])
class Where(OnnxOpConverter):
"""Operator converter for Where"""
@classmethod
def _impl_v9(cls, inputs, attr, params):
return _op.where(*inputs)
class Or(Elemwise):
"""Operator converter for Or."""
@classmethod
def _impl_v7(cls, inputs, attr, params):
return _op.logical_or(inputs[0], inputs[1])
class Expand(OnnxOpConverter):
"""Operator converter for Expand."""
@classmethod
def _impl_v8(cls, inputs, attr, params):
dtype = infer_type(inputs[1]).checked_type.dtype
in_shape = shape_of(inputs[0], dtype=dtype)
shape = inputs[1]
        # Currently 'op.broadcast_to' expects the rank of the given 'shape'
        # (the 2nd input) to always be higher than that of the given 'input' (the 1st
        # input). However, ONNX Expand supports multi-directional broadcasting, which
        # allows the above pattern and also allows an extent of 'shape' to be smaller
        # than the corresponding extent of 'input'; in that case the extent of 'shape'
        # must be 1.
        # https://github.com/onnx/onnx/blob/main/docs/Broadcasting.md
        # In these cases we cannot directly apply 'op.broadcast_to' instead of 'expand',
        # so we solve the problem by expanding the given 'shape' itself.
def expand_shape(in_shape, shape):
"""A function expands the shape when the rank is lower than that of the given
intput. Also it replaces the extent of the shape with the corresponding extent
of the intput when it is 1.
"""
in_dims = infer_shape(in_shape)[0]
new_dims = infer_shape(shape)[0]
if in_dims < new_dims:
in_shape = _op.concatenate(
[
_expr.const(
[
1,
]
* (new_dims - in_dims),
dtype=dtype,
),
in_shape,
],
axis=0,
)
elif new_dims < in_dims:
shape = _op.concatenate(
[
_expr.const(
[
1,
]
* (in_dims - new_dims),
dtype=dtype,
),
shape,
],
axis=0,
)
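            # With both shapes now at equal rank, the element-wise maximum implements
            # multi-directional broadcasting: extents of 1 are replaced by the matching
            # extent of the other shape.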
new_shape = _op.maximum(in_shape, shape)
return new_shape
shape = fold_constant(expand_shape(in_shape, shape))
return _op.broadcast_to(inputs[0], shape=shape)
class RNN(OnnxOpConverter):
"""Operator converter for RNNs such as RNN, LSTM and GRU."""
@classmethod
def _activation_helper(cls, activation, alpha, beta):
convert_map = _get_convert_map(1)
attrs = {}
if alpha is not None:
attrs["alpha"] = alpha
if beta is not None:
attrs["beta"] = beta
return lambda x: convert_map[activation.decode("utf-8")]([x], attrs, {})
@classmethod
def _activation_needs_alpha(cls, activation):
needs_alpha = [
"Affine",
"LeakyRelu",
"ThresholdedRelu",
"ScaledTanh",
"HardSigmoid",
"Elu",
]
return activation.decode("utf-8") in needs_alpha
@classmethod
def _activation_needs_beta(cls, activation):
needs_beta = [
"Affine",
"ScaledTanh",
"HardSigmoid",
]
return activation.decode("utf-8") in needs_beta
@classmethod
def bidir_rnn_cell(
cls,
input_seqs,
weight_dicts,
acts,
):
"""
Bidirectional RNN cell
"""
seq_len = len(input_seqs)
forward_outputs, fw_H_t = rnn_cell(
input_seqs,
**weight_dicts[0],
act=acts[0],
)
reverse_outputs, rev_H_t = rnn_cell(
input_seqs,
**weight_dicts[1],
act=acts[1],
backwards=True,
)
final_outputs = []
for i in range(seq_len):
final_outputs.append(
_op.stack([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=0)
)
return (
_op.stack(final_outputs, axis=0),
_op.stack([fw_H_t, rev_H_t], axis=0),
)
@classmethod
def _default_activations(cls, num_directions):
return [_op.tanh] * num_directions
@classmethod
def _get_activations(cls, attr, multiplier, num_directions, rnn_type):
"""
Activation functions
"""
if "activations" in attr:
activations = attr["activations"]
if len(activations) != multiplier * num_directions:
raise NotImplementedError(
"{} assumes {} * num_directions activation functions are provided".format(
rnn_type, multiplier
)
)
alpha_loc = 0
alphas = attr.get("activation_alpha", [])
if isinstance(alphas, float):
alphas = [alphas]
beta_loc = 0
betas = attr.get("activation_beta", [])
if isinstance(betas, float):
betas = [betas]
acts = []
for i in range(multiplier * num_directions):
alpha = None
beta = None
activation = activations[i]
if cls._activation_needs_alpha(activation) and len(alphas) > alpha_loc:
alpha = alphas[alpha_loc]
alpha_loc += 1
if cls._activation_needs_beta(activation) and len(betas) > beta_loc:
beta = betas[beta_loc]
beta_loc += 1
acts.append(cls._activation_helper(activation, alpha, beta))
else:
acts = cls._default_activations(num_directions)
return acts
@classmethod
def _inputs_helper(cls, inputs, layout):
"""
Process inputs
"""
# Unpack inputs, note that if optional and not provided then value will be None.
X = inputs[0]
Wp = inputs[1]
Rp = inputs[2]
Bp = inputs[3]
# Sequence length currently unused as it can be inferred from shapes.
# sequence_lens = inputs['sequence_lens']
Hp_0 = inputs[5]
num_directions = infer_shape(Wp)[0]
if num_directions not in [1, 2]:
raise ValueError("num_directions must be either 1 or 2!")
if layout == 1:
X = _op.transpose(X, axes=(1, 0))
# Initialize state if not provided.
if Hp_0 is None:
W_dtype = infer_type(Wp).checked_type.dtype
X_shape = infer_shape(X)
hidden_size = infer_shape(Rp)[-1]
batch_size = X_shape[1]
Hp_0 = _op.zeros((num_directions, batch_size, hidden_size), W_dtype)
elif layout == 1:
Hp_0 = _op.transpose(Hp_0, axes=(1, 0))
# TODO (vvchernov): It can be replaced by _op.split if issue #8412 is resolved
X_steps = unbind(X, axis=0)
H_ts = _op.split(Hp_0, num_directions)
Ws = _op.split(Wp, num_directions)
Rs = _op.split(Rp, num_directions)
Bs = None
if Bp is not None:
Bs = _op.split(Bp, num_directions)
return X_steps, H_ts, Ws, Rs, Bs, num_directions
@classmethod
def _impl_common(cls, inputs, attr, layout):
X_steps, H_ts, Ws, Rs, Bs, num_directions = cls._inputs_helper(inputs, layout)
acts = cls._get_activations(attr, 1, num_directions, "RNN")
weights_dicts = []
for i in range(num_directions):
weights_dict = {}
weights_dict["hidden_state"] = _op.squeeze(H_ts[i], axis=[0])
weights_dict["w_inp"] = _op.squeeze(Ws[i], axis=[0])
weights_dict["w_hid"] = _op.squeeze(Rs[i], axis=[0])
if Bs is not None:
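                # ONNX concatenates the input bias (Wb) and recurrence bias (Rb)
                # along the last axis; split them so rnn_cell can apply them separately.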
Bi, Bh = _op.split(Bs[i], 2, -1)
weights_dict["b_inp"] = _op.squeeze(Bi, axis=[0])
weights_dict["b_hid"] = _op.squeeze(Bh, axis=[0])
weights_dicts.append(weights_dict)
if num_directions == 2:
output, H = RNN.bidir_rnn_cell(
input_seqs=X_steps,
weight_dicts=weights_dicts,
acts=acts,
)
else:
# outputs shape = [seqs_num, (batch_size, hidden_size)]
outputs, H = rnn_cell(
input_seqs=X_steps,
**weights_dicts[0],
act=acts[0],
)
# output shape = (seqs_num, num_directions, batch_size, hidden_size)
output = _op.expand_dims(_op.stack(outputs, axis=0), axis=1)
H = _op.expand_dims(H, axis=0)
if layout == 1:
output = _op.transpose(output, axes=(1, 0))
H = _op.transpose(H, axes=(1, 0))
return _expr.TupleWrapper(_expr.Tuple((output, H)), 2)
@classmethod
def _impl_v7(cls, inputs, attr, params):
return cls._impl_common(inputs, attr, 0)
@classmethod
def _impl_v14(cls, inputs, attr, params):
layout = attr.get("layout", 0)
return cls._impl_common(inputs, attr, layout)
class LSTM(RNN):
"""Operator converter for LSTM"""
@classmethod
def bidir_lstm_cell(
cls,
input_seqs,
weight_dicts,
acts,
):
"""
Bidirectional LSTM cell
"""
seq_len = len(input_seqs)
forward_outputs, fw_H_t, fw_C_t = lstm_cell(
input_seqs,
**weight_dicts[0],
f_act=acts[0],
g_act=acts[1],
h_act=acts[2],
)
reverse_outputs, rev_H_t, rev_C_t = lstm_cell(
input_seqs,
**weight_dicts[1],
f_act=acts[3],
g_act=acts[4],
h_act=acts[5],
backwards=True,
)
final_outputs = []
for i in range(seq_len):
final_outputs.append(
_op.stack([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=0)
)
return (
_op.stack(final_outputs, axis=0),
_op.stack([fw_H_t, rev_H_t], axis=0),
_op.stack([fw_C_t, rev_C_t], axis=0),
)
@classmethod
def _default_activations(cls, num_directions):
return [_op.sigmoid, _op.tanh, _op.tanh] * num_directions
@classmethod
def _impl_common(cls, inputs, attr, layout):
X_steps, H_ts, Ws, Rs, Bs, num_directions = cls._inputs_helper(inputs, layout)
acts = cls._get_activations(attr, 3, num_directions, "LSTM")
# cell state
Cp_0 = inputs[6]
if Cp_0 is None:
C_ts = _expr.TupleWrapper(
_expr.Tuple([_op.zeros_like(H_ts[i]) for i in range(num_directions)]),
num_directions,
)
else:
if layout == 1:
Cp_0 = _op.transpose(Cp_0, axes=(1, 0))
C_ts = _op.split(Cp_0, num_directions)
# peepholes
Pp = inputs[7]
if Pp is not None:
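            # ONNX packs the peephole weights as [P_i, P_o, P_f] per direction;
            # split them so lstm_cell can use them individually.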
p_i, p_o, p_f = _op.split(Pp, 3, axis=1)
p_is = _op.split(p_i, num_directions)
p_fs = _op.split(p_f, num_directions)
p_os = _op.split(p_o, num_directions)
weights_dicts = []
for i in range(num_directions):
weights_dict = {}
weights_dict["hidden_state"] = _op.squeeze(H_ts[i], axis=[0])
weights_dict["cell_state"] = _op.squeeze(C_ts[i], axis=[0])
# Weights permutation: onnx format i-o-f-c, lstm cell format i-f-c-o
mati, mato, matf, matc = _op.split(_op.squeeze(Ws[i], axis=[0]), 4)
weights_dict["w_inp"] = _op.concatenate([mati, matf, matc, mato], axis=0)
mati, mato, matf, matc = _op.split(_op.squeeze(Rs[i], axis=[0]), 4)
weights_dict["w_hid"] = _op.concatenate([mati, matf, matc, mato], axis=0)
if Bs is not None:
Bi, Bh = _op.split(Bs[i], 2, -1)
mati, mato, matf, matc = _op.split(_op.squeeze(Bi, axis=[0]), 4)
weights_dict["b_inp"] = _op.concatenate([mati, matf, matc, mato], axis=0)
mati, mato, matf, matc = _op.split(_op.squeeze(Bh, axis=[0]), 4)
weights_dict["b_hid"] = _op.concatenate([mati, matf, matc, mato], axis=0)
if Pp is not None:
weights_dict["p_i"] = _op.squeeze(p_is[i], axis=[0])
weights_dict["p_f"] = _op.squeeze(p_fs[i], axis=[0])
weights_dict["p_o"] = _op.squeeze(p_os[i], axis=[0])
weights_dicts.append(weights_dict)
if num_directions == 2:
output, H, C = LSTM.bidir_lstm_cell(
input_seqs=X_steps,
weight_dicts=weights_dicts,
acts=acts,
)
else:
# outputs shape = [seqs_num, (batch_size, hidden_size)]
outputs, H, C = lstm_cell(
input_seqs=X_steps,
**weights_dicts[0],
f_act=acts[0],
g_act=acts[1],
h_act=acts[2],
)
# output shape = (seqs_num, num_directions, batch_size, hidden_size)
output = _op.expand_dims(_op.stack(outputs, axis=0), axis=1)
H = _op.expand_dims(H, axis=0)
C = _op.expand_dims(C, axis=0)
if layout == 1:
output = _op.transpose(output, axes=(1, 0))
H = _op.transpose(H, axes=(1, 0))
C = _op.transpose(C, axes=(1, 0))
return _expr.TupleWrapper(_expr.Tuple((output, H, C)), 3)
class GRU(RNN):
"""Operator convert for GRU"""
@classmethod
def bidir_gru_cell(
cls,
input_seqs,
weight_dicts,
acts,
):
"""
Bidirectional GRU cell
"""
seq_len = len(input_seqs)
forward_outputs, fw_H_t = gru_cell(
input_seqs,
**weight_dicts[0],
rz_act=acts[0],
n_act=acts[1],
)
reverse_outputs, rev_H_t = gru_cell(
input_seqs,
**weight_dicts[1],
rz_act=acts[2],
n_act=acts[3],
backwards=True,
)
final_outputs = []
for i in range(seq_len):
final_outputs.append(
_op.stack([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=0)
)
return (
_op.stack(final_outputs, axis=0),
_op.stack([fw_H_t, rev_H_t], axis=0),
)
@classmethod
def _default_activations(cls, num_directions):
return [_op.sigmoid, _op.tanh] * num_directions
@classmethod
def _impl_common(cls, inputs, attr, layout):
X_steps, H_ts, Ws, Rs, Bs, num_directions = cls._inputs_helper(inputs, layout)
acts = cls._get_activations(attr, 2, num_directions, "GRU")
linear_before_reset = attr.get("linear_before_reset", 0)
weights_dicts = []
for i in range(num_directions):
weights_dict = {}
weights_dict["hidden_state"] = _op.squeeze(H_ts[i], axis=[0])
weights_dict["linear_before_reset"] = linear_before_reset
            # Weights permutation: ONNX gate order is z-r-n, gru_cell expects r-z-n
matz, matr, matn = _op.split(_op.squeeze(Ws[i], axis=[0]), 3)
weights_dict["w_inp"] = _op.concatenate([matr, matz, matn], axis=0)
matz, matr, matn = _op.split(_op.squeeze(Rs[i], axis=[0]), 3)
weights_dict["w_hid"] = _op.concatenate([matr, matz, matn], axis=0)
if Bs is not None:
Bi, Bh = _op.split(Bs[i], 2, -1)
matz, matr, matn = _op.split(_op.squeeze(Bi, axis=[0]), 3)
weights_dict["b_inp"] = _op.concatenate([matr, matz, matn], axis=0)
matz, matr, matn = _op.split(_op.squeeze(Bh, axis=[0]), 3)
weights_dict["b_hid"] = _op.concatenate([matr, matz, matn], axis=0)
weights_dicts.append(weights_dict)
if num_directions == 2:
output, H = GRU.bidir_gru_cell(
input_seqs=X_steps,
weight_dicts=weights_dicts,
acts=acts,
)
else:
# outputs shape = [seqs_num, (batch_size, hidden_size)]
outputs, H = gru_cell(
input_seqs=X_steps,
**weights_dicts[0],
rz_act=acts[0],
n_act=acts[1],
)
# output shape = (seqs_num, num_directions, batch_size, hidden_size)
output = _op.expand_dims(_op.stack(outputs, axis=0), axis=1)
H = _op.expand_dims(H, axis=0)
if layout == 1:
output = _op.transpose(output, axes=(1, 0))
H = _op.transpose(H, axes=(1, 0))
return _expr.TupleWrapper(_expr.Tuple((output, H)), 2)
class Resize(OnnxOpConverter):
"""Operator converter for Resize"""
@classmethod
def _impl_v10(cls, inputs, attr, params):
mode = attr.get("mode").decode("ascii")
if mode == "nearest":
method = "nearest_neighbor"
elif mode == "linear":
method = "linear"
elif mode == "cubic":
method = "cubic"
else:
raise tvm.error.OpAttributeInvalid(
'Value {} in attribute "mode" of operator Resize is not valid.'.format(mode)
)
scale = inputs[1]
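        # Opset-10 Resize only takes scales; compute the output size by multiplying
        # the (cast) input shape by the scales, then slice out the spatial dims.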
size = _op.cast(shape_of(inputs[0]), infer_type(scale).checked_type.dtype) * scale
ndims = len(infer_shape(inputs[0]))
out = None
if ndims == 3:
out_size = fold_constant(_op.strided_slice(size, [2], [3]))
out = _op.image.resize1d(inputs[0], out_size, None, "NCW", method, "asymmetric")
elif ndims == 4:
out_size = fold_constant(_op.strided_slice(size, [2], [4]))
out = _op.image.resize2d(inputs[0], out_size, None, "NCHW", method, "asymmetric")
elif ndims == 5:
out_size = fold_constant(_op.strided_slice(size, [2], [5]))
out = _op.image.resize3d(inputs[0], out_size, None, "NCDHW", method, "asymmetric")
else:
raise NotImplementedError("Resize only supports 3, 4, or 5 dims")
return out
@classmethod
def _impl_v11(cls, inputs, attr, params):
scale = inputs[2]
scale_shape = infer_shape(scale)
if len(inputs) == 4:
assert (
len(scale_shape) == 0 or scale_shape[0] == 0
), "One of scale or size should be passed, not both."
size = inputs[3]
else:
assert len(scale_shape) != 0, "One of scale or size should be passed."
size = _op.cast(shape_of(inputs[0]), infer_type(scale).checked_type.dtype) * scale
return cls.v11_13_common(inputs, size, attr, params)
@classmethod
def _impl_v13(cls, inputs, attr, params):
scale = inputs[2]
size = inputs[3]
        # Some ONNX exporters produce an opset-13 model that still uses the opset-11
        # Resize signature; handle that edge case.
if scale is not None and size is not None:
return cls._impl_v11(inputs, attr, params)
if size is not None:
assert scale is None, "One of scale or size should be passed, not both."
else:
scale_type = infer_type(scale)
scale_shape = scale_type.checked_type.shape
scale_dtype = scale_type.checked_type.dtype
assert len(scale_shape) != 0, "One of scale or size should be passed."
size = _op.cast(shape_of(inputs[0]), scale_dtype) * scale
return cls.v11_13_common(inputs, size, attr, params)
@classmethod
def v11_13_common(cls, inputs, size, attr, params):
"""
Resize v11 and Resize v13 are identical except in how
they handle the passing of scale and size. This utility
provides the implementation for both
"""
roi = inputs[1]
if roi is not None and infer_shape(roi)[0] == 0:
roi = None
ndims = len(infer_shape(inputs[0]))
mode = attr.get("mode").decode("ascii")
if mode == "nearest":
method = "nearest_neighbor"
elif mode == "linear":
method = "linear"
elif mode == "cubic":
method = "cubic"
else:
raise tvm.error.OpAttributeInvalid(
'Value {} in attribute "mode" of operator Resize is not valid.'.format(mode)
)
coord_trans = attr.get("coordinate_transformation_mode", b"half_pixel").decode("ascii")
nearest_mode = attr.get("nearest_mode", b"round_prefer_floor").decode("ascii")
alpha = attr.get("cubic_coeff_a", -0.75)
exclude = attr.get("exclude_outside", 0)
extrapolation_value = attr.get("extrapolation_value", 0.0)
if roi is not None:
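            # ONNX supplies the ROI as [start_1, ..., start_N, end_1, ..., end_N];
            # keep only the spatial entries (drop batch and channel) before resizing.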
roi = fold_constant(
_op.concatenate(
[
_op.strided_slice(roi, [2], [ndims]),
_op.strided_slice(roi, [ndims + 2], [2 * ndims]),
],
axis=0,
)
)
out_size = fold_constant(_op.strided_slice(size, [2], [ndims]))
out = None
if ndims == 3:
out = _op.image.resize1d(
inputs[0],
out_size,
roi,
"NCW",
method,
coord_trans,
nearest_mode,
alpha,
exclude,
extrapolation_value,
)
elif ndims == 4:
out = _op.image.resize2d(
inputs[0],
out_size,
roi,
"NCHW",
method,
coord_trans,
nearest_mode,
alpha,
exclude,
extrapolation_value,
)
elif ndims == 5:
out = _op.image.resize3d(
inputs[0],
out_size,
roi,
"NCDHW",
method,
coord_trans,
nearest_mode,
alpha,
exclude,
extrapolation_value,
)
else:
raise NotImplementedError("Resize only supports 3, 4, or 5 dims")
return out
class NonZero(OnnxOpConverter):
"""Operator converter for NonZero"""
@classmethod
def _impl_v9(cls, inputs, attr, params):
if len(inputs) > 1:
raise ValueError("Expect 1 input only")
output = AttrCvt(op_name="argwhere")(inputs, attr, params)
# ONNX NonZero always outputs int64
output = _op.cast(output, "int64")
return _op.transpose(output, axes=(1, 0))
class ReverseSequence(OnnxOpConverter):
"""Operator converter for ReverseSequence"""
@classmethod
def _impl_v10(cls, inputs, attr, params):
return _op.reverse_sequence(inputs[0], inputs[1], attr["time_axis"], attr["batch_axis"])
class TopK(OnnxOpConverter):
"""Operator converter for TopK"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if len(inputs) != 2:
raise ValueError("Expect 2 input only")
axis = attr.get("axis", -1)
largest = attr.get("largest", 1)
if largest == 0:
# TODO(mbrookhart): optimize this by adding a smallest attribute to topi if this
# ever becomes a bottleneck
ndim = len(infer_shape(inputs[0]))
if axis < 0:
axis += ndim
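            # Relay's topk only returns the largest values, so emulate "smallest k"
            # by sorting ascending and slicing the first k entries along `axis`.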
sort = _op.sort(inputs[0], axis=axis)
argsort = _op.argsort(inputs[0], axis=axis, dtype="int64")
begin = [0] * ndim
stride = [1] * ndim
end = _op.concatenate(
[
_op.const([np.iinfo(np.int64).max] * axis, dtype="int64"),
inputs[1],
_op.const([np.iinfo(np.int64).max] * (ndim - axis - 1), dtype="int64"),
],
axis=0,
)
return _expr.TupleWrapper(
_expr.Tuple(
[
_op.strided_slice(sort, begin, end, stride),
_op.strided_slice(argsort, begin, end, stride),
]
),
2,
)
return _op.topk(inputs[0], inputs[1], axis=axis, dtype="int64")
class Range(OnnxOpConverter):
"""Operator converter for Range"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if len(inputs) != 3:
raise ValueError("Expect 3 input only")
return _op.arange(
inputs[0], inputs[1], inputs[2], dtype=infer_type(inputs[0]).checked_type.dtype
)
class IsInf(OnnxOpConverter):
"""Operator converter for IsInf"""
@classmethod
def _impl_v10(cls, inputs, attr, params):
detect_negative = attr.get("detect_negative", 1)
detect_positive = attr.get("detect_positive", 1)
dtype = infer_type(inputs[0]).checked_type.dtype
isinf = _op.isinf(inputs[0])
if not detect_negative:
isinf = isinf * (inputs[0] > _op.const(0, dtype))
if not detect_positive:
isinf = isinf * (inputs[0] < _op.const(0, dtype))
return isinf
class Celu(OnnxOpConverter):
"""Operator convereter for celu"""
@classmethod
def _impl_v12(cls, inputs, attr, params):
x = inputs[0]
dtype = infer_type(x).checked_type.dtype
alpha = _op.const(attr.get("alpha", 1.0), dtype)
zero = _op.const(0, dtype)
one = _op.const(1, dtype)
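        # Celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1))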
out = _op.maximum(zero, x) + _op.minimum(zero, alpha * (_op.exp(x / alpha) - one))
return out
class MaxRoiPool(OnnxOpConverter):
"""Operator converter for MaxRoiPool."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
assert len(inputs) == 2, "MMaxRoiPool op take 2 inputs, {} given".format(len(inputs))
data = inputs[0]
rois = inputs[1]
pooled_shape = attr.get("pooled_shape")
spatial_scale = attr.get("spatial_scale", 1.0)
return _vision.roi_pool(data, rois, pooled_shape, spatial_scale)
class RoiAlign(OnnxOpConverter):
"""Operator converter for RoiAlign."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if len(inputs) != 3:
raise ValueError("Expect 3 inputs only")
x = inputs[0]
rois = inputs[1]
batch_indices = inputs[2]
mode = attr.get("mode", b"avg")
if mode not in (b"avg", b"max"):
raise NotImplementedError("RoiAlign in Relay only uses avg and max modes")
output_height = attr.get("output_height", 1)
output_width = attr.get("output_width", 1)
sampling_ratio = attr.get("sampling_ratio", 0)
spatial_scale = attr.get("spatial_scale", 1.0)
batch_indices = _op.expand_dims(batch_indices, axis=1, num_newaxis=1)
batch_indices = _op.cast(batch_indices, infer_type(rois).checked_type.dtype)
rois = _op.concatenate([batch_indices, rois], 1)
return _vision.roi_align(
x, rois, [output_height, output_width], spatial_scale, sampling_ratio, mode=mode
)
class Clip(OnnxOpConverter):
"""Operator converter for Clip."""
@staticmethod
def convert_attributes(inputs, attr, params):
convert = AttrCvt("clip", transforms={"min": "a_min", "max": "a_max"})
return convert(inputs, attr, params)
@classmethod
def _impl_v1(cls, inputs, attr, params):
if "min" not in attr:
attr["min"] = -np.inf
if "max" not in attr:
attr["max"] = np.inf
return Clip.convert_attributes(inputs, attr, params)
@classmethod
def _impl_v11(cls, inputs, attr, params):
if len(inputs) == 3 and isinstance(inputs[2], _expr.Constant):
attr["max"] = inputs[2].data.numpy().item()
inputs = inputs[0:2]
if len(inputs) >= 2 and isinstance(inputs[1], _expr.Constant):
attr["min"] = inputs[1].data.numpy().item()
inputs = inputs[0:1]
if "min" in attr and "max" in attr:
return Clip.convert_attributes(inputs, attr, params)
        assert len(inputs) <= 3, "Clip-11 takes up to 3 inputs: input, min, max"
result = inputs[0]
for i, op in enumerate([_op.tensor.maximum, _op.tensor.minimum]):
if i < len(inputs) - 1:
if inputs[i + 1] is not None:
result = op(result, inputs[i + 1])
return result
class Softplus(OnnxOpConverter):
"""Operator converter for Softplus."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
data = inputs[0]
data_dtype = infer_type(data).checked_type.dtype
data = _op.exp(data) + _expr.const(1, dtype=data_dtype)
return _op.log(data)
class Loop(OnnxOpConverter):
"""Operator converter for Loop"""
@classmethod
def _impl_v11(cls, inputs, attr, params):
max_loop_count = inputs[0]
cond = inputs[1]
loop_deps = inputs[2:]
num_deps = len(loop_deps)
# Create a copy of the body function to prevent the original
# from being modified.
body = copy.copy(attr["body"])
iter_dtype = infer_type(max_loop_count).checked_type.dtype
# Determine what condition mode we're in.
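        # ONNX Loop supports three modes: a pure for-loop (trip count only), a
        # while-loop (condition only), and a combined form that checks both.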
assert cond is not None or max_loop_count is not None
is_for_loop = max_loop_count is not None and cond is None
is_condition_for_loop = cond is not None and max_loop_count is not None
# Loop inputs will be packed as
# [iter_count, max_count, condition, loop_deps, scan_outputs]
def cond_fn(*loop_inputs):
i = loop_inputs[0]
max_count = loop_inputs[1]
w = loop_inputs[2]
if cond is not None:
out_while = _op.equal(w, _expr.const(True, "bool"))
if max_loop_count is not None:
out_loop = _op.less(i, max_count)
if is_condition_for_loop:
return _op.logical_and(out_while, out_loop)
if is_for_loop:
return out_loop
return out_while
# Get the current graph proto and create a clone for the subgraph
graph_scope = GraphProto.current
subgraph_scope = GraphProto(
graph_scope._shape, graph_scope._dtype, graph_scope._freeze_params
)
# Load nodes from outer graph into inner graph.
subgraph_scope._nodes = graph_scope._nodes.copy()
# Create a list of variables for each value updated in the loop.
def get_var(name, val, scan=False):
checked_type = infer_type(val)
if hasattr(checked_type, "type_annotation"):
checked_type = checked_type.type_annotation
if hasattr(checked_type, "checked_type"):
checked_type = checked_type.checked_type
shape = get_const_tuple(checked_type.shape)
actual_shape = []
for dim in shape:
if isinstance(dim, int) and dim == 0:
actual_shape.append(_ty.Any())
else:
actual_shape.append(dim)
if scan:
return _expr.var(name, shape=[_ty.Any()] + actual_shape, dtype=checked_type.dtype)
return _expr.var(name, shape=actual_shape, dtype=checked_type.dtype)
loop_vars = [
_expr.var(body.input[0].name, shape=(), dtype=iter_dtype), # iteration count
_expr.var("max_count", shape=(), dtype=iter_dtype), # iteration count
get_var(body.input[1].name, cond), # exit condition
]
loop_vars += [get_var(body.input[i + 2].name, v) for i, v in enumerate(loop_deps)]
loop_var_names = [v.name_hint for v in loop_vars]
num_scan_outputs = len(body.output) - (1 + num_deps)
# Construct variables and initial empty tensors for any scan outputs.
# To do this, we'll figure out the output shapes of the body subgraph by importing
# it and doing type inference.
scan_output_vars = []
scan_output_init = []
if num_scan_outputs > 0:
with subgraph_scope:
loop_outputs = subgraph_scope.from_onnx(
body, graph_scope.opset, get_output_expr=True
)
loop_outputs = _expr.TupleWrapper(loop_outputs, len(body.output))
for i in range(num_scan_outputs):
name, _, _, _ = get_info(body.output[i + 1 + num_deps])
output_node = infer_type(loop_outputs[i + 1 + num_deps])
shape = get_const_tuple(output_node.checked_type.shape)
dtype = output_node.checked_type.dtype
scan_output_vars.append(
_expr.var(name, shape=([_ty.Any()] * (len(shape) + 1)), dtype=dtype)
)
scan_output_init.append(
_op.reshape(_expr.const(np.array([]).astype(dtype)), [0] + [1] * len(shape))
)
# Now we can remove loop iter variables from our inner loop's inputs.
# This is kind of a hack since we have graph inputs that we don't
# want to treat as actual inputs.
while len(body.input) != 0:
body.input.pop(0)
# Define the loop body, in this function we need to unpack loop inputs,
# convert the loop subgraph, and pack outputs for the next iteration.
def body_fn(*loop_inputs):
# Unpack inputs
loop_count = loop_inputs[0]
max_count = loop_inputs[1]
cond = loop_inputs[2]
current_vars = list(loop_inputs[3 : (3 + num_deps)])
scan_outputs = loop_inputs[(3 + num_deps) :]
# Prepare body inputs by adding them to node dictionary.
new_inputs = [loop_count, max_count, cond] + current_vars
for i, inp in enumerate(new_inputs):
subgraph_scope._nodes[loop_var_names[i]] = inp
# Get the output of the current loop using the updated inputs.
with subgraph_scope:
loop_outputs = subgraph_scope.from_onnx(
body, graph_scope.opset, get_output_expr=True
)
# Unpack the body outputs and prepare variables for next iteration.
new_cond = loop_outputs[0]
new_loop_vars = [loop_outputs[i] for i in range(1, 1 + num_deps)]
new_scan_outputs = [loop_outputs[i] for i in range(1 + num_deps, len(loop_outputs))]
# Add new scan outputs to tracking
combined_scan_outputs = []
for i, scan in enumerate(scan_outputs):
rank = len(infer_shape(scan)) - 1
new_scan = new_scan_outputs[i]
expand_scan = _op.expand_dims(new_scan, axis=0)
# For non scalar outputs we need to broadcast the initial value.
if rank > 0:
new_scan_shape = shape_of(new_scan, dtype=iter_dtype)
scan_broadcast = _op.concatenate(
[_op.reshape(loop_count, [1]), new_scan_shape], axis=0
)
scan = _op.broadcast_to(scan, scan_broadcast)
combined_scan = _op.concatenate([scan, expand_scan], axis=0)
combined_scan_outputs.append(combined_scan)
# Increment counter.
if max_loop_count is not None:
incr = _expr.const(1, dtype=iter_dtype)
loop_count = loop_count + incr
# Pack loop outputs for next iteration
# [iter_count, cond, loop_deps, loop_scans]
return [loop_count, max_count, new_cond] + new_loop_vars + combined_scan_outputs
# Create the loop function.
loop = fold_constant(_loops.while_loop(cond_fn, loop_vars + scan_output_vars, body_fn))
# Now need to run initial values through the graph.
init_count = _expr.const(0, dtype=iter_dtype)
loop_vals = loop(init_count, max_loop_count, cond, *loop_deps, *scan_output_init)
# Extract final iteration outputs.
if num_deps + num_scan_outputs == 1:
outputs = _expr.TupleGetItem(loop_vals, 3)
else:
outputs = _expr.TupleWrapper(
_expr.Tuple(
[
_expr.TupleGetItem(loop_vals, i + 3)
for i in range(num_deps + num_scan_outputs)
]
),
num_deps + num_scan_outputs,
)
# Update outer graph with constants found in the subgraph.
free_vars = analysis.free_vars(loop)
graph_scope._params.update(subgraph_scope._params)
graph_scope._nodes.update(subgraph_scope._nodes)
for var in free_vars:
graph_scope._nodes.update({var.name_hint: var})
return outputs
class If(OnnxOpConverter):
"""Operator converter for If"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
cond = inputs[0]
# Convert array to bool if needed.
if len(infer_shape(cond)) > 0:
cond = _op.take(cond, _expr.const(0, dtype="int64"))
then_branch = attr.get("then_branch", None)
else_branch = attr.get("else_branch", None)
assert then_branch is not None and else_branch is not None
# Create graph converters for both branches.
graph_scope = GraphProto.current
then_graph = GraphProto(graph_scope._shape, graph_scope._dtype, graph_scope._freeze_params)
then_graph._nodes = graph_scope._nodes.copy()
else_graph = GraphProto(graph_scope._shape, graph_scope._dtype, graph_scope._freeze_params)
else_graph._nodes = graph_scope._nodes.copy()
# Convert each branch to a relay expression.
with then_graph:
then_expr = then_graph.from_onnx(then_branch, graph_scope.opset, get_output_expr=True)
with else_graph:
else_expr = else_graph.from_onnx(else_branch, graph_scope.opset, get_output_expr=True)
# Add constants from both branches to parent graph.
graph_scope._params.update(then_graph._params)
graph_scope._nodes.update(then_graph._nodes)
then_free_vars = analysis.free_vars(then_expr)
for var in then_free_vars:
graph_scope._nodes.update({var.name_hint: var})
graph_scope._params.update(else_graph._params)
graph_scope._nodes.update(else_graph._nodes)
else_free_vars = analysis.free_vars(else_expr)
for var in else_free_vars:
graph_scope._nodes.update({var.name_hint: var})
# Now we can construct the relay if statement and return.
ret = _expr.If(cond, then_expr, else_expr)
if len(then_branch.output) > 1:
ret = _expr.TupleWrapper(ret, len(then_branch.output))
return ret
class Scan(OnnxOpConverter):
"""Operator converter for Scan"""
@classmethod
def _impl_v8(cls, inputs, attr, params):
new_inputs = inputs[1:]
batch_num = infer_shape(inputs[1])[0]
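        # Opset-8 Scan carries an explicit batch dimension; unroll over the batch,
        # run the opset-9 implementation per batch element, and restack the results.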
out = []
for i in range(batch_num):
v9_inputs = [
_op.take(new_inputs[j], _expr.const(i), axis=0) for j in range(len(new_inputs))
]
results = cls._impl_v9(v9_inputs, attr, params)
results = [_op.expand_dims(results[j], axis=0) for j in range(len(results))]
if i == 0:
out = results
else:
out = [_op.concatenate([out[j], results[j]], axis=0) for j in range(len(results))]
out = _expr.TupleWrapper(_expr.Tuple(out), len(out))
return out
@classmethod
def _impl_v9(cls, inputs, attr, params):
body = attr.get("body")
num_scan_inputs = attr.get("num_scan_inputs")
num_all_inputs = len(inputs)
num_state_inputs = len(body.input) - num_scan_inputs
num_state_outputs = num_state_inputs
num_all_outputs = len(body.output)
num_scan_outputs = num_all_outputs - num_state_outputs
scan_input_axes = attr.get("scan_input_axes", [0] * num_scan_inputs)
scan_input_directions = attr.get("scan_input_directions", [0] * num_scan_inputs)
scan_output_axes = list(attr.get("scan_output_axes", [0] * num_scan_outputs))
scan_output_directions = attr.get("scan_output_directions", [0] * num_scan_outputs)
        # The loop count is the same for all scan inputs, so derive it from the first one.
        # strided_slice does not support dynamic axes, so assume the input shapes are static.
max_loop_count = infer_shape(inputs[num_state_inputs])[scan_input_axes[0]]
# Create a copy of the body function to prevent the original
# from being modified.
body = copy.copy(attr["body"])
# Loop inputs will be packed as
# [iter_count, loop_deps, scan_outputs]
def cond_fn(*loop_inputs):
i = loop_inputs[0]
return _op.less(i, relay.const(max_loop_count, "int32"))
# Get the current graph proto and create a clone for the subgraph
graph_scope = GraphProto.current
subgraph_scope = GraphProto(
graph_scope._shape, graph_scope._dtype, graph_scope._freeze_params
)
# Load nodes from outer graph into inner graph.
subgraph_scope._nodes = graph_scope._nodes.copy()
# Create a list of variables for each value updated in the loop.
def get_var(name, val, scan=False):
checked_type = infer_type(val)
if hasattr(checked_type, "type_annotation"):
checked_type = checked_type.type_annotation
if hasattr(checked_type, "checked_type"):
checked_type = checked_type.checked_type
shape = get_const_tuple(checked_type.shape)
actual_shape = []
for dim in shape:
if isinstance(dim, int) and dim == 0:
actual_shape.append(_ty.Any())
else:
actual_shape.append(dim)
if scan:
return _expr.var(name, shape=[_ty.Any()] + actual_shape, dtype=checked_type.dtype)
return _expr.var(name, shape=actual_shape, dtype=checked_type.dtype)
# Construct variables and initial empty tensors for any scan outputs.
# To do this, we'll figure out the output shapes of the body subgraph by importing
# it and doing type inference.
scan_output_vars = []
scan_output_init = []
if num_scan_outputs > 0:
with subgraph_scope:
loop_outputs = subgraph_scope.from_onnx(
body, graph_scope.opset, get_output_expr=True
)
loop_outputs = _expr.TupleWrapper(loop_outputs, len(body.output))
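            # Unlike Loop, the trip count is static here, so scan outputs are
            # pre-allocated at full size and updated with a sliding window each iteration.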
for i in range(num_scan_outputs):
name, _, _, _ = get_info(body.output[i + num_state_outputs])
output_node = infer_type(loop_outputs[i + num_state_outputs])
shape = list(get_const_tuple(output_node.checked_type.shape))
if scan_output_axes[i] < 0:
scan_output_axes[i] = len(shape) + scan_output_axes[i] + 1
shape.insert(scan_output_axes[i], max_loop_count)
dtype = output_node.checked_type.dtype
scan_output_vars.append(_expr.var(name, shape=shape, dtype=dtype))
scan_output_init.append(_op.zeros(shape, dtype))
# loop vars = [iter_count, scan_state, scan_out]
loop_vars = [
_expr.var("iter", shape=(), dtype="int32"), # iteration count
]
loop_vars += [
get_var(body.input[i].name, v) for i, v in enumerate(inputs) if i < num_state_inputs
]
loop_vars += scan_output_vars
body_input_var_names = ["iter"] + [body.input[i].name for i in range(len(body.input))]
        # Now we can remove loop iter variables from our inner loop's inputs.
        # This is kind of a hack since we have graph inputs that we don't
        # want to treat as actual inputs.
while len(body.input) != 0:
body.input.pop(0)
# Define the loop body, in this function we need to unpack loop inputs,
# convert the loop subgraph, and pack outputs for the next iteration.
def body_fn(*loop_inputs):
# Unpack inputs
loop_count = loop_inputs[0]
state_vars = list(loop_inputs[1 : 1 + num_state_inputs])
scan_vars = list(loop_inputs[1 + num_state_inputs :])
            # The loop body consumes the Scan graph's scan inputs as ordinary inputs.
input_scan_exprs = []
for i in range(num_state_inputs, num_all_inputs):
if scan_input_directions[i - num_state_inputs] != 0:
input_scan_exprs.append(
relay.take(
inputs[i],
relay.const(max_loop_count - 1, "int32") - loop_count,
axis=scan_input_axes[i - num_state_inputs],
)
)
else:
input_scan_exprs.append(
relay.take(
inputs[i],
loop_count,
axis=scan_input_axes[i - num_state_inputs],
)
)
# Prepare body inputs by adding them to node dictionary.
body_inputs = [loop_count] + state_vars + input_scan_exprs
for i, inp in enumerate(body_inputs):
subgraph_scope._nodes[body_input_var_names[i]] = inp
# Get the output of the current loop using the updated inputs.
with subgraph_scope:
loop_outputs = subgraph_scope.from_onnx(
body, graph_scope.opset, get_output_expr=True
)
# Unpack the body outputs and prepare variables for next iteration.
new_state_vars = [loop_outputs[i] for i in range(num_state_outputs)]
new_scan_vars = [loop_outputs[i] for i in range(num_state_outputs, num_all_outputs)]
# Add new scan outputs to tracking
combined_scan_outputs = []
for i in range(num_scan_outputs):
if scan_output_directions[i] == 0:
# append new scan output
combined_scan = _op.concatenate(
[scan_vars[i], _op.expand_dims(new_scan_vars[i], axis=scan_output_axes[i])],
axis=scan_output_axes[i],
)
# pop head scan output
combined_scan = _op.strided_slice(
combined_scan,
begin=[1],
end=[max_loop_count + 1],
strides=[1],
axes=[scan_output_axes[i]],
)
else:
# prepend new scan output
combined_scan = _op.concatenate(
[_op.expand_dims(new_scan_vars[i], axis=scan_output_axes[i]), scan_vars[i]],
axis=scan_output_axes[i],
)
# pop tail scan output
combined_scan = _op.strided_slice(
combined_scan,
begin=[0],
end=[max_loop_count],
strides=[1],
axes=[scan_output_axes[i]],
)
combined_scan_outputs.append(combined_scan)
incr = _expr.const(1, dtype="int32")
loop_count = loop_count + incr
# Pack loop outputs for next iteration
# [iter_count, state_var, scan_var]
return [loop_count] + new_state_vars + combined_scan_outputs
# Create the loop function.
loop = fold_constant(_loops.while_loop(cond_fn, loop_vars, body_fn))
# Now need to run initial values through the graph.
init_count = _expr.const(0, dtype="int32")
input_states = [inputs[i] for i in range(num_state_inputs)]
loop_vals = loop(init_count, *input_states, *scan_output_init)
outputs = _expr.TupleWrapper(
_expr.Tuple([_expr.TupleGetItem(loop_vals, i + 1) for i in range(num_all_outputs)]),
num_all_outputs,
)
# Update outer graph with constants found in the subgraph.
free_vars = analysis.free_vars(loop)
graph_scope._params.update(subgraph_scope._params)
graph_scope._nodes.update(subgraph_scope._nodes)
for var in free_vars:
graph_scope._nodes.update({var.name_hint: var})
return outputs
class LinearRegressor(OnnxOpConverter):
"""Operator converter for LinearRegressor."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
data = inputs[0]
coefficients = attr.get("coefficients", 0)
data_shape = infer_shape(data)
targets = attr.get("targets", 1)
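        # Coefficients arrive as a flat list; reshape them to (targets, n_features) so that
        # nn.dense computes data @ coefficients^T, one row of outputs per target.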
coefficients = _expr.const(list(coefficients), dtype="float32")
coefficients_shape = infer_shape(coefficients)
coefficients = _op.reshape(coefficients, (targets, coefficients_shape[0] // targets))
if coefficients_shape[0] // targets < data_shape[-1]:
data = _op.split(data, [coefficients_shape[0] // targets], -1)[0]
mm_out = _op.nn.dense(data, coefficients)
if "intercepts" in attr:
intercepts = attr.get("intercepts", 0)
intercepts = _expr.const(list(intercepts), dtype="float32")
if targets == 1:
return _op.nn.bias_add(mm_out, intercepts, axis=-1)
return get_relay_op("add")(mm_out, intercepts)
return mm_out
class NonMaxSuppression(OnnxOpConverter):
"""Operator converter for NonMaxSuppression."""
@classmethod
def _impl_v10(cls, inputs, attr, params):
# Get parameter values
boxes = inputs[0]
scores = inputs[1]
max_output_boxes_per_class = inputs[2]
iou_threshold = inputs[3]
score_threshold = inputs[4]
boxes_dtype = infer_type(boxes).checked_type.dtype
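        # center_point_box == 1 means boxes are given as (x_center, y_center, width, height);
        # convert them to the (y1, x1, y2, x2) corner format expected by Relay's NMS op.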
if attr.get("center_point_box", 0) != 0:
xc, yc, w, h = _op.split(boxes, 4, axis=2)
half_w = w / _expr.const(2.0, boxes_dtype)
half_h = h / _expr.const(2.0, boxes_dtype)
x1 = xc - half_w
x2 = xc + half_w
y1 = yc - half_h
y2 = yc + half_h
boxes = _op.concatenate([y1, x1, y2, x2], axis=2)
if iou_threshold is None:
iou_threshold = _expr.const(0.0, dtype="float32")
if score_threshold is None:
score_threshold = _expr.const(0.0, dtype="float32")
def conditionally_squeeze_scalar(x):
rank = len(infer_shape(x))
assert rank <= 1, "nms thresholds must be scalars"
if rank == 1:
return _op.squeeze(x, [0])
return x
max_output_boxes_per_class = conditionally_squeeze_scalar(max_output_boxes_per_class)
iou_threshold = conditionally_squeeze_scalar(iou_threshold)
score_threshold = conditionally_squeeze_scalar(score_threshold)
nms_out = _op.vision.all_class_non_max_suppression(
boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold
)
return _op.strided_slice(nms_out[0], _op.const([0], dtype="int64"), nms_out[1])
class ATen(OnnxOpConverter):
"""Operator converter for Pytorch ATen ops."""
@classmethod
def _op_dispatch(cls, operator, inputs, attr, params):
op_map = {
"size": cls._size,
"arange": cls._arange,
"index_put": cls._index_put,
"reshape": cls._reshape,
"embedding_bag": cls._embedding_bag,
}
assert operator in op_map, "Operator %s is not supported." % operator
return op_map[operator](inputs, attr, params)
@classmethod
def _size(cls, inputs, attr, params):
return _op.take(
_op.shape_of(inputs[0], dtype="int64"),
_expr.const(-1, dtype="int64"),
axis=0,
mode="wrap",
)
@classmethod
def _arange(cls, inputs, attr, params):
return _op.arange(inputs[0], inputs[1], inputs[2], dtype="int64")
@classmethod
def _check_index(cls, indices, values):
def unfolding_indices(indices, values):
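            # Broadcast the per-dimension index tensors against each other using tile/repeat
            # so that every combination of indices is enumerated, matching the flattened
            # values tensor returned alongside them.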
n = len(indices)
flatten_indices = []
slices_size = []
for index in indices:
flatten_indices.append(_op.reshape(index, _op.const([-1])))
slices_size.append(infer_shape(flatten_indices[-1])[0])
repeat_size = [1]
tile_size = [1]
for i in range(1, n):
repeat_size.append(slices_size[-i] * repeat_size[-1])
tile_size.append(slices_size[i - 1] * tile_size[-1])
repeat_size.reverse()
            unfold_slices = []
            for i in range(n):
                unfold_slices.append(
                    fold_constant(
                        _op.repeat(_op.tile(flatten_indices[i], (tile_size[i],)), repeat_size[i], 0)
                    )
                )
            return unfold_slices, _op.reshape(values, _op.const([-1]))
values_shape = infer_shape(values)
if len(values_shape) != 1:
return unfolding_indices(indices, values)
return indices, values
@classmethod
def _index_put(cls, inputs, attr, params):
in_tensor = inputs[0]
indices, values = cls._check_index(inputs[1 : len(inputs) - 2], inputs[len(inputs) - 2])
accumulate = inputs[len(inputs) - 1].data.asnumpy() != 0
if not accumulate:
mode = "update"
else:
mode = "add"
index_tensor = _op.stack(indices, axis=0)
return _op.transform.scatter_nd(in_tensor, index_tensor, values, mode)
@classmethod
def _reshape(cls, inputs, attr, params):
return _op.reshape(inputs[0], inputs[1])
@classmethod
def _embedding_bag(cls, inputs, attr, params):
mode_map = {0: _op.sum, 1: _op.mean, 2: _op.max}
mode = attr.get("mode", 1)
reduction_fn = mode_map[mode]
weights, indices, offsets = inputs[0], inputs[1], inputs[2]
offsets_shape = _op.shape_of(offsets, dtype="int64")
indices_shape = _op.stack(
[
_op.take(offsets_shape, _expr.const(0, dtype="int64")),
_expr.const(-1, dtype="int64"),
],
axis=0,
)
indices = _op.reshape(indices, indices_shape)
embedding = _op.take(weights, indices.astype("int64"), axis=0)
rembedding = reduction_fn(embedding, axis=1)
# EmbeddingBag has 4 outputs for some reason despite only one ever being used.
# Fill the rest with 0s.
unused_output = _expr.const(0, dtype="float32")
return _expr.TupleWrapper(
_expr.Tuple((rembedding, unused_output, unused_output, unused_output)), 4
)
@classmethod
def _impl_v1(cls, inputs, attr, params):
operator = attr.get("operator", None).decode("utf-8")
assert operator, "ATen Operator not found"
return cls._op_dispatch(operator, inputs, attr, params)
class QuantizeLinear(OnnxOpConverter):
"""Operator converter for QuantizeLinear."""
@classmethod
def _impl_v10(cls, inputs, attr, params):
data, scale, zp = inputs
out_dtype = infer_type(zp).checked_type.dtype
return _qnn.op.quantize(data, scale, _op.cast(zp, "int32"), 0, out_dtype)
@classmethod
def _impl_v13(cls, inputs, attr, params):
data, scale, zp = inputs
out_dtype = infer_type(zp).checked_type.dtype
axis = attr.get("axis", 1)
if len(infer_shape(data)) < 2:
axis = 0
return _qnn.op.quantize(data, scale, _op.cast(zp, "int32"), axis, out_dtype)
class DequantizeLinear(OnnxOpConverter):
"""Operator converter for QuantizeLinear."""
@classmethod
def _impl_v10(cls, inputs, attr, params):
data, scale, zp = inputs
return _qnn.op.dequantize(data, scale, _op.cast(zp, "int32"), 0)
@classmethod
def _impl_v13(cls, inputs, attr, params):
data, scale, zp = inputs
axis = attr.get("axis", 1)
if len(infer_shape(data)) <= 1:
axis = 0
return _qnn.op.dequantize(data, scale, _op.cast(zp, "int32"), axis)
class DynamicQuantizeLinear(OnnxOpConverter):
"""Operator converter for QuantizeLinear."""
@classmethod
def _impl_v11(cls, inputs, attr, params):
"""This op is deprecated an only supports uint8"""
data = inputs[0]
data_dtype = infer_type(data).checked_type.dtype
zero = _op.const(0, dtype=data_dtype)
maximum = _op.maximum(zero, _op.max(data))
minimum = _op.minimum(zero, _op.min(data))
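        # Following the ONNX reference: the range is widened to include zero, the scale
        # spreads that range over 255 steps, and the zero point maps the data minimum to
        # quantized zero before being rounded and clipped into the uint8 range.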
scale = (maximum - minimum) / _op.const(255, dtype=data_dtype)
zp = zero - _op.min(data) / scale
zp = _op.cast(_op.round(_op.clip(zp, 0, 255)), "uint8")
return _expr.TupleWrapper(
_expr.Tuple(
[_qnn.op.quantize(data, scale, _op.cast(zp, "int32"), 0, "uint8"), scale, zp]
),
size=3,
)
class QLinearConv(OnnxOpConverter):
"""Operator converter for QLinearConv."""
@classmethod
def _impl_v10(cls, inputs, attr, params):
data = inputs[0]
x_scale = get_scalar(inputs[1], params)
x_zero_point = get_scalar(inputs[2], params, "int32")
weight = inputs[3]
w_scale = get_scalar_or_1d_tensor(inputs[4], params)
w_zero_point = get_scalar_or_1d_tensor(inputs[5], params, "int32")
y_scale = fold_constant(get_scalar(inputs[6], params))
y_zero_point = get_scalar(inputs[7], params, "int32")
# Check shapes for per channel quantization
w_scale_shape = infer_shape(w_scale)
w_zero_point_shape = infer_shape(w_zero_point)
if len(w_scale_shape) == 1 or len(w_zero_point_shape) == 1:
m = infer_shape(weight)[0]
if m != w_scale_shape[0] or m != w_zero_point_shape[0]:
raise tvm.error.OpAttributeInvalid(
"The number of elements should be equal to the number of output channels"
)
input_shape = infer_shape(data)
ndim = len(input_shape)
kernel_type = infer_type(weight)
kernel_shapes = [get_const_tuple(kernel_type.checked_type.shape)]
if "kernel_shape" not in attr:
attr["kernel_shape"] = kernel_shapes[0][2:]
if "auto_pad" in attr:
attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
# Warning: Convolution does not yet support dynamic shapes,
# one will need to run dynamic_to_static on this model after import
zp = fold_constant(x_zero_point)
assert isinstance(zp, relay.Constant), "Zero point expected to be a constant"
data = autopad(
data,
attr.get("strides", [1] * (ndim - 2)),
attr["kernel_shape"],
attr.get("dilations", [1] * (ndim - 2)),
pad_value=zp.data,
mode=attr["auto_pad"],
)
elif attr["auto_pad"] == "VALID":
attr["pads"] = tuple([0 for i in range(ndim - 2)])
elif attr["auto_pad"] == "NOTSET":
pass
else:
msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.'
raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"]))
attr.pop("auto_pad")
out_channels = kernel_shapes[0][0]
dilation = attr.get("dilations", [1] * (ndim - 2))
strides = attr.get("strides", [1] * (ndim - 2))
padding = attr["pads"] if "pads" in attr else 0
groups = attr["group"] if "group" in attr else 1
if ndim != 4:
raise tvm.error.OpAttributeInvalid(
"Only 2D kernels are supported for operator QLinearConv."
)
out = _qnn.op.conv2d(
data,
weight,
x_zero_point,
w_zero_point,
x_scale,
w_scale,
kernel_size=attr["kernel_shape"],
channels=out_channels,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
)
use_bias = len(inputs) == 9
if use_bias:
out = _op.nn.bias_add(out, inputs[8])
out_dtype = infer_type(inputs[7]).checked_type.dtype
requantize_scale = _op.multiply(x_scale, w_scale)
# requantize requires y_scale to be constant,
# if y_scale is not constant, doing dequantize -> quantize
if isinstance(y_scale, _expr.Constant):
out = _qnn.op.requantize(
out,
requantize_scale,
_op.const(0, dtype="int32"),
y_scale,
y_zero_point,
out_dtype=out_dtype,
axis=1,
)
else:
out = _qnn.op.dequantize(out, requantize_scale, _op.const(0, dtype="int32"), axis=1)
out = _qnn.op.quantize(out, y_scale, y_zero_point, axis=1, out_dtype=out_dtype)
return out
class QLinearAdd(OnnxOpConverter):
"""Operator converter for QLinearAdd from Microsoft onnxruntime contrib opset."""
@classmethod
def _impl_v10(cls, inputs, attr, params):
a = inputs[0]
a_scale = get_scalar(inputs[1], params)
a_zero_point = get_scalar(inputs[2], params, "int32")
b = inputs[3]
b_scale = get_scalar(inputs[4], params)
b_zero_point = get_scalar(inputs[5], params, "int32")
c_scale = get_scalar(inputs[6], params)
c_zero_point = get_scalar(inputs[7], params, "int32")
dtype = infer_type(a).checked_type.dtype
        ## Onnxruntime doesn't actually do this op in integer, they dequantize to fp32
        ## and then requantize after
        ## https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/core/mlas/lib/qladd.cpp
        a = _qnn.op.dequantize(inputs[0], a_scale, a_zero_point)
        b = _qnn.op.dequantize(inputs[3], b_scale, b_zero_point)
out = _op.add(a, b)
return _qnn.op.quantize(out, c_scale, c_zero_point, out_dtype=dtype)
class QLinearMatMul(OnnxOpConverter):
"""
Operator converter for QLinearMatMul from Microsoft onnxruntime contrib opset.
Limitations:
- Not guaranteed to meet the integer-overflow behavior stipulated in the
ONNX documentation for this operator.
The QLinearMatMul converter is re-used for MatMulInteger and is adapted for
the latter with the optional `expected_out_dtypes` argument.
"""
@classmethod
def _impl_v10(cls, inputs, attr, params, expected_out_dtypes=None):
if expected_out_dtypes is None:
# The default QLinearMatMul converter is expected to have one of
# these output dtypes.
expected_out_dtypes = ["int8", "uint8"]
# Some of the ops used below take scalar-like inputs, and may require either
# of the following:
#
# - the input is Const node (not merely an expression that *could* be reduced
# to a single Const at graph-compilation time)
#
# - the input has a specific dtype
#
# This function attempts to present 'x' in a form that meets both of those
# requirements.
def try_resolve_to_const(x, dtype_override=None):
x2 = try_resolve_var_to_const(x, params)
num_elem = np.prod(infer_shape(x))
if num_elem == 1:
x2 = ensure_scalar_shape(x2)
x_dtype = infer_type(x).checked_type.dtype
if (dtype_override is not None) and (dtype_override != x_dtype):
x2 = _op.cast(x2, dtype_override)
x3 = fold_constant(x2)
return x3
# Unpack the inputs and obtain some type info...
a, a_scale, a_zp, b, b_scale, b_zp, y_scale, y_zp = inputs
a_type = infer_type(a).checked_type # 'T1' in ONNX doc for this op
a_scale_type = infer_type(a_scale).checked_type
a_zp_type = infer_type(a_zp).checked_type
b_type = infer_type(b).checked_type # 'T2' in ONNX doc for this op
b_scale_type = infer_type(b_scale).checked_type
b_zp_type = infer_type(b_zp).checked_type
y_scale_type = infer_type(y_scale).checked_type
y_zp_type = infer_type(y_zp).checked_type # 'T3' in ONNX doc for this op
# Verify type assumptions, based on the ONNX doc for this op...
assert a_type.dtype in ["int8", "uint8"]
assert a_scale_type.dtype == "float32"
assert a_zp_type.dtype == a_type.dtype
assert b_type.dtype in ["int8", "uint8"]
assert b_scale_type.dtype == "float32"
assert b_zp_type.dtype == b_type.dtype
assert y_scale_type.dtype == "float32"
assert y_zp_type.dtype in expected_out_dtypes
# _qnn.op.dense requires the zero-point values to have dtype int32.
a_scale_scalar = try_resolve_to_const(a_scale)
a_zp_scalar = try_resolve_to_const(a_zp, "int32")
b_scale_scalar = try_resolve_to_const(b_scale)
b_zp_scalar = try_resolve_to_const(b_zp, "int32")
y_scale_scalar = try_resolve_to_const(y_scale)
y_zp_scalar = try_resolve_to_const(y_zp, "int32")
# TODO: Confirm that we're using 'num_hidden_units' correctly / as intended with
# the '_qnn.op.dense' instance below.
num_hidden_units = infer_shape(b)[-1]
# - Specify the matmul result dtype as int32, so that hopefully the matmul will use
# a 32-bit accumulator as seems to be required by the ONNX op's documentation.
#
# TL;DR:
# The ONNX documentation for this op is clear about acceptable overflow
# behavior during the matmul operation:
# - The scalar multiplication ops MAY NOT overflow.
# - The scalar addition ops, which sum the results of the scalar multiplication,
# MAY overflow, but if they do so, it must behave as one would expect during
# 32-bit integer-addition overflow.
# As of this writing, Relay's qnn.op.dense operator doesn't expose a way for us to
# express these constraints.
#
# TODO: Extend TVM / Relay / TIR / etc. to allow this kind of constraint to be
# expressed in a Relay graph. And then update this importer and various TVM
# backends accordingly.
matmul_result_dtype = "int32"
# TODO(vvchernov): possibly it is better to use unsigned type for result
# if input types are unsigned:
# if a_type.dtype == "uint8" and b_type.dtype == "uint8":
# matmul_result_dtype = "uint32"
matmul_result = qmatmul(
a,
b,
a_zp_scalar,
b_zp_scalar,
a_scale_scalar,
b_scale_scalar,
num_hidden_units,
matmul_result_dtype,
)
# This information might only be found in the C++ code-comments for the
# dense.matmul op, but the quantized tensor returned by _qnn.op.dense
# has scale==(a_scale_scalar * b_scale_scalar), and zero_point==0.
#
# 'matmul_result_zp_scalar' has type 'int32' to satisfy input requirements
# of the [de/re]quantize ops below.
matmul_result_scale_scalar = fold_constant(_op.multiply(a_scale_scalar, b_scale_scalar))
matmul_result_zp_scalar = _op.const(0, dtype="int32")
if "int32" in expected_out_dtypes:
# This is the adaptation of the QLinearMatMul converter for MatMulInteger,
# in the MatMulInteger case we skip the unnecessary requantization step.
return matmul_result
# requantize requires y_scale to be constant,
# if y_scale is not constant, doing dequantize -> quantize
if isinstance(y_scale_scalar, _expr.Constant):
y = _qnn.op.requantize(
matmul_result,
matmul_result_scale_scalar,
matmul_result_zp_scalar,
y_scale_scalar,
y_zp_scalar,
axis=-1,
rounding="TONEAREST",
out_dtype=y_zp_type.dtype,
)
else:
matmul_result_deq = _qnn.op.dequantize(
matmul_result, matmul_result_scale_scalar, matmul_result_zp_scalar, axis=0
)
y = _qnn.op.quantize(
matmul_result_deq, y_scale_scalar, y_zp_scalar, axis=0, out_dtype=y_zp_type.dtype
)
return y
class MatMulInteger(OnnxOpConverter):
"""Operator converter for MatMulInteger."""
@classmethod
def _impl_v10(cls, inputs, attr, params):
a = inputs[0]
b = inputs[1]
a_dtype = infer_type(a).checked_type.dtype
b_dtype = infer_type(b).checked_type.dtype
assert a_dtype in ("int8", "uint8"), "MatMulInteger: invalid dtype for first input"
assert b_dtype in ("int8", "uint8"), "MatMulInteger: invalid dtype for second input"
assert a_dtype == b_dtype, "MatMulInteger: input dtypes must match"
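        # MatMulInteger is QLinearMatMul with unit scales and an int32 output, so build
        # default scales/zero points (overridden below when zero points are provided) and
        # delegate to the QLinearMatMul converter.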
a_scale = _op.const(1.0, dtype="float32")
b_scale = _op.const(1.0, dtype="float32")
out_scale = _op.const(1.0, dtype="float32")
a_zero_point = _op.const(0.0, dtype=a_dtype)
b_zero_point = _op.const(0.0, dtype=b_dtype)
out_zero_point = _op.const(0.0, dtype="int32")
if len(inputs) == 4:
a_zero_point = inputs[2]
b_zero_point = inputs[3]
a_zp_dtype = infer_type(a_zero_point).checked_type.dtype
b_zp_dtype = infer_type(b_zero_point).checked_type.dtype
assert (
a_zp_dtype == a_dtype and b_zp_dtype == b_dtype
), "MatMulInteger: input dtype doesn't match zero point dtype"
elif len(inputs) != 2:
raise AssertionError(
"MatMulInteger op takes 2 or 4 inputs, {} given".format(len(inputs))
)
inputs = [
a,
a_scale,
a_zero_point,
b,
b_scale,
b_zero_point,
out_scale,
out_zero_point,
]
return QLinearMatMul.get_converter(10)(inputs, attr, params, expected_out_dtypes=["int32"])
class QLinearMul(OnnxOpConverter):
"""Operator converter for QLinearMul from Microsoft onnxruntime contrib opset."""
@classmethod
def _impl_v10(cls, inputs, attr, params):
a = inputs[0]
a_scale = get_scalar(inputs[1], params)
a_zero_point = get_scalar(inputs[2], params, "int32")
b = inputs[3]
b_scale = get_scalar(inputs[4], params)
b_zero_point = get_scalar(inputs[5], params, "int32")
y_scale = fold_constant(get_scalar(inputs[6], params))
y_zero_point = get_scalar(inputs[7], params, "int32")
dtype = infer_type(a).checked_type.dtype
## Onnxruntime doesn't actually do this op in integer, they dequantize to fp32
        ## and then requantize after
## https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/core/mlas/lib/qlmul.cpp
a = _qnn.op.dequantize(inputs[0], a_scale, a_zero_point)
b = _qnn.op.dequantize(inputs[3], b_scale, b_zero_point)
out = _op.multiply(a, b)
return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=dtype)
class QLinearLeakyRelu(OnnxOpConverter):
"""Operator converter for QLinearLeakyRelu from Microsoft onnxruntime contrib opset."""
@classmethod
def _impl_v10(cls, inputs, attr, params):
a_scale = get_scalar(inputs[1], params)
a_zero_point = get_scalar(inputs[2], params, "int32")
y_scale = fold_constant(get_scalar(inputs[3], params))
y_zero_point = get_scalar(inputs[4], params, "int32")
alpha = float(attr.get("alpha", 1.0))
dtype = infer_type(inputs[0]).checked_type.dtype
# Onnxruntime doesn't actually do this op in integer, they dequantize to fp32
        # and then requantize after (according to documentation below)
# https://github.com/microsoft/onnxruntime/blob/master/docs/ContribOperators.md#com.microsoft.QLinearLeakyRelu
a = _qnn.op.dequantize(inputs[0], a_scale, a_zero_point)
out = _op.nn.leaky_relu(a, alpha)
return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=dtype)
class QLinearSigmoid(OnnxOpConverter):
"""Operator converter for QLinearSigmoid from Microsoft onnxruntime contrib opset."""
@classmethod
def _impl_v10(cls, inputs, attr, params):
x = inputs[0]
x_scale = get_scalar(inputs[1], params)
x_zero_point = get_scalar(inputs[2], params, "int32")
y_scale = fold_constant(get_scalar(inputs[3], params))
y_zero_point = get_scalar(inputs[4], params, "int32")
dtype = infer_type(x).checked_type.dtype
## Apparently, onnxruntime doesn't do this op in integer, they dequantize to fp32
## and then requantize after:
## https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/core/
## providers/dml/DmlExecutionProvider/src/GraphTransformer.cpp#L245
x = _qnn.op.dequantize(x, x_scale, x_zero_point)
out = _op.sigmoid(x)
return _qnn.op.quantize(out, y_scale, y_zero_point, out_dtype=dtype)
class QLinearConcat(OnnxOpConverter):
"""Operator converter for QLinearConcat from Microsoft onnxruntime contrib opset."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
# which axis to concat on
axis = attr["axis"]
y_scale = fold_constant(get_scalar(inputs[0], params))
y_zero_point = get_scalar(inputs[1], params, "int32")
# input tensors, scales, zero_points
assert (
len(inputs) % 3 == 2
), "Additional input count must be a multiple of 3 -- tensor/scale/zero_point tuples"
tensors = []
scales = []
zero_points = []
for i in range(2, len(inputs), 3):
tensors.append(inputs[i])
scales.append(get_scalar(inputs[i + 1], params))
zero_points.append(get_scalar(inputs[i + 2], params, "int32"))
return _qnn.op.concatenate(tensors, scales, zero_points, y_scale, y_zero_point, axis)
class ConvInteger(OnnxOpConverter):
"""Operator converter for ConvInteger."""
@classmethod
def _impl_v10(cls, inputs, attr, params):
data = inputs[0]
weight = inputs[1]
data_zp = inputs[2]
weight_zp = inputs[3]
if data_zp is None:
data_zp = _expr.const(0, "int32")
if weight_zp is None:
weight_zp = _expr.const(0, "int32")
input_type = infer_type(data)
input_shape = get_const_tuple(input_type.checked_type.shape)
ndim = len(input_shape)
kernel_type = infer_type(weight)
kernel_shape = get_const_tuple(kernel_type.checked_type.shape)
if "kernel_shape" not in attr:
attr["kernel_shape"] = kernel_shape[2:]
if "auto_pad" in attr:
attr["auto_pad"] = attr["auto_pad"].decode("utf-8")
if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"):
# Warning: Convolution does not yet support dynamic shapes,
# one will need to run dynamic_to_static on this model after import
data = autopad(
data,
attr.get("strides", [1] * (ndim - 2)),
attr["kernel_shape"],
attr.get("dilations", [1] * (ndim - 2)),
pad_value=data_zp,
mode=attr["auto_pad"],
)
elif attr["auto_pad"] == "VALID":
attr["pads"] = tuple([0 for i in range(ndim - 2)])
elif attr["auto_pad"] == "NOTSET":
pass
else:
msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.'
raise tvm.error.OpAttributeInvalid(msg.format(attr["auto_pad"]))
attr.pop("auto_pad")
out_channels = kernel_shape[0]
dilation = attr.get("dilations", [1] * (ndim - 2))
strides = attr.get("strides", [1] * (ndim - 2))
padding = attr["pads"] if "pads" in attr else 0
groups = attr["group"] if "group" in attr else 1
if ndim != 4:
raise tvm.error.OpAttributeInvalid(
"Only 2D kernels are supported for operator ConvInteger."
)
return _qnn.op.conv2d(
data,
weight,
_op.cast(data_zp, "int32"),
_op.cast(weight_zp, "int32"),
_expr.const(1.0, "float32"),
_expr.const(1.0, "float32"),
kernel_size=attr["kernel_shape"],
channels=out_channels,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
)
class BitShift(OnnxOpConverter):
"""Operator converter for NonZero"""
@classmethod
def _impl_v11(cls, inputs, attr, params):
if len(inputs) != 2:
raise ValueError("Bitshift expects 2 inputs")
direction = attr.get("direction", "LEFT").decode("ascii")
if direction == "LEFT":
out = _op.left_shift(*inputs)
elif direction == "RIGHT":
out = _op.right_shift(*inputs)
else:
raise ValueError("Unsupported Shift Direction: " + direction)
return out
class Unique(OnnxOpConverter):
"""Operator converter for unique"""
@classmethod
def _impl_v11(cls, inputs, attr, params):
if len(inputs) != 1:
raise ValueError("Unique expects 1 input")
data = inputs[0]
axis = attr.get("axis", None)
if axis is None: # If axis is None, flatten the input before calling unique
data = _op.reshape(data, _op.const([-1]))
else:
data_shape = infer_shape(data)
if len(data_shape) != 1:
raise ValueError("TVM only supports 1D Unique operator.")
is_sorted = attr.get("sorted", 1) # sorted is 0 or 1, 1 by default
# ONNX documentation lists return_counts as optional but there is no input to specify
# whether it is returned. Therefore we'll just always return it.
unique = _op.unique(data, is_sorted=(is_sorted == 1), return_counts=True)
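        # Relay's unique pads its outputs to the input length, so use num_unique to trim
        # each returned tensor down to the real number of unique elements.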
num_unique = unique[3]
trim_unique_lambda = lambda input: _op.strided_slice(input, _op.const([0]), num_unique)
unique_vals = trim_unique_lambda(unique[0])
indices = _op.cast(trim_unique_lambda(unique[1]), "int64") # ONNX always returns int64
inverse_indices = _op.cast(unique[2], "int64") # ONNX always returns int64
counts = _op.cast(trim_unique_lambda(unique[4]), "int64") # ONNX always returns int64
# ONNX unique returns unique, indices, inverse_indices, (optional) counts
return _expr.TupleWrapper(_expr.Tuple([unique_vals, indices, inverse_indices, counts]), 4)
class Einsum(OnnxOpConverter):
"""Operator converter for Einsum"""
@classmethod
def _impl_v12(cls, inputs, attr, params):
equation = attr["equation"].decode("utf-8")
return _op.einsum(inputs, equation)
class Trilu(OnnxOpConverter):
"""Operator converter for Trilu"""
@classmethod
def _impl_v14(cls, inputs, attr, params):
upper = attr.get("upper", True)
if len(inputs) == 2:
data, k = inputs
else:
data = inputs[0]
k = 0
return _op.trilu(data, k, upper)
class GridSample(OnnxOpConverter):
"""Operator converter for GridSample"""
@classmethod
def _impl_v16(cls, inputs, attr, params):
grid = inputs[1]
# onnx grid is of shape (N, H, W, 2) which should be transposed to (N, 2, H, W) for relay
grid = _op.transform.transpose(grid, axes=(0, 3, 1, 2))
method: str = attr.get("mode", b"bilinear").decode("utf-8")
padding_mode: str = attr.get("padding_mode", b"zeros").decode("utf-8")
# onnx default is 0 which should be changed to False in relay
align_corners = attr.get("align_corners", 0) != 0
return _op.image.grid_sample(
inputs[0], grid, method, padding_mode=padding_mode, align_corners=align_corners
)
class RandomNormal(OnnxOpConverter):
"""Operator converter for random_normal"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
dtype = get_type(attr.get("dtype", 1))
mean = attr.get("mean", 0.0)
scale = attr.get("scale", 1.0)
seed = attr.get("seed", None)
shape = attr["shape"]
assert dtype in [
"float32",
"float64",
], "Only float random value generation is currently supported."
if seed is None:
seed = np.random.randint(1e6)
else:
seed = int(seed)
key = _random.threefry_key(seed)
output = _op.random.normal(key, shape, dtype=dtype, mean=mean, scale=scale)
_, vals = _expr.TupleWrapper(output, 2)
return vals
class RandomNormalLike(OnnxOpConverter):
"""Operator converter for random_normal_like"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
dtype = attr.get("dtype", None)
scale = attr.get("scale", 1.0)
mean = attr.get("mean", 0.0)
seed = attr.get("seed", None)
shape = infer_shape(inputs[0])
if dtype is None:
dtype = infer_type(inputs[0]).checked_type.dtype
else:
dtype = get_type(dtype)
assert dtype in [
"float32",
"float64",
], "Only float random value generation is currently supported."
if seed is None:
seed = np.random.randint(1e6)
else:
seed = int(seed)
key = _random.threefry_key(seed)
output = _op.random.normal(key, shape, dtype=dtype, mean=mean, scale=scale)
_, vals = _expr.TupleWrapper(output, 2)
return vals
class RandomUniform(OnnxOpConverter):
"""Operator converter for random_uniform"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
dtype = get_type(attr.get("dtype", 1))
high = attr.get("high", 1.0)
low = attr.get("low", 0.0)
seed = attr.get("seed", None)
shape = attr["shape"]
assert dtype in [
"float32",
"float64",
], "Only float random value generation is currently supported."
if seed is None:
seed = np.random.randint(1e6)
else:
seed = int(seed)
key = _random.threefry_key(seed)
output = _op.random.uniform(key, shape, dtype=dtype, low=low, high=high)
_, vals = _expr.TupleWrapper(output, 2)
return vals
class RandomUniformLike(OnnxOpConverter):
"""Operator converter for random_uniform_like"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
dtype = attr.get("dtype", None)
high = attr.get("high", 1.0)
low = attr.get("low", 0.0)
seed = attr.get("seed", None)
shape = infer_shape(inputs[0])
if dtype is None:
dtype = infer_type(inputs[0]).checked_type.dtype
else:
dtype = get_type(dtype)
assert dtype in [
"float32",
"float64",
], "Only float random value generation is currently supported."
if seed is None:
seed = np.random.randint(1e6)
else:
seed = int(seed)
key = _random.threefry_key(seed)
output = _op.random.uniform(key, shape, dtype=dtype, low=low, high=high)
_, vals = _expr.TupleWrapper(output, 2)
return vals
class Multinomial(OnnxOpConverter):
"""Operator converter for multinomial"""
@classmethod
def _impl_v7(cls, inputs, attr, params):
dtype = attr.get("dtype", "int64")
sample_size = attr.get("sample_size", 1)
seed = attr.get("seed", None)
if seed is None:
seed = np.random.randint(1e6)
key = _op.random.threefry_key(seed)
output = _op.random.multinomial(key, inputs[0], sample_size)
_, indices = _expr.TupleWrapper(output, 2)
return _op.cast(indices, get_type(dtype))
class NegativeLogLikelihoodLoss(OnnxOpConverter):
"""Operator converter for NegativeLogLikehoodLoss"""
VALID_REDUCTIONS = {"mean", "sum", "none"}
@classmethod
def run_calculation(
cls: "NegativeLogLikelihoodLoss",
input_tensor: relay.Expr,
target_tensor: relay.Expr,
weight_tensor: Optional[relay.Expr],
ignore_index: int,
):
"""Run calculation for NegativeLogLikelihood, returning output tensor and
weight tensor used for mean-style reductions.
"""
# Convert negative indices --> positive indices for gather ops, note we have to
# use the original target tensor to interact with ignore_index to have proper behavior.
normalized_target_tensor = normalize_gather_indices(input_tensor, target_tensor, 1)
if weight_tensor is None:
channels = infer_shape(input_tensor)[1]
weight_tensor = relay.ones(
[channels],
dtype=infer_type(input_tensor).checked_type.dtype,
)
loss = -relay.gather(
input_tensor,
axis=1,
indices=relay.expand_dims(normalized_target_tensor, 1),
)
loss = relay.squeeze(loss, axis=[1])
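        # Gather the class weight for each target element so it can scale the loss and,
        # for mean reduction, contribute to the weight denominator returned below.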
expanded_normalized_target_tensor = relay.expand_dims(normalized_target_tensor, 0)
expanded_normalized_target_tensor = relay.nn.batch_flatten(
expanded_normalized_target_tensor
)
flattened_weights = relay.gather_nd(weight_tensor, expanded_normalized_target_tensor)
select_weights = relay.reshape_like(flattened_weights, loss)
loss *= select_weights
if ignore_index is not None:
# "Ignore" values whose target is the ignore_index
mask_tensor = relay.equal(
target_tensor, relay.const(ignore_index, dtype=target_tensor.type_annotation.dtype)
)
mask_tensor = relay.const(1, dtype="int8") - relay.cast(mask_tensor, "int8")
loss = relay.where(
mask_tensor, loss, relay.const(0, infer_type(loss).checked_type.dtype)
)
# This is not explained super clearly in the onnx spec, but masked values don't
# contribute toward the final value in reduction
select_weights *= relay.cast_like(mask_tensor, select_weights)
weight_total = relay.sum(select_weights)
return loss, weight_total
@classmethod
def _impl_v13(cls, inputs, attr, params):
ignore_index = attr.get("ignore_index", None)
reduction = attr.get("reduction", b"mean").decode("utf-8")
if reduction not in cls.VALID_REDUCTIONS:
raise ValueError(
f"Unknown reduction type {reduction}, choices are {cls.VALID_REDUCTIONS}"
)
input_tensor, target_tensor = inputs[0], inputs[1]
if len(inputs) == 3:
weight_tensor = inputs[2]
else:
weight_tensor = None
loss, weight_total = cls.run_calculation(
input_tensor,
target_tensor,
weight_tensor=weight_tensor,
ignore_index=ignore_index,
)
if reduction == "mean":
return relay.sum(loss) / weight_total
if reduction == "sum":
return relay.sum(loss)
# Case reduction == 'none'
return loss
class SoftmaxCrossEntropyLoss(OnnxOpConverter):
"""Operator converter for SCE_loss"""
@classmethod
def _impl_v13(cls, inputs, attr, params):
ignore_index = attr.get("ignore_index", None)
reduction = attr.get("reduction", b"mean").decode("utf-8")
input_tensor, target_tensor = inputs[0], inputs[1]
if len(inputs) == 3:
weight_tensor = inputs[2]
else:
weight_tensor = None
get_log_prob = attr["tvm_custom"]["num_outputs"] == 2
log_softmax_attr = {"axis": 1}
log_softmax_tensor = LogSoftmax.get_converter(13)([input_tensor], log_softmax_attr, None)
loss, weight_total = NegativeLogLikelihoodLoss.run_calculation(
log_softmax_tensor,
target_tensor,
weight_tensor,
ignore_index=ignore_index,
)
if reduction == "mean":
loss = relay.sum(loss) / weight_total
elif reduction == "sum":
loss = relay.sum(loss)
if get_log_prob:
return relay.TupleWrapper(relay.Tuple((loss, log_softmax_tensor)), 2)
return loss
class Adagrad(OnnxOpConverter):
"""Operator converter for adagrad op."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
decay_factor = attr.get("decay_factor", 0.0)
epsilon = attr.get("epsilon", 0.0)
norm_coefficient = attr.get("norm_coefficient", 0.0)
R = inputs[0]
T = inputs[1]
# convert attributes to constants, proper types
dtype_inputs = infer_type(inputs[3]).checked_type.dtype
decay_factor = relay.const(decay_factor, dtype=dtype_inputs)
epsilon = relay.const(epsilon, dtype=dtype_inputs)
norm_coefficient = relay.const(norm_coefficient, dtype=dtype_inputs)
T = relay.cast_like(T, inputs[3])
assert (
len(inputs) - 2
) % 3 == 0, f"Expect triplets for remaining inputs, found {len(inputs) - 2}"
# Remaining inputs are:
# [x_1, x_2 ..., x_1_gradient, x_2_gradient, ... x_1_sq_g, x_2_sq_g...]
num_input_tensors = (len(inputs) - 2) // 3
output_tensors = []
output_accumulated_squared_gradients = []
for i in range(num_input_tensors):
x = inputs[i + 2]
gradient = inputs[i + 2 + num_input_tensors]
accumulated_squared_gradient = inputs[i + 2 + 2 * num_input_tensors]
r = R / (relay.const(1.0, dtype=dtype_inputs) + T * decay_factor)
g_regularized = norm_coefficient * x + gradient
new_accumulated_squared_gradient = (
accumulated_squared_gradient + g_regularized * g_regularized
)
h_adaptive = relay.sqrt(new_accumulated_squared_gradient) + epsilon
x_new = x - r * g_regularized / h_adaptive
output_tensors.append(x_new)
output_accumulated_squared_gradients.append(new_accumulated_squared_gradient)
# append lists together, momentums come after result tensors
result = output_tensors + output_accumulated_squared_gradients
return _expr.TupleWrapper(_expr.Tuple(result), len(result))
class Adam(OnnxOpConverter):
"""Operator converter for Adam op."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = attr.get("alpha", 0.9)
beta = attr.get("beta", 0.999)
# Note in the docs epsilon default is 0.0 but in the tests it is set to 1e-2:
# https://git.io/Ju5C4
epsilon = attr.get("epsilon", 1e-2)
norm_coefficient = attr.get("norm_coefficient", 0.0)
norm_coefficient_post = attr.get("norm_coefficient_post", 0.0)
R = inputs[0]
T = inputs[1]
assert (
len(inputs) - 2
) % 4 == 0, f"Expect 4-lets for remaining inputs, found {len(inputs) - 2}"
# convert attributes to constants, proper types
dtype_inputs = infer_type(inputs[3]).checked_type.dtype
inverse_alpha = relay.const(1 - alpha, dtype=dtype_inputs)
alpha = relay.const(alpha, dtype=dtype_inputs)
inverse_beta = relay.const(1 - beta, dtype=dtype_inputs)
beta = relay.const(beta, dtype=dtype_inputs)
epsilon = relay.const(epsilon, dtype=dtype_inputs)
norm_coefficient = relay.const(norm_coefficient, dtype=dtype_inputs)
norm_coefficient_post = relay.const(norm_coefficient_post, dtype=dtype_inputs)
one = relay.const(1, dtype=dtype_inputs)
T = relay.cast_like(T, inputs[3])
# Remaining inputs are:
# [x_1, x_2 ..., x_1_grad, x_2_grad, ... x_1_g_accum, x_2_g_accum..., x_1_g_sq_accum, ...]
num_input_tensors = (len(inputs) - 2) // 4
output_tensors = []
output_accumulated_gradients = []
output_accumulated_squared_gradients = []
for i in range(num_input_tensors):
x = inputs[i + 2]
g = inputs[i + 2 + num_input_tensors]
v = inputs[i + 2 + 2 * num_input_tensors]
h = inputs[i + 2 + 3 * num_input_tensors]
g_regularized = norm_coefficient * x + g
v_new = alpha * v + inverse_alpha * g_regularized
h_new = beta * h + inverse_beta * g_regularized * g_regularized
h_sqrt = relay.sqrt(h_new) + epsilon
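            # Fold the usual Adam bias correction into the learning rate; guarded by T > 0
            # since the correction terms would divide by zero on the very first step.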
true_branch = R * relay.sqrt(one - relay.power(beta, T)) / (one - relay.power(alpha, T))
R_adjusted = relay.If(T > relay.const(0, dtype=dtype_inputs), true_branch, R)
x_new = x - R_adjusted * (v_new / h_sqrt)
x_result = (one - norm_coefficient_post) * x_new
output_tensors.append(x_result)
output_accumulated_gradients.append(v_new)
output_accumulated_squared_gradients.append(h_new)
# append lists together to get final result
result = (
output_tensors + output_accumulated_gradients + output_accumulated_squared_gradients
)
return _expr.TupleWrapper(_expr.Tuple(result), len(result))
class Momentum(OnnxOpConverter):
"""Operator converter for Momentum op."""
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = attr["alpha"]
beta = attr["beta"]
mode = attr["mode"].decode("utf-8")
norm_coefficient = attr["norm_coefficient"]
assert mode in ["nesterov", "standard"], f"Unknown momentum mode {mode}"
R = inputs[0]
T = inputs[1]
assert (
len(inputs) - 2
) % 3 == 0, f"Expect triplets for remaining inputs, found {len(inputs) - 2}"
# Remaining inputs are:
# [x_1, x_2 ..., x_1_gradient, x_2_gradient, ... x_1_momentum, x_2_momentum...]
num_input_tensors = (len(inputs) - 2) // 3
# convert attributes to constants
dtype_inputs = infer_type(inputs[3]).checked_type.dtype
alpha = relay.const(alpha, dtype=dtype_inputs)
beta = relay.const(beta, dtype=dtype_inputs)
norm_coefficient = relay.const(norm_coefficient, dtype=dtype_inputs)
default_beta = relay.const(1.0, dtype=dtype_inputs)
# Calculate updated values for every input
output_tensors = []
output_momentums = []
for i in range(num_input_tensors):
x = inputs[i + 2]
gradient = inputs[i + 2 + num_input_tensors]
momentum = inputs[i + 2 + 2 * num_input_tensors]
g_regularized = norm_coefficient * x + gradient
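            # The gradient term is not scaled by beta on the very first update (T == 0),
            # hence the fallback to default_beta (1.0) below.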
beta_adjusted = relay.If(T > relay.const(0, dtype="int64"), beta, default_beta)
new_momentum = alpha * momentum + beta_adjusted * g_regularized
if mode == "standard":
x_output = x - R * new_momentum
else:
# mode == 'nesterov'
x_output = x - R * (g_regularized + alpha * new_momentum)
output_tensors.append(x_output)
output_momentums.append(new_momentum)
# append lists together, momentums come after result tensors
result = output_tensors + output_momentums
return _expr.TupleWrapper(_expr.Tuple(result), len(result))
class Round(OnnxOpConverter):
"""Operator converter for round op."""
@classmethod
def _impl_v11(cls, inputs, attr, params):
# Onnx round uses Banker's rounding which rounds .5 to the nearest even integer
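        # e.g. 2.5 rounds to 2 while 3.5 rounds to 4; the mask below adds 1 back only for
        # halfway values whose truncated result is odd.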
x = inputs[0]
dtype = infer_type(x).checked_type.dtype
half = _expr.const(0.5, dtype=dtype)
one = _expr.const(1, dtype=dtype)
two = _expr.const(2, dtype=dtype)
rounded = _op.ceil(x - half)
bankers_mask = one - (_op.ceil(x + half) - _op.floor(x + half))
non_even = _op.abs(_op.mod(rounded, two))
return rounded + (bankers_mask * non_even)
class SequenceConstruct(OnnxOpConverter):
"""Operator converter for sequence construction op."""
@classmethod
def _impl_v11(cls, inputs, attr, params):
# Construct a tuple from input tensors.
return _expr.Tuple(inputs)
class SequenceInsert(OnnxOpConverter):
"""Operator converter for sequence insert op."""
@classmethod
def _impl_v11(cls, inputs, attr, params):
# Insert a new tensor into a tuple of tensors.
input_sequence = inputs[0]
new_tensor = inputs[1]
if len(inputs) == 3:
position = inputs[2]
# Non constant position is not supported.
if isinstance(position, _expr.Constant):
position = position.data.numpy()
elif position.name_hint in params:
position = params[position.name_hint].numpy()
else:
raise NotImplementedError("Position must be a constant.")
else:
position = -1
if position < 0:
position = len(input_sequence) + position + 1
# Convert sequence to a list, insert new tensor, and repackage as Tuple.
tensor_list = [input_sequence[i] for i in range(len(input_sequence))]
# Insert new tensor.
tensor_list.insert(position, new_tensor)
# Create new tuple and return.
return _expr.Tuple(tensor_list)
class ConcatFromSequence(OnnxOpConverter):
"""Operator converter for sequence concatenation op."""
@classmethod
def _impl_v11(cls, inputs, attr, params):
axis = attr.get("axis", 0)
new_axis = attr.get("new_axis", 0)
# If a new axis should be created, just stack input tensors.
if new_axis == 1:
return _op.stack(inputs[0], axis=axis)
return _op.concatenate(inputs[0], axis=axis)
# compatible operators that do NOT require any conversion.
_identity_list = []
# _convert_map defines maps of name to converter functor(callable)
# for 1 to 1 mapping, use Renamer if nothing but name is different
# use AttrCvt if attributes need to be converted
# for 1 to N mapping(composed), use custom callable functions
# for N to 1 mapping, currently not supported(?)
def _get_convert_map(opset):
return {
# defs/experimental
"Identity": Renamer("copy"),
"Optional": Optional_.get_converter(opset),
"OptionalHasElement": OptionalHasElement.get_converter(opset),
"OptionalGetElement": OptionalGetElement.get_converter(opset),
"Affine": Affine.get_converter(opset),
"BitShift": BitShift.get_converter(opset),
"ThresholdedRelu": ThresholdedRelu.get_converter(opset),
"ScaledTanh": ScaledTanh.get_converter(opset),
"ParametricSoftplus": ParametricSoftPlus.get_converter(opset),
"Constant": Constant.get_converter(opset),
"ConstantOfShape": ConstantOfShape.get_converter(opset),
# 'GivenTensorFill'
"FC": AttrCvt("dense", ignores=["axis", "axis_w"]),
"Scale": Scale.get_converter(opset),
# 'GRUUnit'
# 'ATen'
# 'ImageScaler'
"MeanVarianceNormalization": MeanVarianceNormalization.get_converter(opset),
# 'Crop'
# 'Embedding'
"Upsample": Upsample.get_converter(opset),
"SpatialBN": BatchNorm.get_converter(opset),
# defs/generator
# 'RandomUniform'
# 'RandomNormal'
# 'RandomUniformLike'
# 'RandomNormalLike'
# defs/logical
# defs/math
"Add": Add.get_converter(opset),
"Sub": Sub.get_converter(opset),
"Mul": Mul.get_converter(opset),
"Div": Div.get_converter(opset),
"Neg": Renamer("negative"),
"Abs": Absolute.get_converter(opset),
"Reciprocal": Reciprocal.get_converter(opset),
"Floor": Renamer("floor"),
"Ceil": Renamer("ceil"),
"Round": Round.get_converter(opset),
"IsInf": IsInf.get_converter(opset),
"IsNaN": Renamer("isnan"),
"Sqrt": Renamer("sqrt"),
"Relu": Renamer("relu"),
"Celu": Celu.get_converter(opset),
"LeakyRelu": Renamer("leaky_relu"),
"Selu": Selu.get_converter(opset),
"Elu": Elu.get_converter(opset),
"Gelu": Gelu.get_converter(opset),
"FastGelu": FastGelu.get_converter(opset),
"BiasGelu": BiasGelu.get_converter(opset),
"LayerNormalization": LayerNormalization.get_converter(opset),
# TODO: We need a better way to handle different domains, in case
# of name collisions. EmbedLayerNormalization, SkipLayerNormalization, and Attention
# are in the `com.microsoft` domain.
"EmbedLayerNormalization": EmbedLayerNormalization.get_converter(opset),
"SkipLayerNormalization": SkipLayerNormalization.get_converter(opset),
"Attention": Attention.get_converter(opset),
"Exp": Renamer("exp"),
"Greater": Renamer("greater"),
"GreaterOrEqual": Renamer("greater_equal"),
"Less": Renamer("less"),
"LessOrEqual": Renamer("less_equal"),
"Log": Renamer("log"),
"Acos": Renamer("acos"),
"Acosh": Renamer("acosh"),
"Asin": Renamer("asin"),
"Asinh": Renamer("asinh"),
"Atan": Renamer("atan"),
"Atanh": Renamer("atanh"),
"Cos": Renamer("cos"),
"Cosh": Renamer("cosh"),
"Sin": Renamer("sin"),
"Sinh": Renamer("sinh"),
"Tan": Renamer("tan"),
"Tanh": Renamer("tanh"),
"Pow": Pow.get_converter(opset),
"PRelu": Prelu.get_converter(opset),
"Sigmoid": Renamer("sigmoid"),
"HardSigmoid": HardSigmoid.get_converter(opset),
"HardSwish": HardSwish.get_converter(opset),
"Max": Maximum.get_converter(opset),
"Min": Minimum.get_converter(opset),
"Sum": Sum.get_converter(opset),
"Mean": Mean.get_converter(opset),
"Clip": Clip.get_converter(opset),
"Softplus": Softplus.get_converter(opset),
# softmax default axis is different in onnx
"Softmax": Softmax.get_converter(opset),
"LogSoftmax": LogSoftmax.get_converter(opset),
"OneHot": OneHot.get_converter(opset),
"Hardmax": Hardmax.get_converter(opset),
"Shrink": Shrink.get_converter(opset),
"Softsign": Softsign.get_converter(opset),
"Gemm": Gemm.get_converter(opset),
"MatMul": MatMul.get_converter(opset),
"MatMulInteger": MatMulInteger.get_converter(opset),
"MatMulInteger16": MatMulInteger16.get_converter(opset),
"Mod": Mod.get_converter(opset),
"Xor": Renamer("logical_xor"),
# defs/nn
"AveragePool": AveragePool.get_converter(opset),
"LpPool": LpPool.get_converter(opset),
"GlobalLpPool": GlobalLpPool.get_converter(opset),
"MaxPool": MaxPool.get_converter(opset),
"MaxUnpool": MaxUnpool.get_converter(opset),
"Conv": Conv.get_converter(opset),
"ConvTranspose": ConvTranspose.get_converter(opset),
"GlobalAveragePool": GlobalAveragePool.get_converter(opset),
"GlobalMaxPool": GlobalMaxPool.get_converter(opset),
"BatchNormalization": BatchNorm.get_converter(opset),
"InstanceNormalization": InstanceNorm.get_converter(opset),
# 'LpNormalization'
"Dropout": AttrCvt("dropout", {"ratio": "rate"}, ignores=["is_test"]),
"Flatten": Flatten.get_converter(opset),
"LRN": LRN.get_converter(opset),
# Recurrent Layers
"RNN": RNN.get_converter(opset),
"LSTM": LSTM.get_converter(opset),
"GRU": GRU.get_converter(opset),
# defs/vision
"MaxRoiPool": MaxRoiPool.get_converter(opset),
"RoiAlign": RoiAlign.get_converter(opset),
"NonMaxSuppression": NonMaxSuppression.get_converter(opset),
# defs/reduction
"ReduceMax": ReduceMax.get_converter(opset),
"ReduceMin": ReduceMin.get_converter(opset),
"ReduceSum": ReduceSum.get_converter(opset),
"ReduceMean": ReduceMean.get_converter(opset),
"ReduceProd": ReduceProd.get_converter(opset),
"ReduceLogSumExp": ReduceLogSumExp.get_converter(opset),
"ReduceLogSum": ReduceLogSum.get_converter(opset),
"ReduceSumSquare": ReduceSumSquare.get_converter(opset),
"ReduceL1": ReduceL1.get_converter(opset),
"ReduceL2": ReduceL2.get_converter(opset),
# defs/sorting
"ArgMax": ArgMax.get_converter(opset),
"ArgMin": ArgMin.get_converter(opset),
"TopK": TopK.get_converter(opset),
# defs/tensor
"Cast": Cast.get_converter(opset),
"CastLike": CastLike.get_converter(opset),
"Reshape": Reshape.get_converter(opset),
"Expand": Expand.get_converter(opset),
"Concat": Concat.get_converter(opset),
"Split": Split.get_converter(opset),
"Slice": Slice.get_converter(opset),
"Transpose": AttrCvt("transpose", {"perm": "axes"}),
"DepthToSpace": DepthToSpace.get_converter(opset),
"SpaceToDepth": SpaceToDepth.get_converter(opset),
"Gather": Gather.get_converter(opset),
"GatherElements": GatherElements.get_converter(opset),
"GatherND": GatherND.get_converter(opset),
"Compress": Compress.get_converter(opset),
"Size": AttrCvt("ndarray_size", extras={"dtype": "int64"}),
"Scatter": Scatter.get_converter(opset),
"ScatterElements": Scatter.get_converter(opset),
"ScatterND": ScatterND.get_converter(opset),
"EyeLike": EyeLike.get_converter(opset),
"Squeeze": Squeeze.get_converter(opset),
"Unsqueeze": Unsqueeze.get_converter(opset),
"Pad": Pad.get_converter(opset),
"Shape": Shape.get_converter(opset),
"Sign": Sign.get_converter(opset),
"Equal": Equal.get_converter(opset),
"Not": Not.get_converter(opset),
"And": And.get_converter(opset),
"Tile": Tile.get_converter(opset),
"Erf": Erf.get_converter(opset),
"Where": Where.get_converter(opset),
"Or": Or.get_converter(opset),
"Resize": Resize.get_converter(opset),
"NonZero": NonZero.get_converter(opset),
"Range": Range.get_converter(opset),
"CumSum": CumSum.get_converter(opset),
"Unique": Unique.get_converter(opset),
"Einsum": Einsum.get_converter(opset),
"Trilu": Trilu.get_converter(opset),
"GridSample": GridSample.get_converter(opset),
# defs/control_flow
"Loop": Loop.get_converter(opset),
"If": If.get_converter(opset),
# Torch ATen Dispatcher.
"ATen": ATen.get_converter(opset),
# Quantization
"QuantizeLinear": QuantizeLinear.get_converter(opset),
"DequantizeLinear": DequantizeLinear.get_converter(opset),
"DynamicQuantizeLinear": DynamicQuantizeLinear.get_converter(opset),
"ReverseSequence": ReverseSequence.get_converter(opset),
"QLinearConv": QLinearConv.get_converter(opset),
"QLinearConcat": QLinearConcat.get_converter(opset),
"QLinearAdd": QLinearAdd.get_converter(opset),
"QLinearMatMul": QLinearMatMul.get_converter(opset),
"QLinearMul": QLinearMul.get_converter(opset),
"QLinearSigmoid": QLinearSigmoid.get_converter(opset),
"ConvInteger": ConvInteger.get_converter(opset),
"QLinearAveragePool": QLinearAveragePool.get_converter(opset),
"QLinearGlobalAveragePool": QLinearGlobalAveragePool.get_converter(opset),
"QLinearLeakyRelu": QLinearLeakyRelu.get_converter(opset),
# Random number generation.
"RandomNormal": RandomNormal.get_converter(opset),
"RandomNormalLike": RandomNormalLike.get_converter(opset),
"RandomUniform": RandomUniform.get_converter(opset),
"RandomUniformLike": RandomUniformLike.get_converter(opset),
"Multinomial": Multinomial.get_converter(opset),
# Loss functions / training
"NegativeLogLikelihoodLoss": NegativeLogLikelihoodLoss.get_converter(opset),
"SoftmaxCrossEntropyLoss": SoftmaxCrossEntropyLoss.get_converter(opset),
"Adagrad": Adagrad.get_converter(opset),
"Adam": Adam.get_converter(opset),
"Momentum": Momentum.get_converter(opset),
"Scan": Scan.get_converter(opset),
# ML
"LinearRegressor": LinearRegressor.get_converter(opset),
# Sequence operators
"SequenceConstruct": SequenceConstruct.get_converter(opset),
"SequenceInsert": SequenceInsert.get_converter(opset),
"ConcatFromSequence": ConcatFromSequence.get_converter(opset),
}
class GraphProto:
"""A helper class for handling Relay expression copying from pb2.GraphProto.
Definition: https://github.com/onnx/onnx/blob/main/onnx/onnx.proto
Parameters
----------
shape : dict of str to tuple, optional
The input shape to the graph
dtype : str or dict of str to str
The input types to the graph
freeze_params: bool
If this parameter is true, the importer will take any provided
onnx input values (weights, shapes, etc) and embed them into the relay model
as Constants instead of variables. This allows more aggressive optimizations
at compile time and helps in making models static if certain inputs represent
attributes relay would traditionally consider compile-time constants.
"""
current = None
def __init__(self, shape, dtype, freeze_params=False):
self._nodes = {}
self._params = {}
self._inputs = {}
self._renames = {}
self._num_input = 0
self._num_param = 0
self._shape = shape.copy() if shape else {}
self._input_names = []
self._dtype = dtype
self.opset = None
self._freeze_params = freeze_params
def __enter__(self):
self._old_manager = GraphProto.current
GraphProto.current = self
return self
def __exit__(self, ptype, value, trace):
GraphProto.current = self._old_manager
def freeze(self, func, params):
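        """Bind the provided params into the function body as constants and return the
        rebuilt function together with an empty parameter dict."""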
bind_map = {}
for name in params.keys():
if name in self._nodes.keys():
bind_map[self._nodes[name]] = _expr.const(params[name])
body = _expr.bind(func.body, bind_map)
fn = _function.Function(analysis.free_vars(body), body)
return fn, {}
def from_onnx(self, graph, opset, get_output_expr=False):
"""Construct Relay expression from ONNX graph.
Onnx graph is a python protobuf object.
The companion parameters will be handled automatically.
        However, the input names from the onnx graph are vague, mixing inputs and
        network weights/bias such as "1", "2"...
        For convenience, we rename the `real` input names to "input_0",
        "input_1"... and rename parameters to "param_0", "param_1"...
Parameters
----------
graph : onnx protobuf object
The loaded onnx graph
opset : opset version
get_output_expr: bool
If set to true, this conversion will return each output expression rather
than a packaged module. This can be useful when converting subgraphs to
relay.
Returns
-------
mod : tvm.IRModule
The returned relay module
params : dict
A dict of name: tvm.nd.array pairs, used as pretrained weights
"""
self.opset = opset
self._parse_graph_initializers(graph)
self._parse_graph_input(graph)
self._check_user_inputs_in_outermost_graph_scope()
self._check_for_unsupported_ops(graph)
self._construct_nodes(graph)
# now return the outputs
outputs = [self._nodes[self._parse_value_proto(i)] for i in graph.output]
outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
# If requested, directly return the converted expressions.
if get_output_expr:
return outputs
## Maintain the order of inputs and parameters from the ONNX graph, but only include
## those parameters that are needed to execute the relay graph
free_vars = analysis.free_vars(outputs)
nodes = {v: k for k, v in self._nodes.items()}
free_vars = [nodes[var] for var in free_vars]
for i_name in self._params:
if i_name in free_vars and i_name not in self._inputs:
self._inputs[i_name] = self._nodes[i_name]
# Create a function from our output expression and all input variables.
func = _function.Function([v for k, v in self._inputs.items()], outputs)
return IRModule.from_expr(func), self._params
def _parse_graph_initializers(self, graph):
"""Parse network inputs to relay, aka parameters."""
for init_tensor in graph.initializer:
if not init_tensor.name.strip():
raise ValueError("Tensor's name is required.")
array = self._parse_array(init_tensor)
if self._freeze_params:
self._nodes[init_tensor.name] = _expr.const(array)
else:
self._params[init_tensor.name] = array
self._nodes[init_tensor.name] = new_var(
init_tensor.name,
shape=self._params[init_tensor.name].shape,
dtype=self._params[init_tensor.name].dtype,
)
def _parse_graph_input(self, graph):
for i in graph.input:
# from onnx v0.2, GraphProto.input has type ValueInfoProto,
# and the name is 'i.name'
i_name, i_shape, d_type, i_shape_name = get_info(i)
if i_name in self._params:
# i is a param instead of input
self._num_param += 1
self._nodes[i_name] = new_var(
i_name, shape=self._params[i_name].shape, dtype=self._params[i_name].dtype
)
elif i_name in self._nodes:
continue
else:
self._num_input += 1
self._input_names.append(i_name)
if i_name in self._shape:
i_shape = self._shape[i_name]
else:
if "?" in str(i_shape):
warning_msg = (
"Input %s has unknown dimension shapes: %s. "
"Specifying static values may improve performance"
% (i_name, str(i_shape_name))
)
warnings.warn(warning_msg)
if isinstance(self._dtype, dict):
dtype = self._dtype[i_name] if i_name in self._dtype else d_type
else:
dtype = d_type
self._nodes[i_name] = new_var(i_name, shape=i_shape, dtype=dtype)
self._inputs[i_name] = self._nodes[i_name]
def _check_user_inputs_in_outermost_graph_scope(self):
"""Only check user inputs in the outer-most graph scope."""
if self._old_manager is None:
assert all(
[name in self._input_names for name in self._shape.keys()]
), "User specified the shape for inputs that weren't found in the graph: " + str(
self._shape
)
def _check_for_unsupported_ops(self, graph):
convert_map = _get_convert_map(self.opset)
unsupported_ops = set()
for node in graph.node:
op_name = node.op_type
if (
op_name not in convert_map
and op_name != "Constant"
and op_name not in _identity_list
):
unsupported_ops.add(op_name)
if unsupported_ops:
msg = "The following operators are not supported for frontend ONNX: "
msg += ", ".join(unsupported_ops)
raise tvm.error.OpNotImplemented(msg)
def _construct_nodes(self, graph):
"""Nodes are stored as directed acyclic graph."""
for node in graph.node:
op_name = node.op_type
attr = self._parse_attr(node.attribute)
# Create and populate input list.
inputs = onnx_input()
for i in node.input:
if i != "":
inputs.append(self._nodes[self._renames.get(i, i)])
else:
inputs.append(None)
i_name = self._parse_value_proto(node)
node_output = self._fix_outputs(op_name, node.output)
attr["tvm_custom"] = {}
attr["tvm_custom"]["name"] = i_name
attr["tvm_custom"]["num_outputs"] = len(node_output)
op = self._convert_operator(op_name, inputs, attr, self.opset)
if not isinstance(op, _expr.TupleWrapper):
outputs_num = 1
else:
outputs_num = len(op)
if outputs_num == 1:
op = fold_constant(op)
else:
op = _expr.TupleWrapper(fold_constant(op.astuple()), len(op))
if outputs_num > 1:
# ONNX supports optional outputs for some nodes.
# This block searches for missing outputs in the ONNX graph
# and removes any unneeded ops
valid_outputs = [False] * outputs_num
for i, output in enumerate(node_output):
if output != "":
valid_outputs[i] = True
# If we have outputs ONNX isn't expecting, we need to drop them
if not all(valid_outputs):
tup = op.astuple()
# TupleWrapper can also wrap ops with TupleType outputs
if isinstance(tup, _expr.Tuple):
# For tuples, we extract the fields instead of using GetTupleItem
outputs = [tup.fields[i] for i, valid in enumerate(valid_outputs) if valid]
else:
# For call nodes, we need to GetTupleItem
outputs = [op[i] for i, valid in enumerate(valid_outputs) if valid]
# Create the new op with valid outputs
if len(outputs) == 1:
op = outputs[0]
elif len(outputs) != outputs_num:
op = _expr.TupleWrapper(_expr.Tuple(outputs), len(outputs))
# Drop invalid outputs for the onnx node
outputs_num = len(outputs)
node_output = [output for output in node_output if output != ""]
assert (
len(node_output) == outputs_num
), "Number of output mismatch {} vs {} in {}.".format(
len(node_output), outputs_num, op_name
)
if outputs_num == 1:
self._nodes[node_output[0]] = op
else:
for k, i in zip(list(node_output), range(len(node_output))):
self._nodes[k] = op[i]
def _parse_value_proto(self, value_proto):
"""Parse ValueProto or raw str."""
try:
name = value_proto.name
except AttributeError:
name = value_proto
return name
def _parse_array(self, tensor_proto):
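"""Grab data in TensorProto and convert to numpy array."""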
np_array = get_numpy(tensor_proto).reshape(tuple(tensor_proto.dims))
return _nd.array(np_array)
def _parse_attr(self, attr_proto):
"""Convert a list of AttributeProto to a dict, with names as keys."""
attrs = {}
for a in attr_proto:
for f in ["f", "i", "s", "g"]:
if a.HasField(f):
attrs[a.name] = getattr(a, f)
for f in ["floats", "ints", "strings"]:
if list(getattr(a, f)):
assert a.name not in attrs, "Only one type of attr is allowed"
attrs[a.name] = tuple(getattr(a, f))
for f in ["t"]:
if a.HasField(f):
attrs[a.name] = getattr(a, f)
for f in ["tensors"]:
if list(getattr(a, f)):
assert a.name not in attrs, "Only one type of attr is allowed"
attrs[a.name] = tuple(getattr(a, f))
for f in ["graphs"]:
if list(getattr(a, f)):
raise NotImplementedError("Field {} is not supported in relay.".format(f))
if a.name not in attrs:
raise ValueError("Cannot parse attribute: \n{}\n.".format(a))
return attrs
def _convert_operator(self, op_name, inputs, attrs, opset):
"""Convert ONNX operator into a Relay operator.
The converter must specify conversions explicitly for incompatible names, and
apply handlers to operator attributes.
Parameters
----------
op_name : str
Operator name, such as Convolution, FullyConnected
inputs : list of tvm.relay.function.Function
List of inputs.
attrs : dict
Dict of operator attributes
opset : int
Opset version
Returns
-------
sym : tvm.relay.function.Function
Converted relay function
"""
convert_map = _get_convert_map(opset)
if op_name in _identity_list:
sym = get_relay_op(op_name)(*inputs, **attrs)
elif op_name in convert_map:
sym = convert_map[op_name](inputs, attrs, self._params)
else:
raise NotImplementedError("Operator {} not implemented.".format(op_name))
return sym
def _fix_outputs(self, op_name, outputs):
"""A hack to handle dropout or similar operator that have more than one out
in ONNX.
"""
if op_name == "Dropout":
if len(outputs) == 1:
return outputs
# TODO(zhreshold): support dropout mask?
outputs = outputs[:-1]
return outputs
def from_onnx(
model, shape=None, dtype="float32", opset=None, freeze_params=True, convert_config=None
):
"""Convert a ONNX model into an equivalent Relay Function.
ONNX graphs are represented as Python Protobuf objects.
The companion parameters will be handled automatically.
However, the input names from the onnx graph are ambiguous, mixing inputs and
network weights/biases such as "1", "2"...
For convenience, we rename the `real` input names to "input_0",
"input_1"... and the parameters to "param_0", "param_1"...
By default, ONNX defines models in terms of dynamic shapes. The ONNX importer
retains that dynamism upon import, and the compiler attempts to convert the
model into static shapes at compile time. If this fails, there may still
be dynamic operations in the model. Not all TVM kernels currently support
dynamic shapes; please file an issue on discuss.tvm.apache.org
if you hit an error with dynamic kernels.
Parameters
----------
model : protobuf object
ONNX ModelProto after ONNX v1.1.0
shape : dict of str to tuple, optional
The input shape to the graph
dtype : str or dict of str to str
The input types to the graph
opset : int, optional
Override to autodetected opset.
This can be helpful for some testing.
freeze_params : bool
If this parameter is true, the importer will take any provided
onnx input values (weights, shapes, etc.) and embed them into the relay model
as Constants instead of variables. This allows more aggressive optimizations
at compile time and helps make models static if certain inputs represent
attributes relay would traditionally consider compile-time constants.
convert_config : Optional[Dict[str, Any]]
Default config:
use_nt_batch_matmul : bool = True
True to convert qualifying onnx `matmul` ops to `nn.batch_matmul` in strict NT format
(transpose_a=False, transpose_b=True).
Returns
-------
mod : tvm.IRModule
The relay module for compilation
params : dict of str to tvm.nd.NDArray
The parameter dict to be used by relay
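Examples
--------
A minimal usage sketch; the model path and the input name below are
placeholders, not values taken from any particular model:
>>> import onnx
>>> from tvm import relay
>>> onnx_model = onnx.load("model.onnx")
>>> mod, params = relay.frontend.from_onnx(
...     onnx_model, shape={"input": (1, 3, 224, 224)}, dtype="float32"
... )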
"""
global ONNX_DEFAULT_CONFIGS
if convert_config is not None:
ONNX_DEFAULT_CONFIGS.update(convert_config)
try:
import onnx
if hasattr(onnx.checker, "check_model"):
# try use onnx's own model checker before converting any model
try:
onnx.checker.check_model(model)
except Exception as e: # pylint: disable=c-extension-no-member, broad-except
# the checker can be overly strict about errors, so simply emit warnings here
warnings.warn(str(e))
except ImportError:
pass
g = GraphProto(shape, dtype, freeze_params)
graph = model.graph
try:
opset_in_model = 1
if model.opset_import:
# TODO: for now we only really support ai.onnx op set
# TODO: handle other namespaces well see https://github.com/apache/tvm/issues/10950
for opset_identifier in model.opset_import:
# As per https://github.com/onnx/onnx/blob/main/docs/IR.md
# All operator sets except the default one must specify the operator version
if str(opset_identifier.domain) in ["ai.onnx", ""]:
opset_in_model = opset_identifier.version
break
except AttributeError:
opset_in_model = 1
if opset is None:
opset = opset_in_model
elif opset < opset_in_model:
warnings.warn(
""
f"You are overwritting original opset ver = {opset_in_model} by lower ver = {opset}. "
f"That might cause model conversion errors."
)
# Use the graph proto as a scope so that ops can access other nodes if needed.
with g:
mod, params = g.from_onnx(graph, opset)
if freeze_params:
mod = relay.transform.DynamicToStatic()(mod)
return mod, params
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/paddlepaddle.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines
# pylint: disable=import-outside-toplevel
"""Paddle: PArallel Distributed Deep LEarning."""
import warnings
import numpy as np
import tvm
from tvm.ir import IRModule
from ... import nd as _nd
from .. import analysis
from .. import ty as _ty
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from .common import (
autopad,
fold_constant,
get_relay_op,
infer_shape,
infer_type,
infer_value,
shape_of,
try_infer_value,
new_var,
)
__all__ = ["from_paddle"]
def _dtype_shape_promotion(inputs):
"""Promote data type and shape for list of tensors."""
dtype_order = ["bool", "int8", "int16", "int32", "int64", "float32", "float64"]
ranks = [len(infer_shape(x)) for x in inputs]
if set(ranks) == set([1, 0]):
for i, r in enumerate(ranks):
if r == 0:
inputs[i] = _op.expand_dims(inputs[i], axis=0)
dtypes = set(dtype_order.index(infer_type(x).checked_type.dtype) for x in inputs)
if len(dtypes) == 1:
return inputs
max_dtype = dtype_order[max(dtypes)]
for i, input_op in enumerate(inputs):
if infer_type(input_op).checked_type.dtype != max_dtype:
inputs[i] = input_op.astype(max_dtype)
return inputs
def _convert_dtype_value(val):
"""Converts a Paddle type id to a string."""
convert_dtype_map = {
21: "int8",
20: "uint8",
6: "float64",
5: "float32",
4: "float16",
3: "int64",
2: "int32",
1: "int16",
0: "bool",
}
if val not in convert_dtype_map:
msg = "Paddle data type value %d is not handled yet." % (val)
raise NotImplementedError(msg)
return convert_dtype_map[val]
def convert_unary_op(g, op, block):
"""Operator converter for all the unary operators."""
# op_map stores mapping relationship between paddlepaddle and relay
op_map = {
"isinf_v2": _op.isinf,
"isfinite_v2": _op.isfinite,
"isnan_v2": _op.isnan,
}
if op.type in op_map:
unary_func = op_map[op.type]
else:
# otherwise the paddle operator's name is the same as relay's
unary_func = get_relay_op(op.type)
out = unary_func(g.get_node(op.input("X")[0]))
g.add_node(op.output("Out")[0], out)
def convert_binary_logical_op(g, op, block):
"""Operator converter for logical op."""
ipt0 = g.get_node(op.input("X")[0])
ipt1 = g.get_node(op.input("Y")[0])
op_func = get_relay_op(op.type)
out = op_func(ipt0, ipt1)
g.add_node(op.output("Out")[0], out)
def convert_addmm(g, op, block):
"""Operator converter for addmm."""
input_x = g.get_node(op.input("Input")[0])
x = g.get_node(op.input("X")[0])
y = g.get_node(op.input("Y")[0])
alpha = op.attr("Alpha")
beta = op.attr("Beta")
dtype = block.var(op.output("Out")[0]).dtype
dtype = _convert_dtype_value(dtype)
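# addmm computes beta * input + alpha * (x @ y); fold the scalars in before the matmul and add below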
if not isinstance(alpha, _expr.Expr) and alpha != 1:
alpha = _expr.const(alpha, dtype)
x *= alpha
if not isinstance(beta, _expr.Expr) and beta != 1:
beta = _expr.const(beta, dtype)
input_x *= beta
transposed_y = _op.transpose(y, axes=[1, 0])
dense_out = _op.nn.dense(x, transposed_y)
out = dense_out + input_x
g.add_node(op.output("Out")[0], out)
def convert_arg_max_min(g, op, block):
"""Operator converter for arg_max and arg_min."""
axis = op.attr("axis")
keepdims = op.attr("keepdims")
flatten = op.attr("flatten")
dtype = op.attr("dtype")
dtype = _convert_dtype_value(dtype)
func = _op.argmax if op.type == "arg_max" else _op.argmin
x = g.get_node(op.input("X")[0])
if axis is None or flatten:
x = _op.reshape(x, [-1])
out = func(x, axis=None, keepdims=True)
else:
out = func(x, axis=axis, keepdims=keepdims)
if dtype != infer_type(out).checked_type.dtype:
out = _op.cast(out, dtype)
g.add_node(op.output("Out")[0], out)
def convert_argsort(g, op, block):
"""Operator converter for argsort."""
x = g.get_node(op.input("X")[0])
axis = op.attr("axis")
descending = op.attr("descending")
out_indices = _op.argsort(x, axis, not descending, dtype="int64")
out = _op.gather(x, axis, out_indices)
g.add_node(op.output("Out")[0], out)
g.add_node(op.output("Indices")[0], out_indices)
def convert_assign(g, op, block):
"""Operator converter for assign."""
out = g.get_node(op.input("X")[0])
g.add_node(op.output("Out")[0], out)
def convert_assign_value(g, op, block):
"""Operator converter for assign_value."""
keys = ["bool_values", "fp32_values", "int32_values", "int64_values"]
dtypes = ["bool", "float32", "int32", "int64"]
for i, key in enumerate(keys):
dtype = dtypes[i]
value = np.array(op.attr(key)).astype(dtype)
if value is not None and value.size >= 1:
break
shape = op.attr("shape")
value = value.reshape(shape)
out = _op.const(value, dtype=dtype)
g.add_node(op.output("Out")[0], out)
def convert_batch_norm(g, op, block):
"""Operator converter for batch_norm."""
ipt_name = op.input("X")[0]
scale_name = op.input("Scale")[0]
bias_name = op.input("Bias")[0]
mean_name = op.input("Mean")[0]
variance_name = op.input("Variance")[0]
epsilon = op.attr("epsilon")
out = _op.nn.batch_norm(
g.get_node(ipt_name),
g.get_node(scale_name),
g.get_node(bias_name),
g.get_node(mean_name),
g.get_node(variance_name),
epsilon=epsilon,
)
g.add_node(op.output("Y")[0], out[0])
def convert_bmm(g, op, block):
"""Operator converter for bmm."""
x = g.get_node(op.input("X")[0])
y = g.get_node(op.input("Y")[0])
y = _op.transpose(y, [0, 2, 1])
out = _op.nn.batch_matmul(x, y)
g.add_node(op.output("Out")[0], out)
def convert_brelu(g, op, block):
"""Operator converter for brelu."""
x = g.get_node(op.input("X")[0])
t_max = op.attr("t_max")
t_min = op.attr("t_min")
out = _op.tensor.clip(x, t_min, t_max)
g.add_node(op.output("Out")[0], out)
def convert_cast(g, op, block):
"""Operator converter for cast."""
dtype = op.attr("out_dtype")
dtype = _convert_dtype_value(dtype)
x = g.get_node(op.input("X")[0])
out = _op.cast(x, dtype=dtype)
g.add_node(op.output("Out")[0], out)
def convert_clip(g, op, block):
"""Operator converter for clip."""
x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
# if the min/max value is a tensor
min_max_is_tensor = False
if op.input("Min"):
min_value = g.get_node(op.input("Min")[0])
min_value, infered = try_infer_value(min_value, g.get_params())
if infered:
min_value = min_value.tolist()[0]
if isinstance(min_value, _expr.Expr):
min_max_is_tensor = True
else:
min_value = op.attr("min")
if op.input("Max"):
max_value = g.get_node(op.input("Max")[0])
max_value, infered = try_infer_value(max_value, g.get_params())
if infered:
max_value = max_value.tolist()[0]
if isinstance(max_value, _expr.Expr):
min_max_is_tensor = True
else:
max_value = op.attr("max")
if min_max_is_tensor:
if not isinstance(min_value, _expr.Expr):
min_value = _op.const(min_value, dtype)
if not isinstance(max_value, _expr.Expr):
max_value = _op.const(max_value, dtype)
out = _op.maximum(x, min_value)
out = _op.minimum(out, max_value)
else:
out = _op.clip(x, min_value, max_value)
g.add_node(op.output("Out")[0], out)
def convert_concat(g, op, block):
"""Operator converter for concat."""
inputs = [g.get_node(op.input("X")[i]) for i in range(len(op.input("X")))]
axis = op.attr("axis")
inputs = _dtype_shape_promotion(inputs)
out = _op.concatenate(inputs, axis=axis)
g.add_node(op.output("Out")[0], out)
def convert_conv2d(g, op, block):
"""Operator converter for conv2d."""
dilations = op.attr("dilations")
groups = op.attr("groups")
paddings = op.attr("paddings")
padding_algorithm = op.attr("padding_algorithm")
strides = op.attr("strides")
kernel = g.get_node(op.input("Filter")[0])
input_x = g.get_node(op.input("Input")[0])
out_channels, _, k_h, k_w = infer_shape(kernel)
if padding_algorithm == "VALID":
paddings = [0, 0]
elif padding_algorithm == "SAME":
# Handle a legacy PaddlePaddle behavior:
# when padding_algorithm == "SAME",
# dilations are forced to [1, 1]
dilations = [1, 1]
input_x = autopad(input_x, strides, [k_h, k_w], dilations)
paddings = [0, 0]
elif padding_algorithm == "EXPLICIT":
if len(paddings) == 2:
paddings = [paddings[0], paddings[1], paddings[0], paddings[1]]
elif len(paddings) == 4:
paddings = [paddings[0], paddings[2], paddings[1], paddings[3]]
else:
msg = 'Value {} in attribute "padding_algorithm" of operator Conv is not valid.'
raise tvm.error.OpAttributeInvalid(msg.format(padding_algorithm))
out = _op.nn.conv2d(
input_x,
kernel,
strides=strides,
padding=paddings,
dilation=dilations,
groups=groups,
channels=out_channels,
kernel_size=[k_h, k_w],
)
g.add_node(op.output("Output")[0], out)
def convert_conv2d_transpose(g, op, block):
"""Operator converter for conv2d_transpose."""
dilations = op.attr("dilations")
groups = op.attr("groups")
paddings = op.attr("paddings")
padding_algorithm = op.attr("padding_algorithm")
strides = op.attr("strides")
output_padding = op.attr("output_padding") if op.attr("output_padding") else [0, 0]
kernel = g.get_node(op.input("Filter")[0])
input_x = g.get_node(op.input("Input")[0])
_, out_channels, k_h, k_w = infer_shape(kernel)
k_size = [k_h, k_w]
if padding_algorithm == "VALID":
paddings = [0, 0]
elif padding_algorithm == "SAME":
# SAME padding of conv2d_transpose is not the same as conv2d's.
# We cannot use autopad here; only static shapes are supported for now.
dilations = [1, 1]
input_shape = shape_of(input_x)
h_w = _op.strided_slice(input_shape, [2], [4])
try:
h_w = infer_value(h_w, g.get_params()).numpy().tolist()
except Exception as e:
msg = "The SAME padding algorithm of conv2d_transpose not support dynamic shape"
raise tvm.error.OpAttributeInvalid(msg) from e
paddings = []
for i in range(2):
if strides[i] == 1 or h_w[i] % strides[i] == 0:
pad = max(k_size[i] - strides[i], 0)
else:
pad = max(k_size[i] - (h_w[i] % strides[i]), 0)
pad_before = pad // 2
pad_after = pad - pad_before
paddings.insert(-1, pad_before)
paddings.append(pad_after)
elif padding_algorithm == "EXPLICIT":
if len(paddings) == 2:
paddings = [paddings[0], paddings[1], paddings[0], paddings[1]]
elif len(paddings) == 4:
paddings = [paddings[0], paddings[2], paddings[1], paddings[3]]
else:
msg = 'Value {} in attribute "padding_algorithm" of operator Conv is not valid.'
raise tvm.error.OpAttributeInvalid(msg.format(padding_algorithm))
out = _op.nn.conv2d_transpose(
input_x,
kernel,
strides=strides,
padding=paddings,
dilation=dilations,
groups=groups,
channels=out_channels * groups,
kernel_size=k_size,
output_padding=output_padding,
)
g.add_node(op.output("Output")[0], out)
def convert_cumsum(g, op, block):
"""Operator converter for cumsum."""
axis = op.attr("axis")
exclusive = op.attr("exclusive")
flatten = op.attr("flatten")
reverse = op.attr("reverse")
x = g.get_node(op.input("X")[0])
if axis is None or flatten:
x = _op.reshape(x, [-1])
if reverse:
x = _op.reverse(x, axis=axis)
out = _op.cumsum(x, axis=axis, exclusive=exclusive)
out = _op.reverse(out, axis=axis)
else:
out = _op.cumsum(x, axis=axis, exclusive=exclusive)
g.add_node(op.output("Out")[0], out)
def convert_dropout(g, op, block):
"""Operator converter for dropout."""
x = g.get_node(op.input("X")[0])
g.add_node(op.output("Out")[0], x)
def convert_dot(g, op, block):
"""Operator converter for dot."""
# x and y should be 1-D or 2-D tensors;
# when they are 2-D, the first dimension is the batch dimension
x = g.get_node(op.input("X")[0])
y = g.get_node(op.input("Y")[0])
out = _op.sum(_op.multiply(x, y), axis=[-1], keepdims=True)
g.add_node(op.output("Out")[0], out)
def convert_elementwise_op(g, op, block):
"""Operator converter for all the elementwise operators."""
op_map = {
"elementwise_div": "divide",
"elementwise_add": "add",
"elementwise_mul": "multiply",
"elementwise_sub": "subtract",
"elementwise_mod": "mod",
"elementwise_max": "maximum",
"elementwise_min": "minimum",
"elementwise_pow": "power",
"elementwise_floordiv": "floor_divide",
"equal": "equal",
"greater_equal": "greater_equal",
"greater_than": "greater",
"less_equal": "less_equal",
"less_than": "less",
"not_equal": "not_equal",
}
op_func = op_map[op.type]
ipt0 = g.get_node(op.input("X")[0])
ipt1 = g.get_node(op.input("Y")[0])
ipt0_shape = infer_shape(ipt0)
ipt1_shape = infer_shape(ipt1)
axis = op.attr("axis")
if len(ipt0_shape) != len(ipt1_shape):
if axis < 0:
axis = axis + len(ipt0_shape)
if axis != len(ipt0_shape) - 1:
ipt1 = _op.expand_dims(ipt1, axis=axis, num_newaxis=(len(ipt0_shape) - axis - 1))
op_func = get_relay_op(op_func)
out = op_func(ipt0, ipt1)
g.add_node(op.output("Out")[0], out)
def convert_elu(g, op, block):
"""Operator converter for elu."""
x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
alpha = op.attr("alpha")
alpha = _expr.const(-1.0 * alpha, dtype=dtype)
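# ELU(x) = x for x >= 0 and alpha * (exp(x) - 1) for x < 0; the negative branch is expressed as -alpha * relu(1 - exp(x))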
out = alpha * _op.nn.relu(_expr.const(1, dtype=dtype) - _op.exp(x)) + _op.nn.relu(x)
g.add_node(op.output("Out")[0], out)
def convert_expand(g, op, block):
"""Operator converter for expand."""
x = g.get_node(op.input("X")[0])
if op.input("Shape"):
sizes = g.get_node(op.input("Shape")[0])
else:
sizes = op.attr("shape")
if isinstance(sizes, _expr.Expr):
sizes = try_infer_value(sizes, parameters=g.get_params())[0]
if isinstance(sizes, np.ndarray):
sizes = sizes.tolist()
out = _op.broadcast_to(x, sizes)
g.add_node(op.output("Out")[0], out)
def convert_expand_as(g, op, block):
"""Operator converter for expand_as."""
x = g.get_node(op.input("X")[0])
target_shape = op.attr("target_shape")
out = _op.broadcast_to(x, target_shape)
g.add_node(op.output("Out")[0], out)
def convert_feed(g, op, block):
"""Converter for model input node."""
if block is not None:
ipt_name = op.output("Out")[0]
ipt_shape = block.var(ipt_name).shape
ipt_dtype = block.var(ipt_name).dtype
ipt_dtype = str(ipt_dtype).strip().split(".")[1]
else:
ipt_shape = op.shape
ipt_dtype = str(op.dtype).strip().split(".")[1]
ipt_name = op.name
if g.shape_dict is not None:
ipt_shape = g.shape_dict[ipt_name]
if isinstance(ipt_shape, tuple):
ipt_shape = list(ipt_shape)
for i, s in enumerate(ipt_shape):
if s < 0:
ipt_shape[i] = _ty.Any()
out = new_var(ipt_name, shape=ipt_shape, dtype=ipt_dtype)
g.add_node(ipt_name, out)
def convert_fill_any_like(g, op, block):
"""Operator converter for fill_any_like."""
dtype = op.attr("dtype")
dtype = _convert_dtype_value(dtype)
x = g.get_node(op.input("X")[0])
value = _expr.const(op.attr("value"), dtype=dtype)
out = _op.transform.full_like(x, value).astype(dtype)
g.add_node(op.output("Out")[0], out)
def convert_fill_constant(g, op, block):
"""Operator converter for fill_constant."""
value = op.attr("value")
shape = block.var(op.output("Out")[0]).shape
dtype = op.attr("dtype")
dtype = _convert_dtype_value(dtype)
value = _expr.const(value).astype(dtype)
if "ValueTensor" in op.input_names and op.input("ValueTensor"):
shape = g.get_node(op.input("ValueTensor")[0])
if "ShapeTensor" in op.input_names and op.input("ShapeTensor"):
shape = g.get_node(op.input("ShapeTensor")[0])
if isinstance(shape, _expr.Expr):
shape = try_infer_value(shape, parameters=g.get_params())[0]
if isinstance(shape, np.ndarray):
shape = shape.tolist()
out = _op.full(value, shape=shape, dtype=dtype)
g.add_node(op.output("Out")[0], out)
def convert_fill_constant_batch_size_like(g, op, block):
"""Operator converter for fill_constant_batch_size_like."""
x = g.get_node(op.input("Input")[0])
value = op.attr("value")
shape = op.attr("shape")
input_dim_idx = op.attr("input_dim_idx")
output_dim_idx = op.attr("output_dim_idx")
dtype = op.attr("dtype")
dtype = _convert_dtype_value(dtype)
input_shape = shape_of(x)
batch = _op.strided_slice(input_shape, begin=[input_dim_idx], end=[input_dim_idx + 1]).astype(
"int32"
)
shape_before = shape[:output_dim_idx]
shape_before = _expr.const(shape_before, dtype="int32")
shape_after = shape[output_dim_idx + 1 :]
shape_after = _expr.const(shape_after, dtype="int32")
out_shape = _op.concatenate([shape_before, batch, shape_after], axis=0)
out_shape, infered = try_infer_value(out_shape, g.get_params())
if infered:
out_shape = out_shape.tolist()
constant = _expr.const(value, dtype=dtype).astype(dtype)
out = _op.full(constant, out_shape, dtype=dtype)
g.add_node(op.output("Out")[0], out)
def convert_flatten(g, op, block):
"""Operator converter for flatten."""
x = g.get_node(op.input("X")[0])
input_shape = list(infer_shape(x))
start = op.attr("start_axis")
end = op.attr("stop_axis")
ndim = len(input_shape)
if end < 0:
end += ndim
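# In relay reshape, 0 copies the input dim and -1 absorbs the product of dims [start, end]; the placeholder 1-axes keep the rank aligned and are squeezed away afterwards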
new_shape = [0] * start
new_shape.append(-1)
squeeze_axes = []
for i in range(start + 1, end + 1):
new_shape.append(1)
squeeze_axes.append(i)
for _ in range(end + 1, ndim):
new_shape.append(0)
out = _op.reshape(x, new_shape)
if squeeze_axes:
out = _op.squeeze(out, axis=squeeze_axes)
g.add_node(op.output("Out")[0], out)
def convert_gather(g, op, block):
"""Operator converter for gather."""
x = g.get_node(op.input("X")[0])
index = g.get_node(op.input("Index")[0])
axis = op.attr("axis")
out = _op.take(x, index, axis)
g.add_node(op.output("Out")[0], out)
def convert_gather_nd(g, op, block):
"""Operator converter for gather_nd."""
x = g.get_node(op.input("X")[0])
index = g.get_node(op.input("Index")[0])
shape = infer_shape(index)
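# relay gather_nd expects the indexing axis to come first, so move Paddle's trailing index axis to the front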
perm = list(range(0, len(shape) - 1))
perm.insert(0, len(shape) - 1)
index = _op.transpose(index, axes=perm)
out = _op.gather_nd(x, index, 0, shape[-1])
g.add_node(op.output("Out")[0], out)
def convert_gelu(g, op, block):
"""Operator converter for gelu."""
x = g.get_node(op.input("X")[0])
out = x * (
_expr.const(0.5, dtype="float32")
+ _op.erf(x * _expr.const(0.5**0.5, dtype="float32")) * _expr.const(0.5, dtype="float32")
)
g.add_node(op.output("Out")[0], out)
def convert_group_norm(g, op, block):
"""Operator converter for group_norm."""
x = g.get_node(op.input("X")[0])
num_groups = op.attr("groups")
epsilon = op.attr("epsilon")
gamma = g.get_node(op.input("Scale")[0])
beta = g.get_node(op.input("Bias")[0])
out = _op.nn.group_norm(
x,
gamma=gamma,
beta=beta,
num_groups=num_groups,
axis=1,
epsilon=epsilon,
center=True,
scale=True,
)
g.add_node(op.output("Y")[0], out)
def convert_hard_shrink(g, op, block):
"""Operator converter for hard_shrink."""
x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
threshold = op.attr("threshold")
threshold = _op.const(threshold, dtype)
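# hardshrink keeps x where |x| > threshold and zeroes it elsewhere; build a boolean mask, cast it, and multiply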
out = _op.logical_or(x < _op.const(-1.0, dtype) * threshold, x > threshold)
out = _op.cast(out, dtype) * x
g.add_node(op.output("Out")[0], out)
def convert_hard_sigmoid(g, op, block):
"""Operator converter for hard_sigmoid."""
slope = op.attr("slope")
x = g.get_node(op.input("X")[0])
out = x * _expr.const(slope) + _expr.const(0.5)
out = _op.clip(out, 0, 1)
g.add_node(op.output("Out")[0], out)
def convert_hard_swish(g, op, block):
"""Operator converter for hard_swish."""
offset = op.attr("offset")
scale = op.attr("scale")
threshold = op.attr("threshold")
assert np.isclose(offset, 3.0), "Only support offset==3.0 for PaddlePaddle's hard_swish"
assert np.isclose(scale, 6.0), "Only support scale==6.0 for PaddlePaddle's hard_swish"
assert np.isclose(threshold, 6.0), "Only support threshold==6.0 for PaddlePaddle's hard_swish"
x = g.get_node(op.input("X")[0])
out = _op.clip(x, -1 * offset, offset)
out = out / _expr.const(threshold) + _expr.const(0.5)
out = x * out
g.add_node(op.output("Out")[0], out)
def convert_interpolate(g, op, block):
"""Operator converter for interpolate."""
def get_interpolate_mode(op):
"""Get parameters for interpolation methods."""
interp_method = op.attr("interp_method")
align_corners = op.attr("align_corners")
align_mode = op.attr("align_mode")
rounding_method = ""
if interp_method == "nearest":
interp_method = "nearest_neighbor"
coordinate_transformation_mode = "asymmetric"
rounding_method = "floor"
elif interp_method == "bilinear":
interp_method = "linear"
if not align_corners and align_mode == 0:
coordinate_transformation_mode = "half_pixel"
else:
if align_corners:
coordinate_transformation_mode = "align_corners"
else:
coordinate_transformation_mode = "asymmetric"
elif interp_method == "bicubic":
interp_method = "cubic"
if align_corners:
coordinate_transformation_mode = "align_corners"
else:
coordinate_transformation_mode = "half_pixel"
else:
msg = "interp_method {} is not supported for PaddlePaddle's interpolate"
raise tvm.error.OpAttributeInvalid(msg.format(interp_method))
return rounding_method, interp_method, coordinate_transformation_mode
layout = op.attr("data_layout")
out_h = op.attr("out_h")
out_w = op.attr("out_w")
scale = op.attr("scale")
if not isinstance(scale, (list, tuple)):
scale = [scale, scale]
x = g.get_node(op.input("X")[0])
x_shape = infer_shape(x)
assert len(x_shape) == 4, "Only 4D input tensor is supported for PaddlePaddle's interpolate"
input_out_size = op.input("OutSize")
input_size_tensor = op.input("SizeTensor")
input_scale = op.input("Scale")
rounding_method, interp_method, coordinate_transformation_mode = get_interpolate_mode(op)
if input_size_tensor:
# if out_size is a list of tensors
out_size = list()
for name in input_size_tensor:
size = g.get_node(name)
if len(infer_shape(size)) == 0:
size = _op.reshape(size, [-1])
out_size.append(size)
out_size = _op.concatenate(out_size, axis=0)
out_size, infered = try_infer_value(out_size, parameters=g.get_params())
if infered:
out_size = out_size.tolist()
elif input_scale:
# if out_size is not defined, but scale is defined
input_scale = g.get_node(input_scale[0])
input_shape = shape_of(x).astype("float32")
if layout.startswith("NC"):
out_size = _op.strided_slice(input_shape, begin=[2], end=[4]) * input_scale
else:
out_size = _op.strided_slice(input_shape, begin=[1], end=[3]) * input_scale
out_size = out_size.astype("int32")
out_size, infered = try_infer_value(out_size, parameters=g.get_params())
if infered:
out_size = out_size.tolist()
elif scale and scale[0] > 0 and scale[1] > 0:
# use attribute scale
input_shape = shape_of(x).astype("float32")
input_scale = _expr.const(np.array([scale[0], scale[1]]).astype("float32"))
if layout.startswith("NC"):
out_size = _op.strided_slice(input_shape, begin=[2], end=[4]) * input_scale
else:
out_size = _op.strided_slice(input_shape, begin=[1], end=[3]) * input_scale
out_size = out_size.astype("int32")
out_size, infered = try_infer_value(out_size, parameters=g.get_params())
if infered:
out_size = out_size.tolist()
elif input_out_size:
# if out_size is a tensor
out_size = g.get_node(input_out_size[0])
out_size, infered = try_infer_value(out_size, parameters=g.get_params())
if infered:
out_size = out_size.tolist()
else:
# if out_size is a constant value
out_size = [out_h, out_w]
out = _op.image.resize2d(
x,
size=out_size,
layout=layout,
method=interp_method,
coordinate_transformation_mode=coordinate_transformation_mode,
rounding_method=rounding_method,
cubic_alpha=-0.75,
)
g.add_node(op.output("Out")[0], out)
def convert_instance_norm(g, op, block):
"""Operator converter for instance_norm."""
x = g.get_node(op.input("X")[0])
gamma = g.get_node(op.input("Scale")[0])
beta = g.get_node(op.input("Bias")[0])
epsilon = op.attr("epsilon")
scale = center = True
out = _op.nn.instance_norm(x, gamma, beta, axis=1, epsilon=epsilon, center=center, scale=scale)
g.add_node(op.output("Y")[0], out)
def convert_layer_norm(g, op, block):
"""Operator converter for layer_norm."""
begin_norm_axis = op.attr("begin_norm_axis")
epsilon = op.attr("epsilon")
x = g.get_node(op.input("X")[0])
bias_input = op.input("Bias")
scale_input = op.input("Scale")
x_shape = infer_shape(x)
assert begin_norm_axis in (
len(x_shape) - 1,
-1,
), "Support only normalization over last one dimension."
if bias_input:
bias = g.get_node(bias_input[0])
else:
bias = _expr.const(np.zeros(x_shape[begin_norm_axis]))
if scale_input:
scale = g.get_node(scale_input[0])
else:
scale = _expr.const(np.ones(x_shape[begin_norm_axis]))
out = _op.nn.layer_norm(
x, gamma=scale, beta=bias, axis=begin_norm_axis, epsilon=epsilon, center=True, scale=True
)
g.add_node(op.output("Y")[0], out)
def convert_leaky_relu(g, op, block):
"""Operator converter for leaky_relu."""
alpha = op.attr("alpha")
x = g.get_node(op.input("X")[0])
out = _op.nn.leaky_relu(x, alpha=alpha)
g.add_node(op.output("Out")[0], out)
def convert_log1p(g, op, block):
"""Operator converter for log1p."""
x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
one = _expr.const(1, dtype=dtype)
out = _op.log(x + one)
g.add_node(op.output("Out")[0], out)
def convert_logical_not(g, op, block):
"""Operator converter for logical_not op."""
ipt0 = g.get_node(op.input("X")[0])
op_func = get_relay_op(op.type)
out = op_func(ipt0)
g.add_node(op.output("Out")[0], out)
def convert_logsigmoid(g, op, block):
"""Operator converter for logsigmoid."""
x = g.get_node(op.input("X")[0])
out = _op.log(_op.tensor.sigmoid(x))
g.add_node(op.output("Out")[0], out)
def convert_logsoftmax(g, op, block):
"""Operator converter for logsoftmax."""
x = g.get_node(op.input("X")[0])
axis = op.attr("axis")
ndim = len(infer_shape(x))
if axis < 0:
axis += ndim
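# numerically stable log-softmax: subtract the per-axis max before exponentiating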
m = _op.max(x, [axis], keepdims=True)
e = _op.exp(x - m)
s = _op.sum(e, [axis], keepdims=True)
out = x - m - _op.log(s)
g.add_node(op.output("Out")[0], out)
def convert_logsumexp(g, op, block):
"""Operator converter for logsumexp."""
input_x = g.get_node(op.input("X")[0])
axis = op.attr("axis")
if op.attr("reduce_all"):
axis = None
keepdims = op.attr("keepdim")
out = get_relay_op("logsumexp")(input_x, axis=axis, keepdims=keepdims)
if not axis and not keepdims:
out = _op.expand_dims(out, axis=0)
g.add_node(op.output("Out")[0], out)
def convert_lookup_table(g, op, block):
"""Operator converter for lookup_table_v2."""
indices = g.get_node(op.input("Ids")[0])
padding_idx = op.attr("padding_idx")
weights = g.get_node(op.input("W")[0])
if padding_idx != -1:
if op.input("W")[0] in g.get_params():
weights = g.get_params(op.input("W")[0])
weights[padding_idx] = 0.0
weights = _expr.const(weights)
else:
shape, infered = try_infer_value(shape_of(weights), g.get_params())
if infered:
shape = shape.tolist()
assert not isinstance(
shape, _expr.Expr
), "Shape of weight has to be fixed for PaddlePaddle's lookup_table"
filters = np.ones(shape).astype(infer_type(weights).checked_type.dtype)
filters[padding_idx] = 0.0
filters = _expr.const(filters)
weights = weights * filters
out = _op.take(weights, indices.astype("int32"), axis=0)
g.add_node(op.output("Out")[0], out)
def convert_matmul(g, op, block):
"""Operator converter for matmul."""
inputs = [g.get_node(op.input("X")[0]), g.get_node(op.input("Y")[0])]
a_shape = infer_shape(inputs[0])
b_shape = infer_shape(inputs[1])
if op.has_attr("trans_x"):
# for matmul_v2
trans_x = op.attr("trans_x")
trans_y = op.attr("trans_y")
else:
# for matmul
trans_x = op.attr("transpose_X")
trans_y = op.attr("transpose_Y")
if trans_x:
perm = list(range(len(a_shape)))
perm[-2] = len(a_shape) - 1
perm[-1] = len(a_shape) - 2
inputs[0] = _op.transpose(inputs[0], axes=perm)
if trans_y:
perm = list(range(len(b_shape)))
perm[-2] = len(b_shape) - 1
perm[-1] = len(b_shape) - 2
inputs[1] = _op.transpose(inputs[1], axes=perm)
# This implementation closely follows the ONNX matmul converter.
# Need to check input shapes, as batch matmul must be supported.
a_shape = shape_of(inputs[0], dtype="int32")
a_rank = infer_shape(a_shape)[0]
b_shape = shape_of(inputs[1], dtype="int32")
b_rank = infer_shape(b_shape)[0]
# When performing a batch matmul, we need to properly handle N-dim shapes.
if a_rank > 2 or b_rank > 2:
def flatten_to_nd(x, x_shape, nd=3):
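"""Collapse the leading batch dims so the tensor has exactly `nd` dims."""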
ndims = infer_shape(x_shape)[0]
if ndims == nd:
return x
newshape = _op.concatenate(
[
_expr.const([-1], dtype=infer_type(x_shape).checked_type.dtype),
_op.strided_slice(x_shape, [ndims - nd + 1], [ndims]),
],
0,
)
out = _op.reshape(x, fold_constant(newshape))
return out
b_type = infer_type(inputs[1])
# Convert to dense if the second matrix is 2d and non-dynamic
if b_rank == 2 and not _ty.is_dynamic(b_type.checked_type):
a = flatten_to_nd(inputs[0], a_shape, 2)
b = _op.transpose(inputs[1])
output = _op.nn.dense(a, b)
else:
# Convert a and b into 3 dimensional tensors.
a = flatten_to_nd(inputs[0], a_shape, 3)
b = flatten_to_nd(inputs[1], b_shape, 3)
# Transpose matrix dimensions of b.
b = _op.transpose(b, [0, 2, 1])
# Perform a batch matmul.
output = _op.nn.batch_matmul(a, b)
# Determine the output batch dimension.
if a_rank > b_rank:
out_batch = _op.strided_slice(a_shape, [0], [a_rank - 2])
elif a_rank < b_rank:
out_batch = _op.strided_slice(b_shape, [0], [b_rank - 2])
# If it's unclear how broadcasting should be applied, the output
# shape is determined by choosing the maximum value from each input.
else:
out_batch = _op.concatenate(
[
_op.maximum(
_op.strided_slice(a_shape, [i], [i + 1]),
_op.strided_slice(b_shape, [i], [i + 1]),
)
for i in range(a_rank - 2)
],
0,
)
# Reshape output to original dimensions.
final_shape = _op.concatenate(
[
out_batch,
_op.strided_slice(
a_shape, [infer_shape(a_shape)[0] - 2], [infer_shape(a_shape)[0] - 1]
),
_op.strided_slice(
b_shape, [infer_shape(b_shape)[0] - 1], [infer_shape(b_shape)[0]]
),
],
0,
)
out = _op.reshape(output, fold_constant(final_shape))
else:
if b_rank == 1:
inputs[1] = _op.expand_dims(inputs[1], 1, 1)
# Otherwise a simple dense op will get the job done.
input_1_t = _op.transpose(inputs[1], axes=(1, 0))
out = _op.nn.dense(inputs[0], input_1_t)
if b_rank == 1:
out = _op.squeeze(out, axis=[-1])
if op.has_attr("alpha"):
alpha = op.attr("alpha")
if not np.isclose(alpha, 1.0):
out = out * _expr.const(alpha).astype("float32")
g.add_node(op.output("Out")[0], out)
def convert_meshgrid(g, op, block):
"""Operator converter for meshgrid."""
inputs = op.input("X")
x = [g.get_node(i) for i in inputs]
outs = _op.meshgrid(x, indexing="ij")
for i, out in enumerate(outs):
g.add_node(op.output("Out")[i], out)
def convert_mul(g, op, block):
"""Operator converter for mul."""
x = g.get_node(op.input("X")[0])
y = g.get_node(op.input("Y")[0])
x_num_col_dims = op.attr("x_num_col_dims")
y_num_col_dims = op.attr("y_num_col_dims")
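# Paddle's mul flattens x to a 2-D matrix at x_num_col_dims and y at y_num_col_dims, multiplies them, then restores the outer dims; the reshapes below mirror that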
x_shape = shape_of(x, dtype="int32")
y_shape = shape_of(y, dtype="int32")
x_dim = infer_shape(x_shape)[0]
y_dim = infer_shape(y_shape)[0]
if x_num_col_dims < 0:
x_num_col_dims += x_dim
if y_num_col_dims < 0:
y_num_col_dims += y_dim
if x_num_col_dims == 1:
x = _op.nn.batch_flatten(x)
else:
pre_shape = _op.prod(_op.strided_slice(x_shape, [0], [x_num_col_dims], [1]), keepdims=True)
post_shape = _op.prod(
_op.strided_slice(x_shape, [x_num_col_dims], [x_dim], [1]), keepdims=True
)
new_shape = _op.concatenate([pre_shape, post_shape], axis=0)
new_shape = fold_constant(new_shape)
x = _op.reshape(x, new_shape)
if y_num_col_dims == 1:
y = _op.nn.batch_flatten(y)
else:
pre_shape = _op.prod(_op.strided_slice(y_shape, [0], [y_num_col_dims], [1]), keepdims=True)
post_shape = _op.prod(
_op.strided_slice(y_shape, [y_num_col_dims], [y_dim], [1]), keepdims=True
)
new_shape = _op.concatenate([pre_shape, post_shape], axis=0)
new_shape = fold_constant(new_shape)
y = _op.reshape(y, new_shape)
y = _op.transpose(y)
out = _op.nn.dense(x, y)
out_pre_shape = _op.strided_slice(x_shape, [0], [x_num_col_dims], [1])
out_post_shape = _op.strided_slice(y_shape, [y_num_col_dims], [y_dim], [1])
out_shape = _op.concatenate([out_pre_shape, out_post_shape], axis=0)
out_shape = fold_constant(out_shape)
out = _op.reshape(out, out_shape)
g.add_node(op.output("Out")[0], out)
def convert_mv(g, op, block):
"""Operator converter for mv."""
x = g.get_node(op.input("X")[0])
y = g.get_node(op.input("Vec")[0])
y = _op.expand_dims(y, axis=-1)
y = _op.transpose(y)
out = _op.nn.dense(x, y)
out = _op.squeeze(out, axis=[-1])
g.add_node(op.output("Out")[0], out)
def convert_padding(g, op, block):
"""Operator converter for padding."""
input_x = g.get_node(op.input("X")[0])
input_padding = op.input("Paddings")
if input_padding:
padding = g.get_node(input_padding[0])
padding = infer_value(padding, g.get_params()).numpy().tolist()
else:
padding = op.attr("paddings")
padding = op.attr("paddings")
value = op.attr("value")
data_format = op.attr("data_format")
mode = op.attr("mode")
assert mode != "circular", "Don't support mod='circular' for PaddlePaddle's padding"
if mode == "replicate":
mode = "edge"
pad_len = len(padding)
new_paddings = [0] * (pad_len + 4)
for i in range(0, pad_len, 2):
index = -1 - i
if data_format[:2] != "NC":
index = -3 - i
new_paddings[index] = padding[i + 1]
new_paddings[index - 1] = padding[i]
new_paddings = [new_paddings[i : i + 2] for i in range(0, len(new_paddings), 2)]
out = _op.nn.pad(input_x, new_paddings, pad_value=value, pad_mode=mode)
g.add_node(op.output("Out")[0], out)
def convert_pixel_shuffle(g, op, block):
"""Operator converter for pixel_shuffle."""
x = g.get_node(op.input("X")[0])
upscale_factor = op.attr("upscale_factor")
out = _op.nn.depth_to_space(x, upscale_factor, mode="CRD")
g.add_node(op.output("Out")[0], out)
def convert_pool2d(g, op, block):
"""Operator converter for pool2d."""
adaptive = op.attr("adaptive")
ceil_mode = op.attr("ceil_mode")
global_pooling = op.attr("global_pooling")
ksize = op.attr("ksize")
paddings = op.attr("paddings")
padding_algorithm = op.attr("padding_algorithm")
pooling_type = op.attr("pooling_type")
data_format = op.attr("data_format")
if global_pooling:
adaptive = True
ksize = [1, 1]
input_x = g.get_node(op.input("X")[0])
_, _, in_h, in_w = infer_shape(input_x)
op_map = {
"avg": "avg_pool2d",
"max": "max_pool2d",
}
strides = op.attr("strides")
if isinstance(strides, int):
strides = [strides, strides]
if isinstance(ksize, int):
ksize = [ksize, ksize]
if isinstance(paddings, int):
paddings = [paddings] * 2
if padding_algorithm == "VALID":
paddings = [0, 0]
elif padding_algorithm == "SAME":
input_x = autopad(input_x, strides, ksize)
paddings = [0, 0]
elif padding_algorithm == "EXPLICIT":
if len(paddings) == 2:
paddings = [paddings[0], paddings[1], paddings[0], paddings[1]]
elif len(paddings) == 4:
paddings = [paddings[0], paddings[2], paddings[1], paddings[3]]
else:
msg = 'Value {} in attribute "padding_algorithm" of operator Pool2d is not valid.'
raise tvm.error.OpAttributeInvalid(msg.format(padding_algorithm))
# handle a special case:
# when the kernel size is larger than the padded input size,
# shrink the kernel size to the input size
if (
not isinstance(in_h, _op.Expr)
and padding_algorithm == "EXPLICIT"
and in_h + paddings[0] + paddings[2] < ksize[0]
):
ksize[0] = in_h
if (
not isinstance(in_w, _op.Expr)
and padding_algorithm == "EXPLICIT"
and in_w + paddings[1] + paddings[3] < ksize[1]
):
ksize[1] = in_w
if not adaptive:
if pooling_type == "avg":
exclusive = op.attr("exclusive")
out = _op.nn.avg_pool2d(
input_x,
pool_size=ksize,
strides=strides,
padding=paddings,
ceil_mode=ceil_mode,
count_include_pad=not exclusive,
)
else:
out = getattr(_op.nn, op_map[pooling_type])(
input_x, pool_size=ksize, strides=strides, padding=paddings, ceil_mode=ceil_mode
)
else:
out = getattr(_op.nn, "adaptive_" + op_map[pooling_type])(
input_x, output_size=ksize, layout=data_format
)
g.add_node(op.output("Out")[0], out)
def convert_pow(g, op, block):
"""Operator converter for pow."""
x = g.get_node(op.input("X")[0])
dtype = block.var(op.output("Out")[0]).dtype
dtype = _convert_dtype_value(dtype)
factor = op.attr("factor")
factor = _expr.const(factor, dtype=dtype)
out = _op.power(x, factor)
g.add_node(op.output("Out")[0], out)
def convert_prelu(g, op, block):
"""Operator converter for prelu."""
x = g.get_node(op.input("X")[0])
alpha = g.get_node(op.input("Alpha")[0])
ndims = len(infer_shape(x))
axis = 0 if ndims <= 1 else 1
mode = op.attr("mode")
if mode == "all":
if ndims == 1:
shape = _op.strided_slice(shape_of(x), [0], [1])
else:
shape = _op.strided_slice(shape_of(x), [1], [2])
alpha = _op.broadcast_to(alpha, fold_constant(shape))
out = _op.nn.prelu(x, alpha, axis)
g.add_node(op.output("Out")[0], out)
def convert_range(g, op, block):
"""Operator converter for range."""
start = g.get_node(op.input("Start")[0])
stop = g.get_node(op.input("End")[0])
step = g.get_node(op.input("Step")[0])
dtype = infer_type(start).checked_type.dtype
params = []
for param in (start, stop, step):
param, infered = try_infer_value(param, g.get_params())
if infered:
param = param.tolist()
if isinstance(param, list):
param = param[0]
if isinstance(param, _expr.Expr):
param = _op.squeeze(param)
else:
param = _op.const(param, dtype=dtype)
params.append(param)
out = _op.transform.arange(params[0], params[1], params[2], dtype=dtype)
g.add_node(op.output("Out")[0], out)
def convert_reciprocal(g, op, block):
"""Operator converter for reciprocal."""
x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
out = _expr.const(1.0, dtype) / x
g.add_node(op.output("Out")[0], out)
def convert_reduce(g, op, block):
"""Operator converter for series of reduce operators."""
op_map = {
"reduce_all": "all",
"reduce_any": "any",
"reduce_max": "max",
"reduce_min": "min",
"reduce_prod": "prod",
"reduce_sum": "sum",
"reduce_mean": "mean",
}
op_name = op_map[op.type]
input_x = g.get_node(op.input("X")[0])
axis = op.attr("dim")
if op.attr("reduce_all"):
axis = None
keepdims = op.attr("keep_dim")
out = get_relay_op(op_name)(input_x, axis=axis, keepdims=keepdims)
if not axis and not keepdims:
# use `expand_dims` to resolve the following mismatch:
# for TVM, the shape of `out` would be ()
# while for Paddle, the expected shape of `out` is [1]
out = _op.expand_dims(out, axis=0)
g.add_node(op.output("Out")[0], out)
def convert_relu6(g, op, block):
"""Operator converter for relu6."""
x = g.get_node(op.input("X")[0])
out = _op.clip(x, 0.0, 6.0)
g.add_node(op.output("Out")[0], out)
def convert_reshape(g, op, block):
"""Operator converter for reshape."""
input_shape = op.input("Shape")
input_shape_tensor = op.input("ShapeTensor")
data = g.get_node(op.input("X")[0])
if input_shape:
new_shape = g.get_node(input_shape[0])
elif input_shape_tensor:
new_shape = []
for shape_name in input_shape_tensor:
shape = g.get_node(shape_name)
if len(infer_shape(shape)) == 0:
shape = _op.reshape(shape, [-1])
new_shape.append(shape)
new_shape = _op.concatenate(new_shape, axis=0)
new_shape, infered = try_infer_value(new_shape, parameters=g.get_params())
if infered:
new_shape = new_shape.tolist()
else:
new_shape = op.attr("shape")
out = _op.reshape(data, new_shape)
g.add_node(op.output("Out")[0], out)
def convert_rnn(g, op, block):
"""Operator converter for rnn."""
def generate_lstm(
input_seqs,
hidden_state,
cell_state,
w_inp,
w_hid,
b_inp,
b_hid,
f_act,
g_act,
h_act,
backwards=False,
):
"""Implementation of LSTM cell for paddlepaddle of TVM"""
h_list = []
seq_length = len(input_seqs)
for i in range(seq_length):
step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]
step = _op.squeeze(step, axis=[0])
gates = _op.nn.dense(step, w_inp) + _op.nn.dense(hidden_state, w_hid)
if b_inp is not None:
gates += b_inp
if b_hid is not None:
gates += b_hid
i, f, c, o = _op.split(gates, 4, axis=-1)
i = f_act(i)
f = f_act(f)
c = g_act(c)
C = f * cell_state + i * c
o = f_act(o)
H = o * h_act(C)
hidden_state = H
cell_state = C
h_list.append(_op.expand_dims(H, axis=0))
if backwards:
h_list = h_list[::-1]
# Concatenate outputs and add back in direction axis.
concatenated = _op.concatenate(h_list, 0)
output = _op.expand_dims(concatenated, axis=1)
hidden_state = _op.expand_dims(hidden_state, axis=0)
cell_state = _op.expand_dims(cell_state, axis=0)
return output, hidden_state, cell_state
def generate_gru(
input_seqs, hidden_state, w_inp, w_hid, b_inp, b_hid, rz_act, n_act, backwards=False
):
"""Implementation of GRU cell for paddlepaddle of TVM"""
h_list = []
seq_length = len(input_seqs)
for i in range(seq_length):
step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]
step = _op.squeeze(step, axis=[0])
xwt = _op.nn.dense(step, w_inp)
hwt = _op.nn.dense(hidden_state, w_hid)
if b_inp is not None:
xwt += b_inp
if b_hid is not None:
hwt += b_hid
i_r, i_z, i_n = _op.split(xwt, 3, axis=-1)
h_r, h_z, h_n = _op.split(hwt, 3, axis=-1)
r_gate = rz_act(i_r + h_r)
z_gate = rz_act(i_z + h_z)
n_gate = n_act(i_n + r_gate * h_n)
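# GRU state update: h' = (1 - z) * n + z * h, written here as (h - n) * z + n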
hidden_state = (hidden_state - n_gate) * z_gate + n_gate
h_list.append(_op.expand_dims(hidden_state, axis=0))
if backwards:
h_list = h_list[::-1]
# Concatenate outputs and add back in direction axis.
concatenated = _op.concatenate(h_list, 0)
output = _op.expand_dims(concatenated, axis=1)
hidden_state = _op.expand_dims(hidden_state, axis=0)
return output, hidden_state
def generate_simplernn(
input_seqs, hidden_state, w_inp, w_hid, b_inp, b_hid, n_act, backwards=False
):
"""Implementation of SimpleRNN cell for paddlepaddle of TVM"""
h_list = []
seq_length = len(input_seqs)
for i in range(seq_length):
step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]
step = _op.squeeze(step, axis=[0])
xwt = _op.nn.dense(step, w_inp)
hwt = _op.nn.dense(hidden_state, w_hid)
if b_inp is not None:
xwt += b_inp
if b_hid is not None:
hwt += b_hid
n_gate = n_act(xwt + hwt)
hidden_state = n_gate
h_list.append(_op.expand_dims(hidden_state, axis=0))
if backwards:
h_list = h_list[::-1]
# Concatenate outputs and add back in direction axis.
concatenated = _op.concatenate(h_list, 0)
output = _op.expand_dims(concatenated, axis=1)
hidden_state = _op.expand_dims(hidden_state, axis=0)
return output, hidden_state
def make_param_inputs(g, node, layer, hidden_size, num_layers):
"""Param for weight and bias."""
bidirect_len = 4 if node.attr("is_bidirec") else 2
all_layer_param_len = len(node.input("WeightList"))
weight_list = node.input("WeightList")[: all_layer_param_len // 2]
bias_list = node.input("WeightList")[all_layer_param_len // 2 :]
layer_weight_list = weight_list[layer * bidirect_len : layer * bidirect_len + bidirect_len]
layer_bias_list = bias_list[layer * bidirect_len : layer * bidirect_len + bidirect_len]
param_list = layer_weight_list + layer_bias_list
param_list_len = len(param_list)
input_weights = param_list[0 : param_list_len // 2 : 2]
hidden_weights = param_list[1 : param_list_len // 2 : 2]
input_bias = param_list[param_list_len // 2 : param_list_len : 2]
hidden_bias = param_list[param_list_len // 2 + 1 : param_list_len : 2]
return input_weights, hidden_weights, input_bias, hidden_bias
def make_init_param_inputs(g, node, layer):
"""Init param for inputs."""
mode = node.attr("mode")
if mode == "LSTM":
all_init_h, all_init_c = node.input("PreState")
bidirect_len = 2 if node.attr("is_bidirec") else 1
init_h = _op.strided_slice(
g.get_node(all_init_h),
[layer * bidirect_len],
[layer * bidirect_len + bidirect_len],
axes=[0],
)
init_c = _op.strided_slice(
g.get_node(all_init_c),
[layer * bidirect_len],
[layer * bidirect_len + bidirect_len],
axes=[0],
)
return init_h, init_c
all_init_h = node.input("PreState")[0]
bidirect_len = 2 if node.attr("is_bidirec") else 1
init_h = _op.strided_slice(
g.get_node(all_init_h),
[layer * bidirect_len],
[layer * bidirect_len + bidirect_len],
axes=[0],
)
return init_h
hidden_size = op.attr("hidden_size")
num_layers = op.attr("num_layers")
is_bidirec = op.attr("is_bidirec")
mode = op.attr("mode")
input_x = g.get_node(op.input("Input")[0])
num_directions = 1
if is_bidirec:
num_directions = 2
x_shape = infer_shape(input_x)
time_steps = x_shape[0]
x_steps = _op.split(input_x, indices_or_sections=time_steps, axis=0)
for layer in range(num_layers):
input_weights, hidden_weights, input_bias, hidden_bias = make_param_inputs(
g, op, layer, hidden_size, num_layers
)
if mode == "LSTM":
init_h, init_c = make_init_param_inputs(g, op, layer)
init_hs = _op.split(init_h, num_directions)
init_cs = _op.split(init_c, num_directions)
result_output = []
result_H = []
result_C = []
for i in range(num_directions):
H_t = _op.squeeze(init_hs[i], axis=[0])
C_t = _op.squeeze(init_cs[i], axis=[0])
W = g.get_node(input_weights[i])
R = g.get_node(hidden_weights[i])
WB = g.get_node(input_bias[i])
RB = g.get_node(hidden_bias[i])
output, H, C = generate_lstm(
input_seqs=x_steps,
hidden_state=H_t,
cell_state=C_t,
w_inp=W,
w_hid=R,
b_inp=WB,
b_hid=RB,
f_act=_op.sigmoid,
g_act=_op.tanh,
h_act=_op.tanh,
backwards=i == 1,
)
result_output.append(output)
result_H.append(H)
result_C.append(C)
output = _op.concatenate(result_output, axis=1)
H = _op.concatenate(result_H, axis=0)
C = _op.concatenate(result_C, axis=0)
elif mode == "GRU":
init_h = make_init_param_inputs(g, op, layer)
init_hs = _op.split(init_h, num_directions)
result_output = []
result_H = []
for i in range(num_directions):
H_t = _op.squeeze(init_hs[i], axis=[0])
W = g.get_node(input_weights[i])
R = g.get_node(hidden_weights[i])
WB = g.get_node(input_bias[i])
RB = g.get_node(hidden_bias[i])
output, H = generate_gru(
input_seqs=x_steps,
hidden_state=H_t,
w_inp=W,
w_hid=R,
b_inp=WB,
b_hid=RB,
rz_act=_op.sigmoid,
n_act=_op.tanh,
backwards=i == 1,
)
result_output.append(output)
result_H.append(H)
output = _op.concatenate(result_output, axis=1)
H = _op.concatenate(result_H, axis=0)
elif mode == "RNN_TANH":
init_h = make_init_param_inputs(g, op, layer)
init_hs = _op.split(init_h, num_directions)
result_output = []
result_H = []
for i in range(num_directions):
H_t = _op.squeeze(init_hs[i], axis=[0])
W = g.get_node(input_weights[i])
R = g.get_node(hidden_weights[i])
WB = g.get_node(input_bias[i])
RB = g.get_node(hidden_bias[i])
output, H = generate_simplernn(
input_seqs=x_steps,
hidden_state=H_t,
w_inp=W,
w_hid=R,
b_inp=WB,
b_hid=RB,
n_act=_op.tanh,
backwards=i == 1,
)
result_output.append(output)
result_H.append(H)
output = _op.concatenate(result_output, axis=1)
H = _op.concatenate(result_H, axis=0)
output = _op.transpose(output, axes=[0, 2, 1, 3])
output = _op.reshape(output, newshape=(0, 0, -1))
x_steps = _op.split(output, indices_or_sections=time_steps, axis=0)
g.add_node(op.output("Out")[0], output)
def convert_scale(g, op, block):
"""Operator converter for scale."""
scale = op.attr("scale")
bias = op.attr("bias")
bias_after_scale = op.attr("bias_after_scale")
x = g.get_node(op.input("X")[0])
if np.isclose(scale, 1.0) and np.isclose(bias, 0.0):
out = x
else:
if np.isclose(bias, 0.0):
out = x * _expr.const(np.array(scale).astype("float32"))
elif np.isclose(scale, 1.0):
out = x + _expr.const(np.array(bias).astype("float32"))
else:
if bias_after_scale:
out = x * _expr.const(np.array(scale).astype("float32")) + _expr.const(
np.array(bias).astype("float32")
)
else:
out = (x + _expr.const(np.array(bias).astype("float32"))) * _expr.const(
np.array(scale).astype("float32")
)
g.add_node(op.output("Out")[0], out)
def convert_scatter(g, op, block):
"""Operator converter for scatter."""
x = g.get_node(op.input("X")[0])
index = g.get_node(op.input("Ids")[0])
updates = g.get_node(op.input("Updates")[0])
overwrite = op.attr("overwrite")
shape = infer_shape(updates)
ndims = len(shape)
index = _op.expand_dims(index, axis=-1, num_newaxis=ndims - 1)
index = _op.transform.broadcast_to(index, shape)
if overwrite:
out = _op.scatter(x, index, updates, axis=0)
else:
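# non-overwrite mode accumulates: scatter-add the updates into zeros, then add back x with the updated rows zeroed so untouched rows keep their original values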
out = _op.scatter_add(_op.zeros_like(x), index, updates, axis=0)
out += _op.scatter(x, index, _op.zeros_like(updates), axis=0)
g.add_node(op.output("Out")[0], out)
def convert_scatter_nd_add(g, op, block):
"""Operator converter for scatter_nd_add."""
x = g.get_node(op.input("X")[0])
index = g.get_node(op.input("Index")[0])
updates = g.get_node(op.input("Updates")[0])
indices_dim = len(infer_shape(index))
axes = list(range(indices_dim))
index = _op.transpose(index, axes[-1:] + axes[:-1])
out = _op.scatter_nd(x, index, updates, mode="add")
g.add_node(op.output("Out")[0], out)
def convert_selu(g, op, block):
"""Operator converter for selu."""
x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
alpha = _op.const(op.attr("alpha"), dtype)
scale = _op.const(op.attr("scale"), dtype)
out = (
_expr.const(-1.0, dtype=dtype)
* alpha
* _op.nn.relu(_expr.const(1.0, dtype=dtype) - _op.exp(x))
)
out = scale * (out + _op.nn.relu(x))
g.add_node(op.output("Out")[0], out)
def convert_shape(g, op, block):
"""Operator converter for shape."""
x = g.get_node(op.input("Input")[0])
out = shape_of(x, dtype="int32")
g.add_node(op.output("Out")[0], out)
def convert_size(g, op, block):
"""Operator converter for size."""
input_x = g.get_node(op.input("Input")[0])
out = _op.ndarray_size(input_x, dtype="int64")
out = _op.expand_dims(out, axis=0)
g.add_node(op.output("Out")[0], out)
def convert_slice(g, op, block):
"""Operator converter for slice."""
data = g.get_node(op.input("Input")[0])
dims = len(infer_shape(data))
axes = op.attr("axes")
indices = _expr.const(axes, dtype="int64")
decrease_axis = op.attr("decrease_axis")
if isinstance(decrease_axis, int):
decrease_axis = [decrease_axis]
if op.input("StartsTensor"):
starts = g.get_node(op.input("StartsTensor")[0])
starts, infered = try_infer_value(starts, g.get_params())
if infered:
starts = starts.tolist()
elif op.input("StartsTensorList"):
starts = []
for start_index in op.input("StartsTensorList"):
start_index = g.get_node(start_index).astype("int64")
starts.append(start_index)
starts = _op.concatenate(starts, axis=0)
starts, infered = try_infer_value(starts, g.get_params())
if infered:
starts = starts.tolist()
else:
starts = op.attr("starts")
if len(axes) < dims:
if isinstance(starts, _expr.Expr):
starts = _op.scatter(
_op.const([0] * dims, dtype=infer_type(starts).checked_type.dtype),
indices,
starts,
axis=0,
)
else:
base = [0] * dims
for i, axis in enumerate(axes):
base[axis] = starts[i]
starts = base
if op.input("EndsTensor"):
ends = g.get_node(op.input("EndsTensor")[0])
ends, infered = try_infer_value(ends, g.get_params())
if infered:
ends = ends.tolist()
elif op.input("EndsTensorList"):
ends = []
for end_index in op.input("EndsTensorList"):
end_index = g.get_node(end_index).astype("int64")
ends.append(end_index)
ends = _op.concatenate(ends, axis=0)
ends, infered = try_infer_value(ends, g.get_params())
if infered:
ends = ends.tolist()
else:
ends = op.attr("ends")
if len(axes) < dims:
if isinstance(ends, _expr.Expr):
ends = _op.scatter(
_expr.const(
np.array([np.iinfo(np.int32).max] * dims),
dtype=infer_type(ends).checked_type.dtype,
),
indices,
ends,
axis=0,
)
else:
base = [np.iinfo(np.int32).max] * dims
for i, axis in enumerate(axes):
base[axis] = ends[i]
ends = base
strides = None
if "StridesTensor" in op.input_names and op.input("StridesTensor"):
strides = g.get_node(op.input("StridesTensor")[0])
strides, infered = try_infer_value(strides, g.get_params())
if infered:
strides = strides.tolist()
elif "StridesTensorList" in op.input_names and op.input("StridesTensorList"):
strides = []
for strides_index in op.input("StridesTensorList"):
strides_index = g.get_node(strides_index).astype("int64")
strides.append(strides_index)
strides = _op.concatenate(strides, axis=0)
strides, infered = try_infer_value(strides, g.get_params())
if infered:
strides = strides.tolist()
elif op.has_attr("strides"):
strides = op.attr("strides")
if len(axes) < dims:
if isinstance(strides, _expr.Expr):
strides = _op.scatter(
_expr.const(
np.array([1] * dims),
dtype=infer_type(strides).checked_type.dtype,
),
indices,
strides,
axis=0,
)
elif strides:
base = [1] * dims
for i, axis in enumerate(axes):
base[axis] = strides[i]
strides = base
if not strides:
strides = _op.const([1] * dims, dtype="int64")
out = _op.strided_slice(data, begin=starts, end=ends, strides=strides)
if decrease_axis:
out = _op.squeeze(out, axis=decrease_axis)
g.add_node(op.output("Out")[0], out)
def convert_softmax(g, op, block):
"""Operator converter for softmax."""
axis = op.attr("axis")
input_shape = block.var(op.input("X")[0]).shape
if axis < 0:
axis = len(input_shape) + axis
x = g.get_node(op.input("X")[0])
m = _op.max(x, axis, keepdims=True)
e = _op.exp(x - m)
out = e / _op.sum(e, axis, keepdims=True)
g.add_node(op.output("Out")[0], out)
def convert_softplus(g, op, block):
"""Operator converter for softplus."""
x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
beta = op.attr("beta")
beta = _expr.const(beta, dtype=dtype)
out = _op.log(_op.exp(x * beta) + _expr.const(1.0, dtype=dtype)) / beta
g.add_node(op.output("Out")[0], out)
def convert_softsign(g, op, block):
"""Operator converter for softsign."""
x = g.get_node(op.input("X")[0])
dtype = infer_type(x).checked_type.dtype
out = x / (_op.const(1.0, dtype) + _op.abs(x))
g.add_node(op.output("Out")[0], out)
def convert_split(g, op, block):
"""Operator converter for split."""
x = g.get_node(op.input("X")[0])
axis = op.input("AxisTensor")
if axis:
axis = g.get_node(axis[0])
axis, infered = try_infer_value(axis, g.get_params())
if infered:
axis = axis.tolist()[0]
else:
axis = op.attr("axis")
sections = op.input("SectionsTensorList")
if sections:
tmp_section = []
for i in sections:
i = g.get_node(i)
i, infered = try_infer_value(i, g.get_params())
if infered:
i = i.tolist()
else:
raise ValueError("Dynamic Split not yet supported.")
tmp_section.extend(i)
sections = tmp_section
else:
sections = op.attr("sections")
if sections:
indices = []
split_index = 0
for i in sections[:-1]:
if i == -1:
input_shape = infer_shape(x)[axis]
i = input_shape - np.sum(sections) - 1
split_index += i
indices.append(split_index)
else:
indices = op.attr("num")
out = _op.split(x, indices, axis)
for i, out_i in enumerate(out):
g.add_node(op.output("Out")[i], out_i)
def convert_square(g, op, block):
"""Operator converter for square."""
x = g.get_node(op.input("X")[0])
dtype = block.var(op.output("Out")[0]).dtype
dtype = _convert_dtype_value(dtype)
out = _op.power(x, _expr.const(2, dtype))
g.add_node(op.output("Out")[0], out)
def convert_squeeze(g, op, block):
"""Operator converter for squeeze2."""
x = g.get_node(op.input("X")[0])
axes = op.attr("axes")
if not axes:
axes = None
x = _op.squeeze(x, axis=axes)
g.add_node(op.output("Out")[0], x)
def convert_swish(g, op, block):
"""Operator converter for swish."""
x = g.get_node(op.input("X")[0])
beta = op.attr("beta")
assert beta == 1.0, "Only support beta==1.0 for PaddlePaddle's swish"
out = x * _op.tensor.sigmoid(x)
g.add_node(op.output("Out")[0], out)
def convert_transpose(g, op, block):
"""Operator converter for transpose."""
perm = op.attr("axis")
out = _op.transpose(g.get_node(op.input("X")[0]), axes=perm)
g.add_node(op.output("Out")[0], out)
def convert_unsqueeze(g, op, block):
"""Operator converter for unsqueeze."""
x = g.get_node(op.input("X")[0])
axes = sorted(op.attr("axes"))
for axis in axes:
x = _op.expand_dims(x, axis=axis, num_newaxis=1)
g.add_node(op.output("Out")[0], x)
_convert_map = {
"abs": convert_unary_op,
"acos": convert_unary_op,
"addmm": convert_addmm,
"arg_max": convert_arg_max_min,
"arg_min": convert_arg_max_min,
"argsort": convert_argsort,
"asin": convert_unary_op,
"assign": convert_assign,
"assign_value": convert_assign_value,
"atan": convert_unary_op,
"batch_norm": convert_batch_norm,
"bicubic_interp_v2": convert_interpolate,
"bilinear_interp_v2": convert_interpolate,
"bmm": convert_bmm,
"brelu": convert_brelu,
"cast": convert_cast,
"ceil": convert_unary_op,
"clip": convert_clip,
"concat": convert_concat,
"conv2d": convert_conv2d,
"conv2d_transpose": convert_conv2d_transpose,
"cos": convert_unary_op,
"cosh": convert_unary_op,
"cumsum": convert_cumsum,
"depthwise_conv2d": convert_conv2d,
"depthwise_conv2d_transpose": convert_conv2d_transpose,
"dot": convert_dot,
"dropout": convert_dropout,
"elementwise_add": convert_elementwise_op,
"elementwise_div": convert_elementwise_op,
"elementwise_floordiv": convert_elementwise_op,
"elementwise_max": convert_elementwise_op,
"elementwise_min": convert_elementwise_op,
"elementwise_mod": convert_elementwise_op,
"elementwise_mul": convert_elementwise_op,
"elementwise_pow": convert_elementwise_op,
"elementwise_prod": convert_elementwise_op,
"elementwise_sub": convert_elementwise_op,
"elu": convert_elu,
"equal": convert_elementwise_op,
"erf": convert_unary_op,
"exp": convert_unary_op,
"expand_v2": convert_expand,
"expand_as_v2": convert_expand_as,
"feed": convert_feed,
"fill_any_like": convert_fill_any_like,
"fill_constant": convert_fill_constant,
"fill_constant_batch_size_like": convert_fill_constant_batch_size_like,
"flatten_contiguous_range": convert_flatten,
"floor": convert_unary_op,
"floor_mod": convert_elementwise_op,
"gather": convert_gather,
"gather_nd": convert_gather_nd,
"gelu": convert_gelu,
"greater_equal": convert_elementwise_op,
"greater_than": convert_elementwise_op,
"group_norm": convert_group_norm,
"hard_shrink": convert_hard_shrink,
"hard_sigmoid": convert_hard_sigmoid,
"hard_swish": convert_hard_swish,
"instance_norm": convert_instance_norm,
"isfinite_v2": convert_unary_op,
"isinf_v2": convert_unary_op,
"isnan_v2": convert_unary_op,
"layer_norm": convert_layer_norm,
"leaky_relu": convert_leaky_relu,
"less_equal": convert_elementwise_op,
"less_than": convert_elementwise_op,
"log": convert_unary_op,
"log2": convert_unary_op,
"log10": convert_unary_op,
"log1p": convert_log1p,
"logical_and": convert_binary_logical_op,
"logical_not": convert_logical_not,
"logical_or": convert_binary_logical_op,
"logical_xor": convert_binary_logical_op,
"logsigmoid": convert_logsigmoid,
"log_softmax": convert_logsoftmax,
"logsumexp": convert_logsumexp,
"lookup_table_v2": convert_lookup_table,
"matmul": convert_matmul,
"matmul_v2": convert_matmul,
"meshgrid": convert_meshgrid,
"mul": convert_mul,
"mv": convert_mv,
"nearest_interp_v2": convert_interpolate,
"not_equal": convert_elementwise_op,
"pad1d": convert_padding,
"pad2d": convert_padding,
"pad3d": convert_padding,
"pixel_shuffle": convert_pixel_shuffle,
"pool2d": convert_pool2d,
"pow": convert_pow,
"prelu": convert_prelu,
"range": convert_range,
"relu": convert_unary_op,
"relu6": convert_relu6,
"reshape2": convert_reshape,
"round": convert_unary_op,
"reciprocal": convert_reciprocal,
"reduce_all": convert_reduce,
"reduce_any": convert_reduce,
"reduce_max": convert_reduce,
"reduce_min": convert_reduce,
"reduce_prod": convert_reduce,
"reduce_sum": convert_reduce,
"reduce_mean": convert_reduce,
"rnn": convert_rnn,
"rsqrt": convert_unary_op,
"scale": convert_scale,
"scatter": convert_scatter,
"scatter_nd_add": convert_scatter_nd_add,
"selu": convert_selu,
"shape": convert_shape,
"sigmoid": convert_unary_op,
"sign": convert_unary_op,
"sin": convert_unary_op,
"sinh": convert_unary_op,
"size": convert_size,
"slice": convert_slice,
"softmax": convert_softmax,
"softplus": convert_softplus,
"softsign": convert_softsign,
"split": convert_split,
"strided_slice": convert_slice,
"sqrt": convert_unary_op,
"square": convert_square,
"squeeze2": convert_squeeze,
"swish": convert_swish,
"tan": convert_unary_op,
"tanh": convert_unary_op,
"transpose2": convert_transpose,
"unsqueeze2": convert_unsqueeze,
}
class GraphProto:
"""A helper class for handling relay functions from PaddlePaddle model."""
def __init__(self):
self.nodes = {}
self.params = {}
self.shape_dict = None
def get_node(self, name):
"""get node from graph"""
assert name in self.nodes
return self.nodes[name]
def add_node(self, name, node):
"""add a node to graph"""
self.nodes[name] = fold_constant(node)
def get_params(self, name=None):
"""Get params from graph."""
if name is None:
return self.params
assert name in self.params
return self.params[name]
def extract_parameters(self, program, scope=None):
"""Extract all the weights from PaddlePaddle program."""
self.params = {}
variables = program.global_block().vars
for name in variables:
var = program.global_block().var(name)
if name.endswith("feed") or name.endswith("fetch"):
continue
if not var.persistable:
continue
if isinstance(scope, dict):
self.params[name] = _nd.array(scope[name])
else:
self.params[name] = _nd.array(np.array(scope.var(name).get_tensor()))
shape = self.params[name].shape
dtype = self.params[name].dtype
self.nodes[name] = new_var(name, shape=shape, dtype=dtype)
def check_input_shape(self, op, block):
"""Check the shape information of model's inputs, fixed shape is recommended."""
ipt_name = op.input(op.input_names[0])
ipt_shape = block.var(ipt_name).shape
for i in ipt_shape:
if i < 0:
warning_msg = "Input {}(shape={}) has unkown dimension shapes. \
Specifying static values may improve performance".format(
ipt_name, ipt_shape
)
warnings.warn(warning_msg)
def check_unsupported_ops(self, program):
"""Check whether all the operators are supported."""
unsupported_ops = set()
for block in program.blocks:
for op in block.ops:
if op.type == "fetch":
continue
if op.type not in _convert_map:
unsupported_ops.add(op.type)
if len(unsupported_ops) > 0:
msg = "The following operators are not supported for frontend Paddle: "
msg += ", ".join(unsupported_ops)
raise tvm.error.OpNotImplemented(msg)
def ops_to_relay(self, program, input_specs=None):
"""Convert PaddlePaddle operators to TVM relay functions."""
if input_specs is not None:
for input_spec in input_specs:
convert_feed(self, input_spec, None)
for block in program.blocks:
for op in block.ops:
if op.type == "fetch":
continue
convert_func = _convert_map[op.type]
convert_func(self, op, block)
def from_program(self, program, shape_dict, scope):
"""Construct the TVM relay expression from PaddlePaddle program."""
self.shape_dict = shape_dict
if scope is None:
import paddle
scope = paddle.fluid.global_scope()
self.check_unsupported_ops(program)
self.extract_parameters(program, scope)
self.ops_to_relay(program)
output_names = list()
for block in program.blocks:
for op in block.ops:
if op.type == "fetch":
output_names.append(op.input("X")[0])
outputs = [self.nodes[name] for name in output_names]
outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
free_vars = analysis.free_vars(outputs)
func = _function.Function(free_vars, outputs)
mod = IRModule.from_expr(func)
return mod, self.params
def from_translated_layer(self, layer, shape_dict):
"""Construct the TVM relay expression from PaddlePaddle TranslatedLayer."""
self.shape_dict = shape_dict
program = layer.program()
parameters = dict()
for param in layer.parameters() + layer.buffers():
parameters[param.name] = np.array(param.value().get_tensor())
self.check_unsupported_ops(program)
self.extract_parameters(program, parameters)
input_specs = layer._input_spec()
self.ops_to_relay(program, input_specs)
output_names = [x.name for x in layer._output_spec()]
outputs = [self.nodes[name] for name in output_names]
outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
free_vars = analysis.free_vars(outputs)
func = _function.Function(free_vars, outputs)
mod = IRModule.from_expr(func)
# remove unused parameters
final_params = dict()
for var in free_vars:
if var.name_hint in self.params:
final_params[var.name_hint] = self.params[var.name_hint]
self.params = final_params
return mod, self.params
def from_paddle(program_or_layer, shape_dict=None, scope=None):
"""Convert a PaddlePaddle model into an equivalent Relay Function.
PaddlePaddle Program/TranslatedLayer represent the computation graph of a PaddlePaddle model,
and the PaddlePaddle scope stores all of its weights.
Parameters
----------
program_or_layer : object of `paddle.static.Program` or `paddle.jit.TranslatedLayer`
Loaded model by `paddle.static.load_inference_model` or `paddle.jit.load`
shape_dict : dict of str to tuple/list, optional
The input shapes of the model
scope : object of `paddle.static.Scope`, optional
The scope that stores all the weights of the model; `paddle.static.global_scope` is used by default
Returns
-------
mod : tvm.IRModule
The relay module for compilation
params : dict of str to tvm.nd.NDArray
The parameter dict to be used by the relay module
"""
import paddle
# disable system signal capturing in paddle framework
# the signal capturing may cause conflict while running autotvm with paddle frontend
paddle.disable_signal_handler()
g = GraphProto()
if isinstance(program_or_layer, paddle.jit.TranslatedLayer):
# model is loaded by `paddle.jit.load`
mod, params = g.from_translated_layer(program_or_layer, shape_dict)
elif isinstance(program_or_layer, paddle.static.Program):
# model is loaded by `paddle.static.load_inference_model`
mod, params = g.from_program(program_or_layer, shape_dict, scope)
else:
raise Exception("Only PaddlePaddle's Program and TranslatedLayer are supported.")
return mod, params
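# A minimal usage sketch (hedged; assumes a PaddlePaddle inference model saved at
# "./inference/model" with a single input named "x" of shape [1, 3, 224, 224]):
#
#   import paddle
#   import tvm.relay as relay
#   paddle.enable_static()
#   exe = paddle.static.Executor(paddle.CPUPlace())
#   prog, _, _ = paddle.static.load_inference_model("./inference/model", exe)
#   mod, params = relay.frontend.from_paddle(prog, shape_dict={"x": [1, 3, 224, 224]})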
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/pytorch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, too-many-lines, len-as-condition, no-else-return, unused-variable, too-many-nested-blocks
# pylint: disable=consider-iterating-dictionary, invalid-name, unused-argument, unused-variable, broad-except
# pylint: disable=import-outside-toplevel, simplifiable-if-expression, cell-var-from-loop, unnecessary-lambda
# pylint: disable=missing-function-docstring, redefined-builtin
"""PT: PyTorch frontend."""
import functools
import itertools
import math
import sys
import numpy as np
import tvm
from tvm.ir import IRModule
from tvm.topi.utils import get_const_tuple
from .. import analysis as _analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from .. import qnn, transform
from ..expr_functor import ExprMutator
from ..loops import while_loop
from ..prelude import Prelude, StaticTensorArrayOps
from ..ty import Any, TensorType, TupleType
from . import qnn_torch
from .common import AttrCvt, get_relay_op, gru_cell, logger, rnn_cell
from .common import infer_shape as _infer_shape
from .common import infer_value as _infer_value
from .common import infer_value_simulated as _infer_value_simulated
from .common import lstm_cell, try_infer_value, unbind, fold_constant
from .pytorch_utils import is_version_greater_than, getattr_attr_name
__all__ = ["from_pytorch"]
# This returns a "subgraph" which puts variables whenever
# the type is known. It also records things to map the input
# nodes to the extracted graph's nodes.
# As Python objects are not round-trippable through C++, and
# our type annotations only live in Python, we need to map
# the nodes we get in visiting to the nodes
# we used to construct the graph (they are the same in C++,
# match each other in dictionary lookups, but are not the same
# in Python) by using the hint dictionary filled as
# {node: node for node in nodes} to get the type annotations.
# https://discuss.tvm.apache.org/t/round-tripping-objects-through-the-ffi/8440
class _TypeFinder(ExprMutator):
def __init__(self, types):
super().__init__()
self.counter = 0
self.vars = {}
self.types = types
self.leave = set() # some variables are not inputs
def visit_let(self, let):
self.leave.add(let.var)
return super().visit_let(let)
def visit_function(self, fn):
self.leave.update(fn.params)
return super().visit_function(fn)
def visit(self, expr):
if expr in self.leave:
return super().visit(expr)
if expr in self.vars:
return self.vars[expr]
if isinstance(expr, tvm.relay.Var):
self.vars[expr] = expr
return expr
if expr in self.types:
ty = self.types[expr]
v = tvm.relay.var(f"_{self.counter}", type_annotation=ty)
self.counter += 1
self.vars[expr] = v
return v
v = super().visit(expr)
return v
def _should_construct_dynamic_list(list_construct_node):
# if this list is element-accessed or modified at runtime, generate List ADT
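# Hedged illustration: a TorchScript loop that reads elements via xs[i]
# (aten::__getitem__) or grows the list with xs = xs + [v] (aten::add
# returning a ListType) makes the list dynamic, so it is lowered to the
# List ADT; a list that is only ever passed whole to an op (e.g. as conv
# strides) stays a static Python list.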
def inplace_add_to_add(op_name):
if op_name == "aten::add_":
return "aten::add"
else:
return op_name
uses = _get_uses(list_construct_node)
for loop_use in filter(lambda use: use.user.kind() == "prim::Loop", uses):
block_input_index = loop_use.offset - 1
block = list(loop_use.user.blocks())[0]
list_loop_var = list(block.inputs())[block_input_index]
uses += _get_uses(list_loop_var.node())
op_names = map(inplace_add_to_add, set(use.user.kind() for use in uses))
list_ops = set(["aten::add", "aten::__getitem__"])
intersect = list_ops.intersection(op_names)
if len(intersect) > 0 and intersect != set(["aten::add"]):
return True
# if add op outputs list, it is dynamic so we need to construct List ADT
for use in filter(lambda use: use.user.kind() in ["aten::add", "aten::add_"], uses):
output_type = _get_node_type(use.user)
if output_type == "ListType":
return True
return False
def _is_int_seq(seq):
# TODO (t-vi): handle non-int constants? (like numpy.intXX)
return len(seq) > 0 and all([isinstance(i, int) for i in seq])
# operator implementation
class PyTorchOpConverter:
"""A helper class for holding PyTorch op converters."""
def __init__(self, prelude, default_dtype):
self.prelude = prelude
self.default_dtype = default_dtype
self.create_convert_map()
self.types = {} # map from nodes to (Relay) type annotations
# this incrementally infers the type, see the comments on the type visitor
# above.
def infer_type(self, node, mod=None):
"""An incremental method to infer the type of a node in the relay graph."""
if node in self.types:
return self.types[node]
if isinstance(node, tvm.relay.Var):
return node.type_annotation
tf = _TypeFinder(types=self.types)
new_node = tf.visit(node)
fn = _function.Function(list(tf.vars.values()), new_node)
new_mod = IRModule({"main": fn})
if mod is not None:
new_mod.update(mod)
new_mod = transform.RemoveUnusedFunctions()(new_mod)
new_mod = transform.InferType()(new_mod)
entry = new_mod["main"]
ty = entry.body.checked_type
self.types[node] = ty
return self.types[node]
def infer_type_with_prelude(self, val):
body = self.infer_type(val, self.prelude.mod)
return body
# list ADT utilities
def convert_to_list_adt(self, py_lst):
elem_tys = [self.infer_type_with_prelude(elem) for elem in py_lst]
msg = "List elements should have identical types"
assert all(map(lambda ty: ty == elem_tys[0], elem_tys)), msg
# get_type returns type_name, ctor1, ..., ctorN
# 1 is nil
_, cons, nil = self.prelude.mod.get_type("List")
adt_lst = nil()
for elem in reversed(py_lst):
adt_lst = cons(elem, adt_lst)
return adt_lst
def map_tensor_array_constructor(self, adt_lst, shape):
static_tensor_array_ops = StaticTensorArrayOps(self.prelude, "float32", shape)
static_tensor_array_ops.register()
tensor_create = self.prelude.get_tensor_ctor_static("tensor_constructor", "float32", shape)
return self.prelude.map(tensor_create, adt_lst)
def convert_to_tensor_array(self, adt_lst):
_, cons, nil = self.prelude.mod.get_type("List")
if self.prelude.length(adt_lst) == 0:
return nil()
checked_type = self.infer_type_with_prelude(self.prelude.hd(adt_lst))
shape = checked_type.shape
tensor_array = self.map_tensor_array_constructor(adt_lst, shape)
return tensor_array, tuple(shape)
def infer_shape(self, inputs, mod=None):
"""A method to get the output type of an intermediate node in the graph."""
typ = self.infer_type(inputs, mod=mod)
if hasattr(typ, "shape"):
# Regular operator that outputs tensors
return get_const_tuple(typ.shape)
# The return type is not a tensor, for example List
return typ
def infer_shape_with_prelude(self, inputs):
return self.infer_shape(inputs, mod=self.prelude.mod)
def record_output_type(self, output):
if isinstance(output, tuple):
cleaned_output = [o for o in output if o is not None]
types = self.infer_type_with_prelude(_expr.Tuple(cleaned_output))
for o, t in zip(cleaned_output, types.fields):
self.types[o] = t
elif isinstance(output, _expr.Expr):
self.infer_type_with_prelude(output)
# it can also happen that the type is int or so
def pytorch_promote_types(self, inputs, dtypes):
"""This promotes TVM inputs with TVM dtypes passed like PyTorch would"""
actual_dtypes = []
for i, inp in enumerate(inputs):
if isinstance(inp, _expr.Expr):
idt = self.infer_type(inp).dtype
actual_dtypes.append(idt)
else:
actual_dtypes.append(dtypes[i])
dtypes = actual_dtypes
tensor_dtypes = [dt for inp, dt in zip(inputs, dtypes) if not np.isscalar(inp)]
non_tensor_inputs = [inp for inp in inputs if np.isscalar(inp)]
result_type = _pytorch_result_type(tensor_dtypes, non_tensor_inputs)
results = []
for inp, dt in zip(inputs, dtypes):
if np.isscalar(inp):
results.append(_expr.const(inp, dtype=result_type))
elif dt == result_type:
results.append(inp)
else:
results.append(_op.cast(inp, result_type))
return results
def is_quantized_tensor(self, data):
# If a quantized Torch module is saved and loaded back, dtype will be dropped
# Since dtypes from Torch tensors are not reliable in such cases, we use
# Relay's type inference result to decide if an input tensor is quantized
ty = self.infer_type_with_prelude(data)
return ty.dtype == "uint8"
# Operator implementations
def make_elemwise(self, name):
def elemwise(inputs, input_types):
if name == "divide":
# https://pytorch.org/docs/stable/generated/torch.div.html#torch.div
# None - default behavior. Performs no rounding and, if both input and
# other are integer types, promotes the inputs to the default scalar type.
if all(["int" in input_type for input_type in input_types[:2]]):
input_types[:2] = ["float32"] * 2
cast_inputs = []
for inp in inputs[:2]:
if np.isscalar(inp):
cast_inputs.append(_expr.const(inp, dtype="float32"))
else:
cast_inputs.append(_op.cast(inp, "float32"))
inputs[:2] = cast_inputs
data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])
return get_relay_op(name)(data0, data1)
return elemwise
def min_max_common(self, name_elemwise, name_reduce, inputs, input_types):
if len(inputs) == 1:
data = self.pytorch_promote_types(inputs[:1], input_types[:1])
return get_relay_op(name_reduce)(data[0])
elif len(inputs) >= 2 and isinstance(inputs[1], (list, int)):
data = self.pytorch_promote_types(inputs[:1], input_types[:1])
dim = inputs[1]
keepdims = inputs[2] if len(inputs) > 2 else False
# also return dummy indices
return get_relay_op(name_reduce)(data[0], axis=dim, keepdims=keepdims), None
else:
data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])
return get_relay_op(name_elemwise)(data0, data1)
def max(self, inputs, input_types):
return self.min_max_common("maximum", "max", inputs, input_types)
def min(self, inputs, input_types):
return self.min_max_common("minimum", "min", inputs, input_types)
def maximum(self, inputs, input_types):
data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])
return _op.maximum(data0, data1)
def minimum(self, inputs, input_types):
data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])
return _op.minimum(data0, data1)
def make_unary(self, name):
def unary(inputs, input_types):
# this is just to ensure tensor input
(data,) = self.pytorch_promote_types(inputs[:1], input_types[:1])
return get_relay_op(name)(data)
return unary
def log1p(self, inputs, input_types):
# log1p(x) = log(x + 1)
(dtype,) = input_types
one = _expr.const(1, dtype=dtype)
return _op.log(inputs[0] + one)
def square(self, inputs, input_types):
(dtype,) = input_types
return _op.power(inputs[0], _expr.const(2, dtype))
def lerp(self, inputs, input_types):
if len(inputs) != 3:
msg = "Wrong number of arguments (%d) to parse." % (len(inputs))
raise AssertionError(msg)
start = inputs[0]
end = inputs[1]
weight = inputs[2]
return start + weight * (end - start)
def arange(self, inputs, input_types):
def _get_value(val, dtype):
# dtype is a tvm dtype
if isinstance(val, _expr.Expr):
inp = _op.cast(val, dtype)
ret, _ = try_infer_value(inp, lambda ret: _expr.const(ret, dtype))
else:
ret = _create_typed_const(val, dtype)
return ret
def _get_type(val, inp_type):
if isinstance(val, _expr.Expr):
dtype = str(self.infer_type(val))
return dtype
return inp_type
# PyTorch arange uses the following type semantics:
# - if a dtype is given, start, stop, step are converted to that dtype
# - if no dtype is given and all args are integral, dtype is int64
# - if no dtype is given and there is a float arg, dtype is float32
if len(inputs) in {5, 6, 7}:
# inputs look like [_,_,_,dtype,layout,device,requires_grad]
# therefore dtype_idx is always the length of inputs minus 4
dtype_idx = len(inputs) - 4
types = [_get_type(inputs[i], input_types[i]) for i in range(dtype_idx)]
if inputs[dtype_idx] is not None:
dtype = _convert_dtype_value(inputs[dtype_idx])
elif any([t.startswith("float") for t in types]):
dtype = "float32"
else:
dtype = "int64"
# - if len(inputs) == 5, inputs = [stop, dtype, ...]
# - if len(inputs) == 6, inputs = [start, stop, dtype, ...]
# - if len(inputs) == 7, inputs = [start, stop, step, dtype, ...]
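# Hedged illustration of the layouts above (dtype slot left as None):
#   torch.arange(5)         -> [5, None, ...]          -> dtype "int64"
#   torch.arange(1, 5)      -> [1, 5, None, ...]       -> dtype "int64"
#   torch.arange(1, 5, 0.5) -> [1, 5, 0.5, None, ...]  -> dtype "float32"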
start = _get_value(inputs[0], dtype) if len(inputs) > 5 else _expr.const(0, dtype)
stop = _get_value(inputs[1 if len(inputs) > 5 else 0], dtype)
step = _get_value(inputs[2], dtype) if len(inputs) > 6 else _expr.const(1, dtype)
else:
msg = "Unknown number of arguments (%d) to parse." % (len(inputs))
raise AssertionError(msg)
return _op.transform.arange(start=start, stop=stop, step=step, dtype=dtype)
def squeeze(self, inputs, input_types):
data = inputs[0]
if len(inputs) == 1:
axis = None
else:
# TODO (t-vi): why is the cast to int needed? similarly elsewhere
axis = [int(inputs[1])]
return _op.transform.squeeze(data, axis)
def unsqueeze(self, inputs, input_types):
data = inputs[0]
axis = inputs[1]
return _op.transform.expand_dims(data, int(axis), 1)
def concatenate(self, inputs, input_types):
def tensor_array_concat(lst, axis):
assert axis == 0, "Tensor array concat supported only for axis 0"
tensor_array, shape = self.convert_to_tensor_array(lst)
concat_shape = (Any(),) + shape[1:]
concat = self.prelude.get_global_var_static("tensor_array_concat", "float32", shape)
concatenated = concat(tensor_array)
static_tensor_array_ops = StaticTensorArrayOps(self.prelude, "float32", concat_shape)
static_tensor_array_ops.register()
get_tensor = self.prelude.get_global_var_static(
"tensor_get_data", "float32", concat_shape
)
return get_tensor(concatenated)
data = inputs[0]
axis = inputs[1]
if not isinstance(data, list):
return tensor_array_concat(data, axis)
if isinstance(data, _expr.Expr):
data = [data]
return _op.tensor.concatenate(data, int(axis))
def slice(self, inputs, input_types):
axis_dtype = "int64"
index_size_limit = sys.maxsize
data = inputs[0]
dshape = self.infer_shape(data)
ndim = len(dshape)
dim = int(inputs[1])
stride = inputs[4]
target_begin, is_begin_const = try_infer_value(
inputs[2], lambda ret: ret.astype(np.int64).item(0)
)
target_end, is_end_const = try_infer_value(
inputs[3], lambda ret: ret.astype(np.int64).item(0)
)
# A fast path when slicing is nop.
if (
isinstance(target_begin, int)
and isinstance(target_end, int)
and target_begin == 0
and target_end >= index_size_limit
and stride == 1
):
return data
if target_begin is None and target_end is None:
return data
# Process begin
begin = [0] * ndim
if target_begin is not None:
begin[dim] = target_begin
if target_begin is not None and not isinstance(begin[dim], int):
tmp = []
for b in begin:
if isinstance(b, int):
tmp.append(_op.expand_dims(_expr.const(b, axis_dtype), axis=0))
else:
tmp.append(_op.cast(_op.expand_dims(b, axis=0), axis_dtype))
begin = _op.concatenate(tmp, axis=0)
btype = self.infer_type(begin).dtype
if str(btype) != axis_dtype:
begin = _op.cast(begin, axis_dtype)
# Process end
if isinstance(target_end, int) and target_end >= index_size_limit:
target_end = dshape[dim]
if any([isinstance(d, tvm.tir.Any) for d in dshape]):
end = _op.shape_of(data)
else:
end = dshape
if isinstance(target_end, int):
if isinstance(end, list):
end[dim] = target_end
else:
all_static = True
for i, shape_dim in enumerate(dshape):
if i != dim and isinstance(shape_dim, tvm.tir.Any):
all_static = False
if all_static:
end = list(get_const_tuple(dshape))
end[dim] = target_end
else:
target_end = _expr.const(target_end)
end = _op.scatter(
end,
_op.expand_dims(_expr.const(dim), axis=0),
_op.expand_dims(target_end, axis=0),
axis=0,
)
else:
end = _op.cast(_op.shape_of(data), axis_dtype)
if target_end is not None and not isinstance(target_end, tvm.tir.Any):
ttype = self.infer_type(target_end).dtype
if str(ttype) != axis_dtype:
target_end = _op.cast(target_end, axis_dtype)
end = _op.scatter(
end,
_op.expand_dims(_expr.const(dim), axis=0),
_op.expand_dims(target_end, axis=0),
axis=0,
)
if not isinstance(end, list):
etype = self.infer_type(end).dtype
if str(etype) != axis_dtype:
end = _op.cast(end, axis_dtype)
strides = [1] * ndim
strides[dim] = stride
return _op.transform.strided_slice(
data, begin=begin, end=end, strides=strides, slice_mode="end"
)
def narrow(self, inputs, input_types):
# Inputs are:
# 0 - the tensor to narrow
# 1 - the dimension along which to narrow
# 2 - the starting dimension
# 3 - the distance to the ending dimension
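# Hedged worked example: aten::narrow(x, dim=1, start=2, length=3) becomes
# a slice along dim 1 with begin 2 and end 2 + 3 = 5 (stride 1), i.e.
# x[:, 2:5] for a 2-D input.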
# Lets find the ending dimension
end = self.add(inputs[2:4], input_types[2:4])
stride = 1
slice_input = inputs[:3] + [end, stride]
slice_types = input_types + ["int32"]
return self.slice(slice_input, slice_types)
def split(self, inputs, input_types):
data = inputs[0]
split_size = int(inputs[1])
dim = int(inputs[2])
split_index = split_size
indices = []
while split_index < self.infer_shape(data)[dim]:
indices.append(split_index)
split_index += split_size
return _op.split(data, indices, dim)
def split_with_sizes(self, inputs, input_types):
data = inputs[0]
sections = inputs[1]
dim = int(inputs[2])
if len(sections) == 1:
# a special case used in torchvision detection models
return _expr.TupleWrapper(_expr.Tuple([data]), 1)
split_index = 0
indices = []
for i in range(len(sections) - 1):
index, _ = try_infer_value(sections[i], lambda ret: int(ret))
split_index += index
indices.append(split_index)
return _op.split(data, indices, dim)
def tensor_split(self, inputs, input_types):
# Reference: https://pytorch.org/docs/stable/generated/torch.tensor_split.html
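# Hedged worked example for the integer-sections branch below: a length-10
# axis split into n=3 sections gives split_size=3, split_rest=1 and split
# indices [4, 7], i.e. chunks of sizes 4, 3 and 3, matching
# torch.tensor_split's rule that the first `rest` chunks get one extra element.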
import torch
if not isinstance(inputs[1], (int, list, tuple, torch.Tensor)):
msg = "indices_or_sections type %s could not be parsed in tensor_split op" % (
type(inputs[1])
)
raise AssertionError(msg)
if isinstance(inputs[1], torch.Tensor) and not (
list(inputs[1].shape) == [] or len(list(inputs[1].shape)) == 1
):
msg = "indices_or_sections must be a zero-dimensional or one-dimensional long tensor"
raise AssertionError(msg)
if isinstance(inputs[1], int) or (
isinstance(inputs[1], torch.Tensor) and list(inputs[1].shape) == []
):
data = inputs[0]
n = int(inputs[1])
dim = int(inputs[2])
split_size = int(self.infer_shape(data)[dim] / n)
split_rest = int(self.infer_shape(data)[dim] % n)
indices = []
split_index = split_size
if split_rest == 0:
for i in range(n - 1):
indices.append(split_index)
split_index += split_size
else:
for i in range(split_rest):
indices.append(split_index + 1)
split_index = (i + 1) * (split_index + 1)
for i in range(n - split_rest - 1):
split_index += split_size
indices.append(split_index)
return _op.split(data, indices, dim)
else:
data = inputs[0]
sections = inputs[1]
dim = int(inputs[2])
if isinstance(sections, tuple):
sections = list(sections)
elif isinstance(sections, torch.Tensor):
sections = sections.cpu().numpy().tolist()
return _op.split(data, sections, dim)
def select(self, inputs, input_types):
data = inputs[0]
dim = int(inputs[1])
index = _wrap_const(inputs[2])
return _op.transform.take(data, index, axis=dim, mode="wrap")
def take(self, inputs, input_types):
data = inputs[0]
indices = _op.cast(inputs[1], "int32")
return _op.transform.take(data, indices=indices, mode="wrap")
def topk(self, inputs, input_types):
data = inputs[0]
axis = int(inputs[2])
is_ascend = not bool(inputs[3])
sort = bool(inputs[4])
if isinstance(inputs[1], _expr.Expr):
k, _ = try_infer_value(inputs[1], lambda ret: ret.tolist())
else:
k = inputs[1]
if not sort:
msg = "Currently supports only sorted output for topk operator."
raise AssertionError(msg)
outs = _op.topk(data, k=k, axis=axis, is_ascend=is_ascend, ret_type="both", dtype="int64")
return outs[0], outs[1]
def reciprocal(self, inputs, input_types):
data = inputs[0]
return _expr.const(1.0, dtype=input_types[0]) / data
def repeat(self, inputs, input_types):
data = inputs[0]
reps = []
for r in inputs[1]:
if isinstance(r, int):
reps.append(r)
else:
reps.append(int(_infer_value(r, {}).numpy()))
return _op.transform.tile(data, reps=reps)
def repeat_interleave(self, inputs, input_types):
data = inputs[0]
if isinstance(inputs[1], int):
repeats = inputs[1]
axis = inputs[2]
elif isinstance(inputs[1], _expr.Expr):
if isinstance(inputs[1], _expr.Constant):
repeats = int(inputs[1].data.numpy())
else:
repeats, _ = try_infer_value(inputs[1], lambda ret: ret.tolist())
axis = inputs[2]
else:
msg = "Only repeat with one value as repeat is currently supported."
raise AssertionError(msg)
if axis is None: # Flatten the data if no axis is given from torch
data = _op.transform.reshape(data, [-1])
axis = 0
return _op.transform.repeat(data, repeats=repeats, axis=axis)
def addcdiv(self, inputs, input_types):
data, t1, t2, c = self.pytorch_promote_types(inputs[:4], input_types[:4])
return data + (c * (t1 / t2))
def addcmul(self, inputs, input_types):
data, t1, t2, c = self.pytorch_promote_types(inputs[:4], input_types[:4])
return data + (c * (t1 * t2))
def where(self, inputs, input_types):
if len(inputs) == 1:
return self.nonzero([inputs[0], True], input_types)
cond = inputs[0]
x, y = self.pytorch_promote_types(inputs[1:3], input_types[1:3])
return _op.where(cond, x, y)
def full_impl(self, data, fill_value, dtype):
size = []
need_reshape = False
new_shape = []
for dim in data:
if isinstance(dim, _expr.Expr):
if isinstance(dim, _expr.Constant):
dim = int(dim.data.numpy())
if isinstance(size, list):
size.append(dim)
new_shape.append(dim)
else:
dim, success = try_infer_value(dim, lambda ret: int(ret), lambda: 0)
new_shape.append(dim)
if success:
if isinstance(size, list):
size.append(dim)
else:
size = None
need_reshape = True
else:
if isinstance(size, list):
size.append(dim)
new_shape.append(dim)
if size is None:
tmp = []
for dim in data:
tmp.append(_op.cast(_op.expand_dims(dim, axis=0), "int64"))
size = _op.concatenate(tmp, axis=0)
if not isinstance(fill_value, _expr.Constant):
fill_value = _expr.const(fill_value, dtype=dtype)
out = _op.full(fill_value, size, dtype=dtype)
if need_reshape:
out = _op.reshape(out, new_shape)
return out
def ones(self, inputs, input_types):
data = inputs[0]
import torch
if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)):
msg = "Data type %s could not be parsed in ones op" % (type(data))
raise AssertionError(msg)
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
else:
dtype = self.default_dtype
return self.full_impl(data, 1, dtype)
def ones_like(self, inputs, input_types):
data = inputs[0]
out = _op.ones_like(data)
# If the input and the output datatype is different, do a cast
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
else:
dtype = self.default_dtype
if input_types[0] != dtype:
out = _op.cast(out, dtype)
return out
def new_ones(self, inputs, input_types):
size = inputs[1]
import torch
if not isinstance(size, (_expr.Expr, list, tuple, torch.Size, np.ndarray)):
msg = "Data type %s could not be parsed in ones op" % (type(size))
raise AssertionError(msg)
if inputs[2] is not None:
dtype = _convert_dtype_value(inputs[2])
else:
dtype = input_types[0]
return self.full_impl(size, 1, dtype)
def zeros(self, inputs, input_types):
data = inputs[0]
import torch
if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)):
msg = "Data type %s could not be parsed in zeros op" % (type(data))
raise AssertionError(msg)
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
else:
dtype = self.default_dtype
return self.full_impl(data, 0, dtype)
def zero_(self, inputs, input_types):
data = inputs[0]
return self.full_impl(self.infer_shape(data), 0, input_types[0])
def zeros_like(self, inputs, input_types):
data = inputs[0]
out = _op.zeros_like(data)
# If the input and the output datatype is different, do a cast
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
else:
dtype = self.default_dtype
if input_types[0] not in dtype:
out = _op.cast(out, dtype)
return out
def full(self, inputs, input_types):
data = inputs[0]
fill_value = inputs[1]
import torch
if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)):
msg = "Data type %s could not be parsed in full op" % (type(data))
raise AssertionError(msg)
if inputs[2] is not None: # dtype given
dtype = _convert_dtype_value(inputs[2])
else:
# if dtype is None, torch uses a global default set by torch.set_default_tensor_type()
dtype = self.default_dtype
return self.full_impl(data, fill_value, dtype)
def full_like(self, inputs, input_types):
data = inputs[0]
fill_value = inputs[1]
out = _op.full_like(data, _expr.const(fill_value))
# If the input and the output datatype is different, do a cast
if inputs[2] is not None: # dtype given
dtype = _convert_dtype_value(inputs[2])
else:
# if dtype is None, torch uses a global default set by torch.set_default_tensor_type()
dtype = self.default_dtype
if input_types[0] not in dtype:
out = _op.cast(out, dtype)
return out
def new_full(self, inputs, input_types):
data = inputs[1]
fill_value = inputs[2]
import torch
if not isinstance(data, (_expr.Expr, list, tuple, torch.Size)):
msg = "Data type %s could not be parsed in full op" % (type(data))
raise AssertionError(msg)
if inputs[3] is not None: # dtype given
dtype = _convert_dtype_value(inputs[3])
else:
# if dtype is None, use the dtype of the input tensor
dtype = self.infer_type(inputs[0]).dtype
return self.full_impl(data, fill_value, dtype)
def fill_(self, inputs, input_types):
data = inputs[0]
fill_value = inputs[1]
if not isinstance(fill_value, (bool, int, float, complex)):
fill_value = fold_constant(fill_value)
return self.full_impl(self.infer_shape(data), fill_value, input_types[0])
def linspace(self, inputs, input_types):
start = inputs[0]
stop = inputs[1]
step = inputs[2]
# Find the spacing between values as step
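# Hedged worked example of the rewrite below: torch.linspace(0, 10, steps=5)
# arrives with step == 5; it is rewritten to step = (10 - 0) / (5 - 1) = 2.5
# and stop = 10 + 2.5 = 12.5, so arange(0, 12.5, 2.5) yields
# [0, 2.5, 5, 7.5, 10] as expected.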
if step != 1:
step = (stop - start) / (step - 1)
stop = stop + step
else:
stop = start + step
if inputs[3] is None:
import torch
dtype = _convert_data_type(str(torch.get_default_dtype()))
else:
dtype = _convert_dtype_value(inputs[3])
start = _create_typed_const(start, dtype)
stop = _create_typed_const(stop, dtype)
step = _create_typed_const(step, dtype)
return _op.transform.arange(start=start, stop=stop, step=step, dtype=dtype)
def relu(self, inputs, input_types):
data = inputs[0]
if self.is_quantized_tensor(data):
assert len(inputs) == 3, "Input quant param not found in op inputs"
input_zero_point = _expr.const(inputs[2], dtype="int32")
return qnn_torch.quantized_relu(data, input_zero_point)
return _op.nn.relu(data)
def relu6(self, inputs, input_types):
data = inputs[0]
return _op.tensor.clip(data, 0.0, 6.0)
def prelu(self, inputs, input_types):
# Reference: https://pytorch.org/docs/stable/generated/torch.nn.PReLU.html#torch.nn.PReLU
data = inputs[0]
dim = self.get_dims(data)
ndims = len(dim)
axis = 0 if ndims == 1 else 1
alpha = _op.broadcast_to(inputs[1], (dim[axis]))
return _op.nn.prelu(data, alpha, axis)
def leaky_relu(self, inputs, input_types):
data = inputs[0]
alpha = float(inputs[1])
return _op.nn.leaky_relu(data, alpha)
def elu(self, inputs, input_types):
data = inputs[0]
dtype = input_types[0]
alpha = _expr.const(-float(inputs[1]), dtype=dtype)
return alpha * _op.nn.relu(_expr.const(1, dtype=dtype) - _op.exp(data)) + _op.nn.relu(data)
def celu(self, inputs, input_types):
data = inputs[0]
dtype = input_types[0]
alpha = _expr.const(float(inputs[1]), dtype=dtype)
return alpha * _op.nn.relu(
_expr.const(1, dtype=dtype) - _op.exp(data / alpha)
) + _op.nn.relu(data)
def gelu(self, inputs, input_types):
data = inputs[0]
dtype = input_types[0]
# gelu is data * normcdf(data)
# normcdf expressed as erf because we don't currently have that intrinsic
# note that there is also a fastgelu variant approximating normcdf
# with tanh and third order polynomials, but this is "true" gelu
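# For reference, the expression below is the exact erf-based form
# GELU(x) = x * 0.5 * (1 + erf(x / sqrt(2))), not the tanh approximation.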
return data * (
_expr.const(0.5, dtype=dtype)
+ _op.erf(data * _expr.const(0.5**0.5, dtype=dtype)) * _expr.const(0.5, dtype=dtype)
)
def selu(self, inputs, input_types):
data = inputs[0]
# https://pytorch.org/docs/stable/nn.html#selu
dtype = input_types[0]
alpha = _expr.const(-1.6732632423543772848170429916717, dtype=dtype)
gamma = _expr.const(1.0507009873554804934193349852946, dtype=dtype)
return gamma * (
alpha * _op.nn.relu(_expr.const(1.0, dtype=dtype) - _op.exp(data)) + _op.nn.relu(data)
)
def silu(self, inputs, input_types):
data = inputs[0]
return data * _op.tensor.sigmoid(data)
def glu(self, inputs, input_types):
"""
Applies the gated linear unit function GLU(a,b)= a * sigmoid(b)
where a is the first half of the input matrices and b is the second half.
Link: https://pytorch.org/docs/stable/generated/torch.nn.GLU.html
"""
data = inputs[0]
dim = inputs[1]
relay_tup = _op.transform.split(data, 2, dim)
return relay_tup[0] * _op.tensor.sigmoid(relay_tup[1])
def log_sigmoid(self, inputs, input_types):
data = inputs[0]
mn = _op.minimum(_op.const(0, dtype=input_types[0]), data)
z = _op.exp(-_op.abs(data))
return mn - self.log1p([z], input_types)
def cross_entropy_loss_with_logits(self, inputs, input_types):
input = inputs[0]
target = inputs[1]
weights = inputs[2]
reduction = inputs[3]
ignore_index = inputs[4]
label_smoothing = inputs[5]
input_shape = self.infer_shape(input)
target_shape = self.infer_shape(target)
if input_shape != target_shape:
if reduction == 0:
reduction = "none"
elif reduction == 1:
reduction = "mean"
else:
reduction = "sum"
num_class = self.infer_shape(input)[1]
if weights is None:
weights = _op.full(_expr.const(1), (num_class,), dtype=input_types[0])
return _op.nn.nll_loss(
_op.nn.log_softmax(input), target, weights, reduction, ignore_index
)
assert reduction == 1, "reduction not supported in cross_entropy_loss"
assert ignore_index == -100, "ignore_index not supported in cross_entropy_loss"
assert label_smoothing == 0.0, "label_smoothing not supported in cross_entropy_loss"
assert weights is None, "weight not supported in cross_entropy_loss"
return _op.nn.cross_entropy_with_logits(_op.nn.log_softmax(input), target)
def l1_loss(self, inputs, input_types):
assert len(inputs) == 3
[predictions, targets, reduction] = inputs
delta = _op.abs(_op.subtract(predictions, targets))
if reduction == 0:
# reduction = "none"
return delta
elif reduction == 1:
# reduction = "mean"
return _op.mean(delta)
else:
# reduction = "sum"
return _op.sum(delta)
def mse_loss(self, inputs, input_types):
assert len(inputs) == 3
[predictions, targets, reduction] = inputs
delta = _op.subtract(predictions, targets)
delta = _op.power(delta, _expr.const(2, input_types[0]))
if reduction == 0:
# reduction = "none"
return delta
elif reduction == 1:
# reduction = "mean"
return _op.mean(delta)
else:
# reduction = "sum"
return _op.sum(delta)
def hard_sigmoid(self, inputs, input_types):
def _relu6(x):
return _op.tensor.clip(x, 0.0, 6.0)
def func(x):
return _relu6(x + _expr.const(3.0)) / _expr.const(6.0)
if self.is_quantized_tensor(inputs[0]):
input_scale = _expr.const(inputs[1])
input_zero_point = _expr.const(inputs[2])
# PyTorch seems to use the following output qparams, but accuracy
# is broken if we use this.
# TODO(masahi): Revisit this parameter choice
#
# Taken from src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp
# output_scale = _expr.const(0.00390625) # 1.0 / 2^8
# output_zero_point = _expr.const(-128)
output_scale = input_scale
output_zero_point = input_zero_point
data = qnn.op.dequantize(inputs[0], input_scale, input_zero_point, axis=1)
out = func(data)
return qnn.op.quantize(out, output_scale, output_zero_point, out_dtype="uint8")
return func(inputs[0])
def hard_swish(self, inputs, input_types):
data = inputs[0]
return data * self.hard_sigmoid(inputs, input_types)
def adaptive_avg_pool(self, op, inputs, input_types):
data = inputs[0]
output_size = inputs[1]
def func(x):
return op(x, output_size=output_size)
if self.is_quantized_tensor(data):
return qnn_torch.apply_with_upcast(data, func)
return func(data)
def adaptive_max_pool(self, op, inputs, input_types):
data = inputs[0]
output_size = inputs[1]
# returns dummy indices too
return op(data, output_size=output_size), None
@staticmethod
def convert_const_list(data):
if isinstance(data, list):
for i, _ in enumerate(data):
if isinstance(data[i], _expr.Expr):
data[i] = int(_infer_value_simulated(data[i], {}).numpy())
return data
def maxpool_2d(self, inputs, input_types):
data = inputs[0]
pool_size = self.convert_const_list(inputs[1])
strides = self.convert_const_list(inputs[2] if inputs[2] else pool_size)
padding = inputs[3]
dilation = inputs[4]
ceil_mode = int(inputs[5])
return _op.nn.max_pool2d(
data,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
layout="NCHW",
ceil_mode=ceil_mode,
)
def maxpool_2d_with_indices(self, inputs, input_types):
# returns dummy indices too
return self.maxpool_2d(inputs, input_types), None
def maxpool_1d(self, inputs, input_types):
data = inputs[0]
pool_size = inputs[1]
strides = inputs[2] if inputs[2] else pool_size
padding = inputs[3]
dilation = inputs[4]
ceil_mode = int(inputs[5])
return _op.nn.max_pool1d(
data,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
layout="NCW",
ceil_mode=ceil_mode,
)
def maxpool_3d(self, inputs, input_types):
data = inputs[0]
need_squeeze = False
if len(self.get_dims(data)) == 4:
need_squeeze = True
data = _op.expand_dims(data, 0)
pool_size = inputs[1]
strides = inputs[2] if inputs[2] else pool_size
padding = inputs[3]
dilation = inputs[4]
ceil_mode = int(inputs[5])
res = _op.nn.max_pool3d(
data,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
ceil_mode=ceil_mode,
)
return res if not need_squeeze else _op.squeeze(res, [0])
def hardtanh(self, inputs, input_types):
a = inputs[0]
tanh_min = float(inputs[1])
tanh_max = float(inputs[2])
return _op.tensor.clip(a, tanh_min, tanh_max)
def convolution(self, inputs, input_types):
# Use transpose or normal
use_transpose = True if inputs[6] == 1 else False
data = inputs[0]
weight = inputs[1]
bias = inputs[2]
strides = tuple(inputs[3])
padding = tuple(inputs[4])
dilation = tuple(inputs[5])
if isinstance(weight, _expr.Expr):
inferred_shape = self.infer_shape(weight)
weight_shape = []
for infer in inferred_shape:
weight_shape.append(infer)
else:
msg = "Data type %s could not be parsed in conv op" % (type(weight))
raise AssertionError(msg)
groups = int(inputs[8])
if use_transpose:
channels = weight_shape[1] * groups
in_channels = weight_shape[0]
else:
channels = weight_shape[0]
in_channels = weight_shape[1]
# Check if this is depth wise convolution
# We need to reshape weight so that Relay could recognize this is depth wise
# weight_shape[1] is always in_channels // groups
# For depthwise, in_channels == groups, so weight_shape[1] == 1
# If groups > 1 but weight_shape[1] != 1, this is group convolution
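# Hedged worked example: for a depthwise conv with groups == in_channels == 32
# and channel multiplier 2, the Torch weight of shape (64, 1, kH, kW) is
# reshaped to (32, 2, kH, kW) so Relay recognizes it as depthwise.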
if groups > 1 and in_channels == 1:
channel_multiplier = channels // groups
new_weight_shape = (groups, channel_multiplier) + tuple(weight_shape[2:])
weight = _op.transform.reshape(weight, new_weight_shape)
kernel_size = weight_shape[2:]
use_bias = isinstance(bias, _expr.Expr)
# We are trying to invoke various relay operations through a single conv_op variable.
# However the function signatures for some operations have additional attributes so we
# pass these in along with the standard ones.
additional_arguments = dict()
if use_transpose:
if len(kernel_size) == 3:
conv_op = _op.nn.conv3d_transpose
elif len(kernel_size) == 2:
conv_op = _op.nn.conv2d_transpose
else:
conv_op = _op.nn.conv1d_transpose
output_padding = tuple(inputs[7])
additional_arguments["output_padding"] = output_padding
else:
if len(kernel_size) == 3:
conv_op = _op.nn.conv3d
elif len(kernel_size) == 2:
conv_op = _op.nn.conv2d
else:
conv_op = _op.nn.conv1d
if len(kernel_size) == 3:
data_layout = "NCDHW"
kernel_layout = "OIDHW"
elif len(kernel_size) == 2:
data_layout = "NCHW"
kernel_layout = "OIHW"
if use_transpose:
# Transposed convolutions have IOHW layout.
kernel_layout = "IOHW"
else:
data_layout = "NCW"
kernel_layout = "OIW"
# Conv1d does not currently support grouped convolution so we convert it to conv2d
is_grouped_conv1d = False
if groups > 1 and len(kernel_size) == 1 and not use_transpose:
is_grouped_conv1d = True
conv_op = _op.nn.conv2d
kernel_size = [1] + kernel_size
strides = (1,) + strides
padding = (0,) + padding
dilation = (1,) + dilation
data = _op.expand_dims(data, axis=2)
weight = _op.expand_dims(weight, axis=2)
data_layout = "NCHW"
kernel_layout = "OIHW"
conv_out = conv_op(
data,
weight,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
channels=channels,
kernel_size=kernel_size,
data_layout=data_layout,
kernel_layout=kernel_layout,
out_layout="",
out_dtype="",
**additional_arguments,
)
if use_bias:
res = _op.nn.bias_add(conv_out, bias)
else:
res = conv_out
if is_grouped_conv1d:
# Because we conducted grouped conv1d convolution through conv2d we must
# squeeze the output to get the correct result.
res = _op.squeeze(res, axis=[2])
return res
def softmax(self, inputs, input_types):
data = inputs[0]
axis = inputs[1]
if isinstance(axis, str):
axis = int(axis)
return _op.nn.softmax(data, axis=axis)
def threshold(self, inputs, input_types):
data = inputs[0]
return _op.nn.relu(data)
def contiguous(self, inputs, input_types):
return inputs[0]
def batch_norm(self, inputs, input_types):
data = inputs[0]
data_type = input_types[0]
channels = self.infer_shape(data)
if isinstance(inputs[1], _expr.Expr) and isinstance(inputs[2], _expr.Expr):
scale = center = True
weight = inputs[1]
beta = inputs[2]
gamma = weight
else:
scale = center = False
if not scale:
gamma = _create_typed_const(np.ones([int(channels[1])]), data_type)
if not center:
beta = _create_typed_const(np.zeros([int(channels[1])]), data_type)
moving_mean = inputs[3]
moving_var = inputs[4]
epsilon = float(inputs[7])
return _op.nn.batch_norm(
data,
gamma,
beta,
moving_mean,
moving_var,
axis=1,
epsilon=epsilon,
center=center,
scale=scale,
)[0]
def instance_norm(self, inputs, input_types):
data = inputs[0]
data_type = input_types[0]
channels = self.infer_shape(data)
if isinstance(inputs[1], _expr.Expr) and isinstance(inputs[2], _expr.Expr):
scale = center = True
weight = inputs[1]
beta = inputs[2]
gamma = weight
else:
scale = center = False
if not scale:
gamma = _create_typed_const(np.ones([int(channels[1])]), data_type)
if not center:
beta = _create_typed_const(np.zeros([int(channels[1])]), data_type)
epsilon = float(inputs[7])
return _op.nn.instance_norm(
data, gamma, beta, axis=1, epsilon=epsilon, center=center, scale=scale
)
def get_dims(self, data):
import torch
if isinstance(data, _expr.Expr):
dims = self.infer_shape(data)
elif isinstance(data, list):
dims = data
elif isinstance(data, (torch.Tensor, np.ndarray)):
dims = data.shape
else:
msg = "Data type %s could not be parsed" % type(data)
raise AssertionError(msg)
return dims
def layer_norm(self, inputs, input_types):
data = inputs[0]
ndims = len(self.get_dims(inputs[1]))
assert ndims == 1, "Support only normalization over last one dimension."
return _op.nn.layer_norm(
data,
gamma=inputs[2],
beta=inputs[3],
axis=-1,
epsilon=float(inputs[4]),
center=True,
scale=True,
)
def group_norm(self, inputs, input_types):
data = inputs[0]
gamma = inputs[2]
beta = inputs[3]
num_groups = inputs[1]
epsilon = float(inputs[4])
return _op.nn.group_norm(
data,
gamma=gamma,
beta=beta,
num_groups=num_groups,
axis=1,
epsilon=epsilon,
center=True,
scale=True,
)
def transpose(self, inputs, input_types):
data = inputs[0]
import torch
if isinstance(data, _expr.Expr):
ndims = len(self.infer_shape_with_prelude(data))
elif isinstance(data, list):
ndims = data
elif isinstance(data, (torch.Tensor, np.ndarray)):
ndims = data.shape
else:
msg = "Data type %s could not be parsed in transpose op" % (type(data))
raise AssertionError(msg)
if isinstance(data, tvm.runtime.NDArray):
ndims = len(data.shape)
axes = list(range(ndims))
num_inputs = len(inputs)
if num_inputs == 1:
if ndims >= 2:
axes[-1] = ndims - 2
axes[-2] = ndims - 1
if not isinstance(data, _expr.Expr):
data = _expr.const(data)
elif num_inputs == 3:
parse = lambda i: ndims * (i < 0) + i
src, dst = [parse(int(inputs[i])) for i in [1, 2]]
axes[src] = dst
axes[dst] = src
else:
axes = inputs[1]
return _op.transform.transpose(data, axes)
def numpy_T(self, inputs, input_types):
data = inputs[0]
shape = self.infer_shape(data)
if len(shape) != 2:
logger.warning(
"The use of Tensor.T on tensors of dimensions != 2 is deprecated"
"and will be removed in a future release of PyTorch."
)
return _op.transform.transpose(data)
def flatten(self, inputs, input_types):
data = inputs[0]
start = int(inputs[1])
end = int(inputs[2])
dshape = get_const_tuple(self.infer_shape_with_prelude(data))
ndim = len(dshape)
if start < 0:
start += ndim
if end < 0:
end += ndim
assert start <= end, "start dim cannot come after end dim"
new_shape = [0] * start
new_shape.append(-1)
squeeze_axes = []
for i in range(start + 1, end + 1):
new_shape.append(1)
squeeze_axes.append(i)
for _ in range(end + 1, ndim):
new_shape.append(0)
out = _op.reshape(data, new_shape)
if squeeze_axes:
out = _op.squeeze(out, axis=squeeze_axes)
return out
def addmm(self, inputs, input_types):
input_mat = inputs[0]
mat1 = inputs[1]
data_type = input_types[1]
mat2 = inputs[2]
beta = inputs[3]
alpha = inputs[4]
if not isinstance(alpha, _expr.Expr) and alpha != 1:
alpha = _create_typed_const(alpha, data_type)
mat1 *= alpha
if not isinstance(beta, _expr.Expr) and beta != 1:
beta = _create_typed_const(beta, data_type)
mat2 *= beta
transposed_mat2 = _op.transform.transpose(mat2, axes=[1, 0])
units = self.infer_shape(transposed_mat2)[0]
dense_out = _op.nn.dense(mat1, transposed_mat2, units=units)
return dense_out + input_mat
def size(self, inputs, input_types):
shape = self.infer_shape_with_prelude(inputs[0])
axis = None
if len(inputs) > 1:
axis = int(inputs[1])
if any(map(lambda s: isinstance(s, tvm.tir.expr.Any), shape)):
if axis is None or isinstance(shape[axis], tvm.tir.expr.Any):
shape_dynamic = _op.shape_of(inputs[0], dtype="int32")
if axis is not None:
return _op.take(shape_dynamic, _expr.const(axis), 0)
return shape_dynamic
if axis is not None:
return _expr.const(shape[axis])
return _expr.const(shape)
def numtotensor(self, inputs, input_types):
val = inputs[0]
dtype = input_types[0]
if isinstance(val, _expr.Expr):
return val
if isinstance(val, tvm.tir.IntImm):
val = val.__int__()
dtype = int
arr = val * np.ones([]).astype(dtype)
return arr
def tensortonum(self, inputs, input_types):
return inputs[0]
def view(self, inputs, input_types):
data = inputs[0]
if len(inputs) == 3:
shape_inp = [inputs[1], self.infer_shape(inputs[2])[0]]
else:
if isinstance(inputs[1], list):
shape_inp = inputs[1]
else:
shape_inp = self.infer_shape(inputs[1])
new_shape = shape_inp
for i, shape in enumerate(shape_inp):
if isinstance(shape, _expr.Expr):
val = _infer_value_simulated(shape, {})
new_shape[i] = val.numpy().item(0)
return _op.transform.reshape(data, new_shape)
def reshape(self, inputs, input_types):
data = inputs[0]
new_shape = inputs[1]
tmp_shape = []
is_dyn = False
for s in new_shape:
if isinstance(s, _expr.Constant):
tmp_shape.append(int(s.data.numpy()))
elif isinstance(s, _expr.Expr):
dim, success = try_infer_value(s, lambda ret: int(ret))
tmp_shape.append(dim)
if not success:
is_dyn = True
else:
tmp_shape.append(s)
if is_dyn:
new_shape = []
for i, s in enumerate(tmp_shape):
if not isinstance(s, _expr.Expr):
s = _expr.const(s, "int64")
else:
s = _op.cast(s, "int64")
new_shape.append(_op.expand_dims(s, axis=0))
new_shape = _op.concatenate(new_shape, axis=0)
else:
new_shape = tmp_shape
return _op.transform.reshape(data, new_shape)
def reshape_as(self, inputs, input_types):
data = inputs[0]
new_shape = self.infer_shape(inputs[1])
return _op.transform.reshape(data, new_shape)
def pixel_shuffle(self, inputs, input_types):
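"""
Converts aten::pixel_shuffle: (b, c, h, w) is reshaped to
(b, c // r**2, r, r, h, w), transposed to interleave the upscale factor r
with the spatial dims, and reshaped to (b, c // r**2, h * r, w * r).
"""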
data = inputs[0]
upscale_factor = inputs[1]
upscale_squared = upscale_factor * upscale_factor
b, c, h, w = self.infer_shape(data)
assert (
c % upscale_squared == 0
), "input channel should be divisible by square of upscale_factor"
ndims = len(self.infer_shape_with_prelude(data))
axes = list(range(ndims))
num_inputs = len(inputs)
oc = c // upscale_squared
oh = h * upscale_factor
ow = w * upscale_factor
new_shape = [b, oc, upscale_factor, upscale_factor, h, w]
out_shape = [b, oc, oh, ow]
data = _op.transform.reshape(data, new_shape)
# The data will be transposed to
# [b, oc, h, upscale_factor, w, upscale_factor]
# for further reshape
axes = [0, 1, 4, 2, 5, 3]
data = _op.transform.transpose(data, axes)
return _op.transform.reshape(data, out_shape)
def clone(self, inputs, input_types):
data = inputs[0]
return _op.tensor.copy(data)
def log_softmax(self, inputs, input_types):
data = inputs[0]
axis = int(inputs[1])
return _op.nn.log_softmax(data, axis)
def sigmoid(self, inputs, input_types):
data = inputs[0]
def func(x):
return _op.tensor.sigmoid(x)
if self.is_quantized_tensor(data):
assert len(inputs) == 5, "Input/Ouput quant param not found in op inputs"
return qnn_torch.quantized_sigmoid(inputs)
return func(data)
def softplus(self, inputs, input_types):
dtype = input_types[0]
beta = _expr.const(float(inputs[1]), dtype=dtype)
return _op.log(_op.exp(inputs[0] * beta) + _expr.const(1.0, dtype=dtype)) / beta
def make_avg_pool(self, dim):
def avg_pool(inputs, input_types):
data = inputs[0]
pool_size = self.convert_const_list(inputs[1])
strides = self.convert_const_list(inputs[2] if inputs[2] else pool_size)
padding = inputs[3]
ceil_mode = int(inputs[4])
count_include_pad = int(inputs[5])
def func(x):
if dim == 1:
return _op.nn.avg_pool1d(
x,
pool_size=pool_size,
strides=strides,
padding=padding,
dilation=(1,),
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
elif dim == 2:
return _op.nn.avg_pool2d(
x,
pool_size=pool_size,
strides=strides,
padding=padding,
dilation=(1, 1),
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
elif dim == 3:
return _op.nn.avg_pool3d(
x,
pool_size=pool_size,
strides=strides,
padding=padding,
dilation=(1, 1, 1),
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
else:
msg = "Average Pooling dimension should be between 1 and 3"
raise RuntimeError(msg)
if self.is_quantized_tensor(data):
return qnn_torch.apply_with_upcast(data, func)
return func(data)
return avg_pool
def linear(self, inputs, input_types):
# https://pytorch.org/docs/stable/nn.functional.html#linear
# 0 - input
# 1 - weight
bias = inputs[2]
a_shape = self.infer_shape_with_prelude(inputs[0])
b_shape = self.infer_shape_with_prelude(inputs[1])
if len(a_shape) == 2 and len(b_shape) == 2:
mm_out = _op.nn.dense(inputs[0], inputs[1])
elif len(b_shape) == 1:
mm_out = self.matmul([inputs[0], inputs[1]], input_types[:2])
else:
mm_out = self.matmul(
[inputs[0], _op.transpose(inputs[1], axes=(1, 0))], input_types[:2]
)
if isinstance(bias, _expr.Expr):
bias_ndims = len(self.infer_shape_with_prelude(bias))
if bias_ndims == 1:
return _op.nn.bias_add(mm_out, bias, axis=-1)
mm_dtype = self.infer_type_with_prelude(mm_out).dtype
return self.add([mm_out, bias], [mm_dtype, input_types[2]])
return mm_out
def dropout(self, inputs, input_types):
data = inputs[0]
rate = float(inputs[1])
return _op.nn.dropout(data, rate)
def make_reduce(self, name):
def reduce(inputs, input_types):
data = inputs[0]
axis = None
keepdims = False
if len(inputs) > 2: # default, torch have only data, axis=None, keepdims=False
if isinstance(inputs[1], int):
axis = int(inputs[1])
elif _is_int_seq(inputs[1]):
axis = inputs[1]
else:
axis = list(self.infer_shape(inputs[1]))
keepdims = bool(inputs[2])
return get_relay_op(name)(data, axis=axis, keepdims=keepdims)
return reduce
def norm(self, inputs, input_types):
data = inputs[0]
dtype = input_types[0]
axis = None
keepdims = False
if len(inputs) > 3:
axis = inputs[2]
keepdims = bool(inputs[3])
order = inputs[1]
if order == np.inf:
return _op.reduce.max(_op.abs(data), axis=axis, keepdims=keepdims)
elif order == -np.inf:
return _op.reduce.min(_op.abs(data), axis=axis, keepdims=keepdims)
else:
reci_order = _expr.const(1.0 / order, dtype=dtype)
order = _expr.const(order)
return _op.power(
_op.reduce.sum(_op.power(_op.abs(data), order), axis=axis, keepdims=keepdims),
reci_order,
)
def frobenius_norm(self, inputs, input_types):
data = inputs[0]
axis = None
keepdims = False
if len(inputs) > 2:
axis = inputs[1] if len(inputs[1]) > 0 else None
keepdims = bool(inputs[2])
return _op.sqrt(_op.reduce.sum((data * data), axis=axis, keepdims=keepdims))
def std(self, inputs, input_types):
data = inputs[0]
if len(inputs) == 2:
axis = None
keepdims = False
unbiased = bool(inputs[1])
else:
axis = inputs[1]
keepdims = bool(inputs[3])
unbiased = bool(inputs[2])
return _op.reduce.std(data, axis=axis, keepdims=keepdims, unbiased=unbiased)
def variance(self, inputs, input_types):
data = inputs[0]
if len(inputs) == 2:
axis = None
keepdims = False
unbiased = bool(inputs[1])
else:
axis = inputs[1]
keepdims = bool(inputs[3])
unbiased = bool(inputs[2])
return _op.reduce.variance(data, axis=axis, keepdims=keepdims, unbiased=unbiased)
def mean(self, inputs, input_types):
data = inputs[0]
if inputs[1]:
axis = inputs[1]
else:
axis = None
if len(inputs) > 2 and inputs[2]:
keepdims = int(inputs[2])
else:
keepdims = False
if len(inputs) > 3 and inputs[3]:
exclude = int(inputs[3])
else:
exclude = False
def func(x):
return _op.mean(x, axis, keepdims, exclude)
if self.is_quantized_tensor(data):
assert len(inputs) == 6, "Input quant param not found in op inputs"
input_scale = _expr.const(inputs[4])
input_zero_point = _expr.const(inputs[5])
# refer to aten/src/ATen/native/quantized/cpu/qreduction.cpp
return qnn_torch.apply_with_fp32_fallback(data, input_scale, input_zero_point, func)
return func(data)
def var_mean(self, inputs, input_types):
data = inputs[0]
if len(inputs) == 2:
axis = None
keepdims = False
unbiased = bool(inputs[1])
else:
axis = inputs[1]
keepdims = bool(inputs[3])
unbiased = bool(inputs[2])
m, v = _op.reduce.mean_variance(data, axis, keepdims, False, unbiased)
return v, m
def chunk(self, inputs, input_types):
data = inputs[0]
num_chunks = int(inputs[1])
axis = int(inputs[2])
if isinstance(data, _expr.Expr):
inferred_shape = self.infer_shape_with_prelude(data)
shape = []
for infer in inferred_shape:
shape.append(infer)
dim = int(shape[axis])
if dim % num_chunks:
unif_size = int(dim / (num_chunks - 1))
else:
unif_size = int(dim / num_chunks)
indices = []
for i in range(unif_size, dim, unif_size):
indices.append(i)
return _op.split(data, indices, axis)
def matmul(self, inputs, input_types):
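"""
Converts aten::matmul. When both operands are rank > 2 they are flattened
to rank 3 and lowered to nn.batch_matmul (which expects the second operand
with its last two dims transposed); otherwise the op falls back to
nn.dense, with 1-D operands expanded before and squeezed after the product.
"""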
inputs_0 = inputs[0]
inputs_1 = inputs[1]
# Need to check input shapes, since this may require a batch matmul.
a_shape = self.infer_shape_with_prelude(inputs_0)
b_shape = self.infer_shape_with_prelude(inputs_1)
# When performing a batch matmul, we need to properly handle N-dim shapes.
if len(a_shape) > 2 and len(b_shape) > 2:
# Convert a into a 3 dimensional tensors.
need_reshape_output = False
if len(a_shape) != 3:
a = _op.reshape(inputs_0, [-1, a_shape[-2], a_shape[-1]])
need_reshape_output = True
else:
a = inputs_0
# Transpose matrix dimensions of b.
trans_axes = list(range(len(b_shape)))
trans_axes[-2], trans_axes[-1] = trans_axes[-1], trans_axes[-2]
b = _op.transpose(inputs_1, trans_axes)
# Convert b into a 3 dimensional tensor. Note that the last two dimensions
# are transposed.
if len(b_shape) != 3:
b = _op.reshape(b, [-1, b_shape[-1], b_shape[-2]])
# Perform a batch matmul.
output = _op.nn.batch_matmul(a, b)
# Reshape output to original dimensions.
if need_reshape_output:
return _op.reshape(output, [*a_shape[:-2], a_shape[-2], b_shape[-1]])
return output
elif len(a_shape) > 2:
inputs_0 = _op.reshape(inputs_0, [-1, a_shape[-1]])
elif len(a_shape) == 1:
return _op.squeeze(_op.nn.matmul(_op.expand_dims(inputs_0, axis=0), inputs_1), axis=[0])
if len(b_shape) > 2:
trans_axes = list(range(len(b_shape)))
trans_axes[-2], trans_axes[-1] = trans_axes[-1], trans_axes[-2]
input_1 = _op.reshape(_op.transpose(inputs_1, trans_axes), [-1, b_shape[-2]])
elif len(b_shape) == 2:
input_1 = _op.transpose(inputs_1, axes=(1, 0))
elif len(b_shape) == 1:
input_1 = _op.expand_dims(inputs_1, 0, 1)
out = _op.nn.dense(inputs_0, input_1)
if len(b_shape) == 1:
out = _op.squeeze(out, axis=[-1])
# Reshape output into a N dimensional tensor when a or b dim > 2
if len(a_shape) > 2:
out = _op.reshape(out, [*a_shape[:-1], b_shape[-1]])
elif len(b_shape) > 2:
out = _op.reshape(out, [a_shape[-2], -1, b_shape[-1]])
out = _op.reshape(
_op.transpose(out, [1, 0, 2]), [*b_shape[:-2], a_shape[-2], b_shape[-1]]
)
return out
def expand(self, inputs, input_types):
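"""
Converts aten::expand by prepending singleton axes until the rank matches
the requested size and then repeating each size-1 axis whose target size
is not -1 (-1 meaning "keep this dimension as is").
"""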
data_in = inputs[0]
shape = list(self.infer_shape(data_in))
ndims = len(shape)
sizes = inputs[1]
out = data_in
out_dims = len(sizes)
if ndims < out_dims:
num_newaxis = out_dims - ndims
out = _op.expand_dims(out, axis=0, num_newaxis=num_newaxis)
shape = [1] * num_newaxis + shape
for i in range(out_dims):
if sizes[i] != -1 and shape[i] == 1:
if not isinstance(sizes[i], int):
sizes[i] = int(_infer_value(sizes[i], {}).numpy())
out = _op.repeat(out, sizes[i], axis=i)
return out
def int(self, inputs, input_types):
if isinstance(inputs[0], _expr.Expr):
return inputs[0]
return int(inputs[0])
def identity(self, inputs, input_types):
return inputs[0]
def none(self, inputs, input_types):
return None
def pad_common(self, mode, pad_value, inputs, input_types):
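"""
Shared lowering for the aten padding ops. PyTorch pad lists run from the
last dimension backwards, e.g. (left, right, top, bottom) for 2-D padding,
so the list is unpacked into (before, after) pairs on the trailing axes;
all-zero padding returns the input unchanged, otherwise nn.pad is emitted.
"""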
data = inputs[0]
if isinstance(inputs[1], list):
pad_list = inputs[1]
else:
pad_list = list(self.infer_shape(inputs[1]))
# initialize paddings based on input len
pad_len = len(self.infer_shape(data)) * 2
paddings = [0] * pad_len
if len(pad_list) >= 2:
paddings[-1] = pad_list[1]
paddings[-2] = pad_list[0]
if len(pad_list) >= 4:
paddings[-3] = pad_list[3]
paddings[-4] = pad_list[2]
if len(pad_list) >= 6:
paddings[-5] = pad_list[5]
paddings[-6] = pad_list[4]
# group into tuple of 2 ints
paddings = [paddings[i : i + 2] for i in range(0, len(paddings), 2)]
const_paddings = []
non_zero_found = False
for pad in paddings:
const_paddings.append([])
for p in pad:
if isinstance(p, _expr.Expr):
p = int(_infer_value(p, {}).numpy())
elif not isinstance(p, int):
raise NotImplementedError("pad width should be int/expr")
const_paddings[-1].append(p)
if p != 0:
non_zero_found = True
if not non_zero_found:
return data
elif mode == "constant":
return _op.nn.pad(data, const_paddings, pad_value=pad_value, pad_mode=mode)
else:
return _op.nn.pad(data, const_paddings, pad_mode=mode)
def pad(self, inputs, input_types):
# mode: Optional default "constant"
if len(inputs) > 2 and inputs[2] is not None:
mode = inputs[2]
else:
mode = "constant"
# pad_value: Optional default 0
if len(inputs) == 4 and inputs[3] is not None:
pad_value = inputs[3]
else:
pad_value = 0
# replicate is edge in TVM's padding mode
if mode == "replicate":
mode = "edge"
elif mode == "circular":
raise ValueError("circular mode for torch.nn.functional.pad are not supported in TVM")
return self.pad_common(mode, pad_value, inputs, input_types)
def constant_pad_nd(self, inputs, input_types):
return self.pad_common("constant", _expr.const(inputs[2]), inputs, input_types)
def reflection_pad1d(self, inputs, input_types):
return self.pad_common("reflect", 0, inputs, input_types)
def reflection_pad2d(self, inputs, input_types):
return self.pad_common("reflect", 0, inputs, input_types)
def replication_pad1d(self, inputs, input_types):
return self.pad_common("edge", 0, inputs, input_types)
def replication_pad2d(self, inputs, input_types):
return self.pad_common("edge", 0, inputs, input_types)
def replication_pad3d(self, inputs, input_types):
return self.pad_common("edge", 0, inputs, input_types)
def clamp_common(self, data, min=None, max=None):
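"""
Shared lowering for the aten::clamp variants: constant or inferable bounds
are folded to Python scalars, missing bounds default to the dtype's
min/max, and a single relay clip is emitted.
"""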
def get_v(v, default_v):
if isinstance(v, _expr.Constant):
return float(v.data.numpy())
if isinstance(v, _expr.Expr):
infer_v, success = try_infer_value(v, lambda ret: float(ret))
if success:
return infer_v
if v is not None:
return v
return default_v
dtype = self.infer_type(data).dtype
type_info = np.finfo(dtype) if "float" in dtype else np.iinfo(dtype)
# TODO(masahi): Properly handle inf in a one-way clamp case.
if min is not None and max is not None:
amin = get_v(min, type_info.min)
amax = get_v(max, type_info.max)
elif min is not None:
amin = get_v(min, type_info.min)
amax = type_info.max
else:
amin = type_info.min
amax = get_v(max, type_info.max)
return _op.clip(data, amin, amax)
def clamp(self, inputs, _):
return self.clamp_common(inputs[0], min=inputs[1], max=inputs[2])
def clamp_min(self, inputs, input_types):
return self.clamp_common(inputs[0], min=inputs[1])
def clamp_max(self, inputs, input_types):
return self.clamp_common(inputs[0], max=inputs[1])
def to(self, inputs, input_types):
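"""
Converts aten::to. The dtype argument is a torch ScalarType code
(e.g. 6 -> float32, 4 -> int64); relay expressions are cast accordingly,
while plain Python values are converted with the matching Python type.
"""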
data = inputs[0]
dtype = inputs[1] if inputs[1] is not None and not isinstance(inputs[1], str) else inputs[2]
# special handling for aten::to(data, 6, _, _, _) case
# 6 means dtype = float
# this happens when converting upsampling with scale factor
cast_map = {
5: "float16",
6: "float32",
7: "float64",
3: "int32",
4: "int64",
}
cast_func = {5: float, 6: float, 7: float, 3: int, 4: int}
ret = data
if isinstance(data, _expr.Expr):
actual_dtype = str(self.infer_type(data).dtype)
if dtype in cast_map and cast_map[dtype] != actual_dtype:
ret = _op.cast(data, cast_map[dtype])
elif dtype in cast_map:
ret = cast_func[dtype](data)
return ret
def get_upsample_out_size(self, inputs, method):
# This assumes a static shape
out_size = []
if inputs[1] is not None:
for size in inputs[1]:
if not isinstance(size, int):
out_size.append(int(_infer_value(size, {}).numpy()))
else:
out_size.append(size)
else:
scale_index = 3 if method != "nearest_neighbor" else 2
scales = inputs[scale_index]
assert scales is not None, "neither out size nor scale provided"
assert isinstance(scales, list)
ishape = self.infer_shape(inputs[0])
for i, scale in enumerate(scales):
out_size.append(int(math.floor(float(ishape[2 + i]) * scale)))
return out_size
def make_upsample(self, method):
def upsample(inputs, input_types):
data = inputs[0]
out_size = self.get_upsample_out_size(inputs, method)
if len(inputs) > 2 and method != "nearest_neighbor":
align_corners = inputs[2]
else:
align_corners = False
if method == "nearest_neighbor":
coord_trans = "asymmetric"
elif align_corners:
coord_trans = "align_corners"
else:
coord_trans = "half_pixel"
def func(x):
return _op.image.resize2d(
x, out_size, None, "NCHW", method, coord_trans, cubic_alpha=-0.75
)
if self.is_quantized_tensor(data):
# input qparams are manually appended by us
assert isinstance(inputs[-2], float)
assert isinstance(inputs[-1], int)
input_scale = _expr.const(inputs[-2])
input_zero_point = _expr.const(inputs[-1])
# currently falls back to fp32; this produces output identical to torch
return qnn_torch.apply_with_fp32_fallback(data, input_scale, input_zero_point, func)
return func(data)
return upsample
def make_upsample3d(self, method):
def upsample3d(inputs, input_types):
data = inputs[0]
out_size = self.get_upsample_out_size(inputs, method)
if len(inputs) > 2 and method == "linear":
align_corners = inputs[2]
else:
align_corners = False
if method == "nearest_neighbor":
coord_trans = "asymmetric"
elif align_corners:
coord_trans = "align_corners"
else:
coord_trans = "half_pixel"
return _op.image.resize3d(data, out_size, None, "NCDHW", method, coord_trans)
return upsample3d
def expand_as(self, inputs, input_types):
target = inputs[1]
t0 = self.infer_type(inputs[0]).dtype
t1 = self.infer_type(inputs[1]).dtype
if str(t0) != str(t1):
target = _op.cast(target, t0)
return _op.broadcast_to_like(inputs[0], target)
def broadcast_tensors(self, inputs, input_types):
tensor_list = inputs[0]
import torch
res_shape = list(torch.broadcast_shapes(*[self.infer_shape(t) for t in tensor_list]))
return [_op.broadcast_to(tensor, res_shape) for tensor in tensor_list]
def Bool(self, inputs, input_types):
assert len(inputs) == 1
return inputs[0]
def Float(self, inputs, input_types):
assert len(inputs) == 1
return _op.cast(inputs[0], "float32")
def bitwise_not(self, inputs, input_types):
data = inputs[0]
# The input tensor must be of integral or Boolean types.
# For bool tensors, it computes the logical NOT
if input_types[0] == "bool":
out = _op.logical_not(_op.cast(data, "bool"))
else:
out = _op.bitwise_not(_op.cast(data, "int"))
return out
def bitwise_xor(self, inputs, input_types):
lhs = inputs[0]
rhs = inputs[1]
lhs = _op.cast(lhs, "bool") if input_types[0] == "bool" else _op.cast(lhs, "int")
rhs = _op.cast(rhs, "bool") if input_types[1] == "bool" else _op.cast(rhs, "int")
return _op.bitwise_xor(lhs, rhs)
def logical_not(self, inputs, input_types):
data = _wrap_const(inputs[0])
return _op.logical_not(_op.cast(data, "bool"))
def logical_xor(self, inputs, input_types):
lhs = _op.cast(inputs[0], "bool")
rhs = _op.cast(inputs[1], "bool")
return _op.logical_xor(lhs, rhs)
def list_getitem(self, inputs, input_types):
return self.prelude.nth(inputs[0], _wrap_const(inputs[1]))
def list_len(self, inputs, input_types):
return self.prelude.length(inputs[0])
def type_as(self, inputs, input_types):
assert len(inputs) == 2
assert len(input_types) == 2
return _op.cast(inputs[0], input_types[1])
def gather(self, inputs, input_types):
data = inputs[0]
axis = inputs[1]
indices = inputs[2]
return _op.gather(data, axis, indices)
def add(self, inputs, input_types):
# add_ is overloaded for tensor add and list concat
if input_types[0] == "ListType":
return self.prelude.concat(inputs[0], inputs[1])
return self.make_elemwise("add")(inputs, input_types)
def tensor_array_stack(self, inputs, input_types):
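"""
Stacks a dynamic (prelude List ADT) tensor list along axis 0 using the
static tensor array ops and extracts the packed data tensor from the
resulting tensor ADT.
"""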
dim = inputs[1]
assert dim == 0, "stacking on a dynamic tensor list only supported on a first axis"
tensor_array, shape = self.convert_to_tensor_array(inputs[0])
stacked_shape = (Any(),) + shape
stack = self.prelude.get_global_var_static("tensor_array_stack", "float32", shape)
stacked = stack(tensor_array)
static_tensor_array_ops = StaticTensorArrayOps(self.prelude, "float32", stacked_shape)
static_tensor_array_ops.register()
get_tensor = self.prelude.get_global_var_static("tensor_get_data", "float32", stacked_shape)
return get_tensor(stacked)
def stack(self, inputs, input_types):
if isinstance(inputs[0], list):
# a static python list of tensors
dim = inputs[1]
return _op.stack(inputs[0], dim)
else:
# List ADT case
assert isinstance(inputs[0], _expr.Expr)
ty = self.infer_type_with_prelude(inputs[0])
list_ty = self.prelude.mod.get_global_type_var("List")
msg = "The input list is expected to be List ADT"
assert isinstance(ty, tvm.ir.TypeCall) and ty.func == list_ty, msg
return self.tensor_array_stack(inputs, input_types)
def sub(self, inputs, input_types):
if len(inputs) == 3:
data0, data1, alpha = self.pytorch_promote_types(inputs, input_types)
return get_relay_op("subtract")(data0, alpha * data1)
else:
data0, data1 = self.pytorch_promote_types(inputs, input_types)
return get_relay_op("subtract")(data0, data1)
def rsub(self, inputs, input_types):
data0, data1, alpha = self.pytorch_promote_types(inputs, input_types)
# note: rsub means data0 and data1 swap places
return get_relay_op("subtract")(data1, alpha * data0)
def embedding(self, inputs, input_types):
weight = inputs[0]
indices = inputs[1]
return _op.take(weight, indices.astype("int32"), axis=0)
def one_hot(self, inputs, input_types):
indices = inputs[0].astype("int32")
num_classes = inputs[1]
if num_classes == -1:
msg = "Inferring the number of classes is not yet supported."
raise NotImplementedError(msg)
dtype = "int32"
on_value = tvm.relay.const(1.0, dtype)
off_value = tvm.relay.const(0.0, dtype)
return _op.one_hot(indices, on_value, off_value, num_classes, -1, dtype)
def index(self, inputs, input_types):
data = inputs[0]
indices_list = []
for indices in inputs[1]:
if self.infer_type(indices).dtype == "bool":
# adv_index does not support a mask as the index tensor (it will treat 0/1 as
# an index rather than a flag).
# So we use argwhere to turn the mask into indices, which will also take care
# of the dynamism in the indexing by mask.
indices_list.append(_op.squeeze(_op.transform.argwhere(indices), axis=[1]))
else:
indices_list.append(indices)
return _op.adv_index([data] + indices_list)
def meshgrid(self, inputs, input_types):
data = inputs[0]
return _op.meshgrid(data, indexing="ij")
def nms(self, inputs, input_types):
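"""
Converts torchvision::nms. Relay non_max_suppression expects positive
scores and (batch, num_anchors, 5) data with the score prepended to each
box, so scores are shifted to be positive and packed with the boxes; the
returned indices are then sliced to the valid count and cast to int64 to
match torchvision.
"""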
boxes = inputs[0]
scores = inputs[1]
iou_threshold = inputs[2]
# TVM NMS assumes score > 0
scores = scores - _op.min(scores) + _op.const(1.0)
num_boxes = _op.shape_of(scores)
# PyTorch NMS doesn't have score_threshold, so no need to run get_valid_count
indices = _op.transform.arange(_op.squeeze(num_boxes), dtype="int32")
indices = _op.expand_dims(indices, 0, 1)
# Generate data with shape (1, num_anchors, 5)
scores = AttrCvt(op_name="expand_dims", extras={"axis": -1, "num_newaxis": 1})([scores], {})
data = _op.concatenate([scores, boxes], -1)
data = _op.expand_dims(data, 0, 1)
# Perform Non-Maximum Suppression,
# PyTorch NMS doesn't have parameter top_k and max_output_size
score_index = 0
top_k = max_out_size = -1
nms_ret = get_relay_op("non_max_suppression")(
data=data,
valid_count=num_boxes,
indices=indices,
max_output_size=max_out_size,
iou_threshold=iou_threshold,
force_suppress=True,
top_k=top_k,
coord_start=1,
score_index=score_index,
id_index=-1,
return_indices=True,
invalid_to_bottom=False,
)
# squeeze the two outputs of nms for strided_slice
size = get_relay_op("squeeze")(nms_ret[1], axis=[1])
data_slice = get_relay_op("squeeze")(nms_ret[0], axis=[0])
# strided slice to get the dynamic result
ret = get_relay_op("strided_slice")(
data_slice, begin=_expr.const([0]), end=size, slice_mode="size"
)
# in torchvision, indices from nms are int64
return _op.cast(ret, "int64")
def logsumexp(self, inputs, input_types):
data = self.pytorch_promote_types(inputs[:1], input_types[:1])
dim_list = inputs[1]
keepdim = inputs[2] if len(inputs) > 2 else False
# dim is output of prim::ListConstruct, even if it is int in python code
assert isinstance(dim_list, list), "dim is expected to be a list"
return _op.logsumexp(data[0], axis=dim_list, keepdims=keepdim)
def roi_align(self, inputs, input_types):
data = inputs[0]
boxes = inputs[1]
output_size = (inputs[3], inputs[4])
spatial_scale = inputs[2]
sample_ratio = inputs[5]
aligned = False if len(inputs) < 7 else inputs[6]
if aligned:
boxes -= _expr.const(0.5 / spatial_scale)
return _op.vision.roi_align(data, boxes, output_size, spatial_scale, sample_ratio)
def deform_conv2d(self, inputs, input_types):
data = inputs[0]
weight = inputs[1]
offset = inputs[2]
if len(inputs) > 12:
strides_offset = 5
bias = inputs[4]
logger.warning("mask argument in deformable conv2d is not supported and ignored")
else:
strides_offset = 4
bias = inputs[3]
strides = (inputs[strides_offset], inputs[strides_offset + 1])
padding = (inputs[strides_offset + 2], inputs[strides_offset + 3])
dilation = (inputs[strides_offset + 4], inputs[strides_offset + 5])
groups = inputs[strides_offset + 6]
deformable_groups = inputs[strides_offset + 7]
weight_shape = self.infer_shape(weight)
output_channels = weight_shape[0]
kernel_size = (weight_shape[2], weight_shape[3])
conv_out = _op.nn.deformable_conv2d(
data,
offset,
weight,
strides,
padding,
dilation,
deformable_groups,
groups,
output_channels,
kernel_size,
)
return _op.nn.bias_add(conv_out, bias)
def stft(self, inputs, input_types):
data = inputs[0]
n_fft = inputs[1]
hop_length = inputs[2]
win_length = inputs[3]
window = inputs[4]
normalized = inputs[5]
onesided = inputs[6]
return _op.stft(data, n_fft, hop_length, win_length, window, normalized, onesided)
def unbind(self, inputs, input_types):
data = inputs[0]
axis = int(inputs[1])
return unbind(data, axis)
def shape_as_tensor(self, inputs, input_types):
is_symbolic_shape = False
input_shape = self.infer_shape(inputs[0], self.prelude.mod)
for axis in input_shape:
if not isinstance(axis, (int, tvm.tir.IntImm)):
is_symbolic_shape = True
break
if is_symbolic_shape:
ret = _op.shape_of(inputs[0], dtype="int64")
else:
ret = _expr.const(np.array(input_shape), dtype="int64")
return ret
def logical_and(self, inputs, input_types):
lhs = _op.cast(inputs[0], "bool")
rhs = _op.cast(inputs[1], "bool")
return _op.logical_and(lhs, rhs)
def nonzero(self, inputs, input_types, is_numpy_style=False):
data = inputs[0]
ret = _op.transform.argwhere(data)
if is_numpy_style or (len(inputs) > 1 and inputs[1]):
return unbind(ret, 1)
return ret
def nonzero_numpy(self, inputs, input_types):
return self.nonzero(inputs, input_types, is_numpy_style=False)
def scatter(self, inputs, input_types):
data = inputs[0]
axis = int(inputs[1])
index = inputs[2]
src = inputs[3]
return _op.transform.scatter(data, index, src, axis)
def index_put(self, inputs, input_types):
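"""
Converts aten::index_put(_) by stacking the per-dimension index tensors
into one (N, ...) index tensor and dispatching to scatter_nd in "update"
or "add" mode depending on the accumulate flag.
"""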
in_tensor = inputs[0]
indices = inputs[1]
values = inputs[2]
accumulate = inputs[3]
if not accumulate:
mode = "update"
else:
mode = "add"
# Combine array of index tensors into one index tensor with shape (N,_)
index_tensor = _op.stack(indices, axis=0)
return _op.transform.scatter_nd(in_tensor, index_tensor, values, mode)
def scalar_tensor(self, inputs, input_types):
data = inputs[0]
cast_map = {
6: "float32",
7: "float64",
3: "int32",
4: "int64",
}
type_key = inputs[1]
if isinstance(data, _expr.Constant):
data = data.data.numpy().tolist()
return _expr.const(data, cast_map[type_key])
def interpolate(self, inputs, input_types):
if isinstance(inputs[1], _expr.Expr):
out_size = inputs[1]
elif isinstance(inputs[1], list):
out_size = []
for i in [0, 1]:
size, _ = try_infer_value(
inputs[1][i],
lambda ret: ret.astype(int),
lambda: _op.expand_dims(inputs[1][i], axis=0),
)
out_size.append(size)
out_size = _op.concatenate(out_size, axis=0)
data = inputs[0]
align_corners = inputs[4]
method = inputs[3]
if method.startswith("nearest"):
method = "nearest_neighbor"
elif method[0:2] == "bi":
method = method[2:]
if method == "nearest_neighbor":
coord_trans = "asymmetric"
elif align_corners:
coord_trans = "align_corners"
else:
coord_trans = "half_pixel"
return _op.image.resize2d(
data, out_size, None, "NCHW", method, coord_trans, cubic_alpha=-0.75
)
def numel(self, inputs, input_types):
return _op.ndarray_size(inputs[0])
def empty(self, inputs, input_types):
shape = inputs[0]
return _op.zeros(shape, _convert_dtype_value(inputs[1]))
def empty_like(self, inputs, input_types):
shape = self.infer_shape(inputs[0])
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
else:
dtype = input_types[0]
return _op.zeros(shape, dtype)
def new_empty(self, inputs, input_types):
size = inputs[1]
import torch
if not isinstance(size, (_expr.Expr, list, tuple, torch.Size, np.ndarray)):
msg = "Data type %s could not be parsed in empty op" % (type(size))
raise AssertionError(msg)
if inputs[2] is not None:
dtype = _convert_dtype_value(inputs[2])
else:
dtype = input_types[0]
return _op.zeros(size, dtype)
def randn(self, inputs, input_types):
import time # use current time as seed
shape = inputs[0]
output = _op.random.normal(_op.random.threefry_key(int(time.time())), shape)
_, values = _expr.TupleWrapper(output, 2)
return values
def bincount(self, inputs, input_types):
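"""
Converts aten::bincount by scatter-adding ones (or the given weights) into
a zero tensor of length max(data) + 1; for int32 inputs the result is cast
to int64 to match torch, which always returns int64 counts.
"""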
data = inputs[0]
weights = inputs[1]
input_type = self.infer_type(data).dtype
if input_type == "int64":
logger.warning(
"Casting an int64 input to int32, since we do not have int64 atomic add"
"needed for bincount yet."
)
data = _op.cast(data, "int32")
maximum = _op.max(data)
dim = maximum + _expr.const(1, dtype="int32")
if weights:
weight_type = self.infer_type(weights)
out_dtype = weight_type.dtype
updates = weights
else:
out_dtype = "int32"
updates = _op.ones_like(data)
counts = _op.zeros(_op.reshape(dim, [1]), out_dtype)
out = _op.scatter_add(counts, data, updates, axis=0)
if input_type == "int32":
# Torch always outputs int64 results for bincount
return _op.cast(out, "int64")
return out
def scatter_add(self, inputs, input_types):
data = inputs[0]
axis = inputs[1]
index = inputs[2]
src = inputs[3]
return _op.scatter_add(data, index, src, axis=axis)
def cumsum(self, inputs, input_types):
data = inputs[0]
dim = inputs[1]
dtype = inputs[2]
if inputs[2] is not None:
dtype = _convert_dtype_value(inputs[2])
return _op.cumsum(data, axis=dim, dtype=dtype)
def masked_fill(self, inputs, input_types):
mask = inputs[1]
value = _op.cast(_wrap_const(inputs[2]), input_types[0])
return _op.where(mask, value, inputs[0])
def masked_select(self, inputs, input_types):
mask = inputs[1]
indices = self.nonzero([mask], input_types, is_numpy_style=True)
return _op.adv_index([inputs[0]] + [indices[i] for i in range(indices.size)])
def sort(self, inputs, input_types):
data = inputs[0]
dim = inputs[1]
is_descending = inputs[2]
# pytorch sort returns both sorted indices and values
indices = _op.argsort(data, dim, not is_descending)
return _op.gather(data, dim, indices), indices
def argsort(self, inputs, input_types):
data = inputs[0]
dim = inputs[1]
is_descending = inputs[2]
return _op.argsort(data, dim, not is_descending)
def is_floating_point(self, inputs, input_types):
assert len(inputs) == 1
if isinstance(inputs[0], _expr.Expr):
input_type = self.infer_type(inputs[0]).dtype
else:
input_type = input_types[0]
is_float = input_type in ["float32", "float64", "float16", "bfloat16"]
return _expr.const(is_float)
def unique(self, inputs, input_types):
assert len(inputs) == 4
[data, is_sorted, return_inverse, return_counts] = inputs
if not is_sorted:
logger.warning("TVM always assumes sorted=True for torch.unique")
is_sorted = True
if return_counts:
[unique, indices, inverse_indices, num_uniq, counts] = _op.unique(
data, is_sorted=is_sorted, return_counts=True
)
unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode="size")
counts_sliced = _op.strided_slice(counts, begin=[0], end=num_uniq, slice_mode="size")
return (unique_sliced, inverse_indices, counts_sliced)
else:
[unique, indices, inverse_indices, num_uniq] = _op.unique(
data, is_sorted=is_sorted, return_counts=False
)
unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode="size")
return (unique_sliced, inverse_indices)
def nll_loss(self, inputs, input_types):
assert len(inputs) == 5
[predictions, targets, weights, reduction, ignore_index] = inputs
num_class = self.infer_shape(predictions)[1]
if reduction == 0:
reduction = "none"
elif reduction == 1:
reduction = "mean"
else:
reduction = "sum"
if weights is None:
weights = _op.full(_expr.const(1), (num_class,), dtype=input_types[0])
return _op.nn.nll_loss(predictions, targets, weights, reduction, ignore_index)
def flip(self, inputs, input_types):
data = inputs[0]
axis = inputs[1]
return _op.transform.reverse(data, axis=axis[0])
def bidir_rnn_cell(self, input_seqs, weights_dicts, act=_op.tanh):
"""
Bidirectional RNN cell
"""
seq_len = len(input_seqs)
forward_outputs, fw_H_t = rnn_cell(input_seqs, **weights_dicts[0], backwards=False, act=act)
reverse_outputs, rev_H_t = rnn_cell(input_seqs, **weights_dicts[1], backwards=True, act=act)
final_outputs = []
for i in range(seq_len):
final_outputs.append(
_op.concatenate([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=-1)
)
return final_outputs, _op.stack([fw_H_t, rev_H_t], axis=0)
def rnn_layers(self, input_data, layer_weights_dicts, bidirectional, act, dropout_p=0.0):
"""
Iterates over the layers of a stacked RNN.
"""
layers_num = len(layer_weights_dicts)
# split input sequence to samples set
input_seqs = unbind(input_data, 0) # [seq_num, (batch, feature_size)]
output_hiddens = []
for i in range(layers_num):
weights_dicts = layer_weights_dicts[i]
# input_seqs shape = [seq_num, (batch, feature_size)] or
# [seq_num, (batch, 2*feature_size)] for bidirectional
if bidirectional:
input_seqs, H_t = self.bidir_rnn_cell(input_seqs, weights_dicts, act=act)
else:
input_seqs, H_t = rnn_cell(input_seqs, **weights_dicts[0], act=act)
output_hiddens.append(H_t)
# TODO (yuanfz98): in pytorch implementation train is also checked
# see https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339
# /aten/src/ATen/native/RNN.cpp#L1054
if dropout_p != 0 and i < layers_num - 1:
# for input in input_seqs:
# input = _op.dropout(input, dropout_p)
raise NotImplementedError("Dropout for GRU has not been supported yet!")
output_hiddens = (
_op.concatenate(output_hiddens, 0) if bidirectional else _op.stack(output_hiddens, 0)
)
return _op.stack(input_seqs, 0), output_hiddens
def rnn(self, inputs, input_types, nonlinearity):
"""
Description of RNN in pytorch:
https://pytorch.org/docs/stable/generated/torch.nn.RNN.html#torch.nn.RNN
Description of inputs:
https://github.com/pytorch/pytorch/blob/736fb7d22cc948b739db2c35aeb5ad4d19aea4f4/torch/overrides.py#L937
"""
# TODO (yuanfz98): support dropout
assert len(inputs) == 9, "Input of size 9 is expected"
# Unpack inputs, note that if optional and not provided then value will be None.
_X = inputs[0]
# _X shape (seq_num, batch, feature_size) or (batch, seq_num, feature_size)
hidden_state = inputs[1]
# Hidden state shape (hidden_layers_num, batch, hidden_size)
_weights = inputs[2]
# Wi layer[0] shape (hidden_size, feature_size)
# Wh layer[0] shape (hidden_size, hidden_size)
# Bi layer[0] shape (hidden_size)
# Bh layer[0] shape (hidden_size)
# Wi layer[>0] shape (hidden_size, hidden_size * num_directions)
# Wh layer[>0] shape (hidden_size, hidden_size)
# Bi layer[>0] shape (hidden_size)
# Bh layer[>0] shape (hidden_size)
# Scalar inputs
has_biases = inputs[3]
num_layers = inputs[4]
dropout_p = inputs[5] # dropout probability, if 0.0 it means there is no dropout
# train = inputs[6]
bidirectional = inputs[7]
batch_first = inputs[8]
num_directions = 1
if bidirectional:
num_directions = 2
rsd = len(_weights) % num_layers
assert rsd == 0, "The number of weights must be a multiple of the number of layers!"
rsd = (len(_weights) / num_layers) % num_directions
assert (
rsd == 0
), "The number of weights in layer must be a multiple of the number of directions!"
weights_num = int(len(_weights) / num_layers / num_directions)
if has_biases:
assert weights_num == 4, "The number of weights per layer is expected to be 4"
else:
assert weights_num == 2, "The number of weights per layer is expected to be 2"
if nonlinearity == "tanh":
act = _op.tanh
elif nonlinearity == "relu":
act = _op.nn.relu
else:
act = None
assert act, "The nonlinearity is unknown"
X = (
_op.transpose(_X, (1, 0, 2)) if batch_first else _X
) # always (seq_num, batch, feature_size)
# TODO (yuanfz98): Which data type should be used? from input or weights?
# Instead of it _infer_type(X).checked_type.dtype can be used
X_dtype = input_types[0]
X_shape = _infer_shape(X) # (seq_num, batch, feature_size)
hidden_size = int(_infer_shape(_weights[0])[0])
batch_size = X_shape[1]
# Initialize hidden states if not provided.
layers_h = []
hidden_layers_num = num_directions * num_layers
if hidden_state is None:
h_0 = _op.zeros((batch_size, hidden_size), X_dtype)
for i in range(hidden_layers_num):
layers_h.append(h_0)
else:
layers_h = unbind(hidden_state, 0)
layer_weights_dicts = []
k = 0 # layer counter
if has_biases:
names = ["hidden_state", "w_inp", "w_hid", "b_inp", "b_hid"]
if bidirectional:
rsd = len(_weights) % (2 * weights_num)
assert rsd == 0, "got an incorrect number of RNN weights"
for i in range(0, len(_weights), 2 * weights_num):
fw_tensors = [layers_h[2 * k], *_weights[i : i + 4]]
fw_weights_dict = dict(zip(names, fw_tensors))
j = i + weights_num
rev_tensors = [layers_h[2 * k + 1], *_weights[j : j + 4]]
rev_weights_dict = dict(zip(names, rev_tensors))
layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
k += 1
else:
assert len(_weights) % weights_num == 0, "got an incorrect number of GRU weights"
for i in range(0, len(_weights), weights_num):
fw_tensors = [layers_h[k], *_weights[i : i + 4]]
fw_weights_dict = dict(zip(names, fw_tensors))
layer_weights_dicts.append([fw_weights_dict])
k += 1
else:
names = ["hidden_state", "w_inp", "w_hid"]
if bidirectional:
rsd = len(_weights) % (2 * weights_num)
assert rsd == 0, "got an incorrect number of RNN weights"
for i in range(0, len(_weights), 2 * weights_num):
fw_tensors = [layers_h[2 * k], *_weights[i : i + 2]]
fw_weights_dict = dict(zip(names, fw_tensors))
j = i + weights_num
rev_tensors = [layers_h[2 * k + 1], *_weights[j : j + 2]]
rev_weights_dict = dict(zip(names, rev_tensors))
layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
k += 1
else:
assert len(_weights) % weights_num == 0, "got an incorrect number of RNN weights"
for i in range(0, len(_weights), weights_num):
fw_tensors = [layers_h[k], *_weights[i : i + 2]]
fw_weights_dict = dict(zip(names, fw_tensors))
layer_weights_dicts.append([fw_weights_dict])
k += 1
assert (
len(layer_weights_dicts) == num_layers and k == num_layers
), "For stacked RNN number of weights sets should be the same as number of layers!"
output, out_hidden_state = self.rnn_layers(
X,
layer_weights_dicts,
bidirectional,
act,
dropout_p=dropout_p,
)
# output shape = (seq_num, batch, hidden_size) or
# (seq_num, batch, 2 * hidden_size) for bidirectional
if batch_first:
output = _op.transpose(output, (1, 0, 2))
return (output, out_hidden_state)
def bidir_gru_cell(
self,
input_seqs,
weights_dicts,
):
"""
Bidirectional GRU cell
"""
seq_len = len(input_seqs)
forward_outputs, fw_H_t = gru_cell(
input_seqs,
**weights_dicts[0],
)
reverse_outputs, rev_H_t = gru_cell(
input_seqs,
**weights_dicts[1],
backwards=True,
)
final_outputs = []
for i in range(seq_len):
final_outputs.append(
_op.concatenate([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=-1)
)
return final_outputs, _op.stack([fw_H_t, rev_H_t], axis=0)
def gru_layers(self, input_data, layer_weights_dicts, bidirectional, dropout_p=0.0):
"""
Iterates over the layers of a stacked GRU.
"""
layers_num = len(layer_weights_dicts)
# split input sequence to samples set
input_seqs = unbind(input_data, 0) # [seq_num, (batch, feature_size)]
output_hiddens = []
for i in range(layers_num):
weights_dicts = layer_weights_dicts[i]
# input_seqs shape = [seq_num, (batch, feature_size)] or
# [seq_num, (batch, 2*feature_size)] for bidirectional
if bidirectional:
input_seqs, H_t = self.bidir_gru_cell(input_seqs, weights_dicts)
else:
input_seqs, H_t = gru_cell(input_seqs, **weights_dicts[0])
output_hiddens.append(H_t)
# TODO (vvchernov): in pytorch implementation train is also checked
# see https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339
# /aten/src/ATen/native/RNN.cpp#L1054
if dropout_p != 0 and i < layers_num - 1:
# for input in input_seqs:
# input = _op.dropout(input, dropout_p)
raise NotImplementedError("Dropout for GRU has not been supported yet!")
return _op.stack(input_seqs, 0), _op.stack(output_hiddens, 0)
def gru(self, inputs, input_types):
"""
Description of GRU in pytorch:
https://pytorch.org/docs/stable/generated/torch.nn.GRU.html?highlight=gru#torch.nn.GRU
"""
# TODO (vvchernov): support dropout
assert len(inputs) == 9, "Input of size 9 is expected"
# Unpack inputs, note that if optional and not provided then value will be None.
_X = inputs[0]
# _X shape (seq_num, batch, feature_size) or (batch, seq_num, feature_size)
hidden_state = inputs[1]
# Hidden state shape (hidden_layers_num, batch, hidden_size)
_weights = inputs[2]
# Wi layer[0] shape (3 * hidden_size, feature_size)
# Wh layer[0] shape (3 * hidden_size, hidden_size)
# Bi layer[0] shape (3 * hidden_size)
# Bh layer[0] shape (3 * hidden_size)
# Wi layer[>0] shape (3 * hidden_size, hidden_size * num_directions)
# Wh layer[>0] shape (3 * hidden_size, hidden_size)
# Bi layer[>0] shape (3 * hidden_size)
# Bh layer[>0] shape (3 * hidden_size)
# Scalar inputs
has_biases = inputs[3]
num_layers = inputs[4]
dropout_p = inputs[5] # dropout probability, if 0.0 it means there is no dropout
# train = inputs[6]
bidirectional = inputs[7]
batch_first = inputs[8]
num_directions = 1
if bidirectional:
num_directions = 2
rsd = len(_weights) % num_layers
assert rsd == 0, "The number of weights must be a multiple of the number of layers!"
rsd = (len(_weights) / num_layers) % num_directions
assert (
rsd == 0
), "The number of weights in layer must be a multiple of the number of directions!"
weights_num = int(len(_weights) / num_layers / num_directions)
if has_biases:
assert weights_num == 4, "The number of weights per layer is expected to be 4"
else:
assert weights_num == 2, "The number of weights per layer is expected to be 2"
X = _op.transpose(_X, (1, 0, 2)) if batch_first else _X
# TODO (vvchernov): Which data type should be used? from input or weights?
# Instead of it _infer_type(X).checked_type.dtype can be used
X_dtype = input_types[0]
X_shape = _infer_shape(X) # (seq_num, batch, feature_size)
hidden_size = int(_infer_shape(_weights[0])[0] / 3)
batch_size = X_shape[1]
# Initialize hidden states if not provided.
layers_h = []
hidden_layers_num = num_directions * num_layers
if hidden_state is None:
h_0 = _op.zeros((batch_size, hidden_size), X_dtype)
for i in range(hidden_layers_num):
layers_h.append(h_0)
else:
layers_h = unbind(hidden_state, 0)
layer_weights_dicts = []
k = 0 # layer counter
if has_biases:
names = ["hidden_state", "w_inp", "w_hid", "b_inp", "b_hid"]
if bidirectional:
rsd = len(_weights) % (2 * weights_num)
assert rsd == 0, "got an incorrect number of GRU weights"
for i in range(0, len(_weights), 2 * weights_num):
fw_tensors = [layers_h[2 * k], *_weights[i : i + 4]]
fw_weights_dict = dict(zip(names, fw_tensors))
j = i + weights_num
rev_tensors = [layers_h[2 * k + 1], *_weights[j : j + 4]]
rev_weights_dict = dict(zip(names, rev_tensors))
layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
k += 1
else:
assert len(_weights) % weights_num == 0, "got an incorrect number of GRU weights"
for i in range(0, len(_weights), weights_num):
fw_tensors = [layers_h[k], *_weights[i : i + 4]]
fw_weights_dict = dict(zip(names, fw_tensors))
layer_weights_dicts.append([fw_weights_dict])
k += 1
else:
names = ["hidden_state", "w_inp", "w_hid"]
if bidirectional:
rsd = len(_weights) % (2 * weights_num)
assert rsd == 0, "got an incorrect number of GRU weights"
for i in range(0, len(_weights), 2 * weights_num):
fw_tensors = [layers_h[2 * k], *_weights[i : i + 2]]
fw_weights_dict = dict(zip(names, fw_tensors))
j = i + weights_num
rev_tensors = [layers_h[2 * k + 1], *_weights[j : j + 2]]
rev_weights_dict = dict(zip(names, rev_tensors))
layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
k += 1
else:
assert len(_weights) % weights_num == 0, "got an incorrect number of GRU weights"
for i in range(0, len(_weights), weights_num):
fw_tensors = [layers_h[k], *_weights[i : i + 2]]
fw_weights_dict = dict(zip(names, fw_tensors))
layer_weights_dicts.append([fw_weights_dict])
k += 1
assert (
len(layer_weights_dicts) == num_layers and k == num_layers
), "For stacked GRU number of weights sets should be the same as number of layers!"
output, out_hidden_state = self.gru_layers(
X,
layer_weights_dicts,
bidirectional,
dropout_p=dropout_p,
)
# output shape = (seq_num, batch, hidden_size) or
# (seq_num, batch, 2 * hidden_size) for bidirectional
if batch_first:
output = _op.transpose(output, (1, 0, 2))
return (output, out_hidden_state)
def bidir_lstm_cell(
self,
input_seqs,
weights_dicts,
):
"""
Bidirectional LSTM cell
"""
seq_len = len(input_seqs)
forward_outputs, fw_H_t, fw_C_t = lstm_cell(
input_seqs,
**weights_dicts[0],
)
reverse_outputs, rev_H_t, rev_C_t = lstm_cell(
input_seqs,
**weights_dicts[1],
backwards=True,
)
final_outputs = []
for i in range(seq_len):
final_outputs.append(
_op.concatenate([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=-1)
)
return final_outputs, (fw_H_t, fw_C_t), (rev_H_t, rev_C_t)
def lstm_layers(self, input_data, layer_weights_dicts, bidirectional, dtype, dropout_p=0.0):
"""
Iterates over the layers of a stacked LSTM.
"""
layers_num = len(layer_weights_dicts)
# split input sequence to samples set
input_seqs = unbind(input_data, 0) # [seq_num, (batch, feature_size)]
output_hiddens = []
for i in range(layers_num):
weights_dicts = layer_weights_dicts[i]
# input_seqs shape = [seq_num, (batch, feature_size)] or
# [seq_num, (batch, 2*feature_size)] for bidirectional
if bidirectional:
input_seqs, H_t, C_t = self.bidir_lstm_cell(input_seqs, weights_dicts)
else:
input_seqs, H_t, C_t = lstm_cell(input_seqs, **weights_dicts[0])
output_hiddens.append((H_t, C_t))
# TODO (vvchernov): in pytorch implementation train is also checked
# see https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339
# /aten/src/ATen/native/RNN.cpp#L1054
if dropout_p != 0 and i < layers_num - 1:
# for input in input_seqs:
# input = _op.dropout(input, dropout_p)
raise NotImplementedError("Dropout for LSTM has not been supported yet!")
final_hiddens = []
if bidirectional:
for output_hidden in output_hiddens:
final_hiddens.append(output_hidden[0])
final_hiddens.append(output_hidden[1])
else:
final_hiddens = output_hiddens
return _op.stack(input_seqs, 0), final_hiddens
def lstm(self, inputs, input_types):
"""
Description of LSTM in pytorch:https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html
Native implementation for torch version less than 1.8.0 (projection is unsupported):
https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339/aten/ \
src/ATen/native/RNN.cpp#L1396
Native implementation for torch version from 1.8.0 and higher (projection is supported):
https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/RNN.cpp#L1483
"""
# TODO (vvchernov): support dropout
assert len(inputs) == 9, "Input of size 9 is expected"
# Unpack inputs, note that if optional and not provided then value will be None.
_X = inputs[0]
# _X shape (seq_num, batch, feature_size) or (batch, seq_num, feature_size)
hidden_states = inputs[1]
assert len(hidden_states) == 2, "lstm expects two hidden states"
h_0 = hidden_states[0]
c_0 = hidden_states[1]
# H0 shape (hidden_layers_num, batch, proj_size) if projection
# else (hidden_layers_num, batch, hidden_size)
# C0 shape (hidden_layers_num, batch, hidden_size)
_weights = inputs[2]
# If no projection
# Wi layer[0] shape (4 * hidden_size, feature_size)
# Wh layer[0] shape (4 * hidden_size, hidden_size)
# Bi layer[0] shape (4 * hidden_size)
# Bh layer[0] shape (4 * hidden_size)
# Wi layer[>0] shape (4 * hidden_size, hidden_size * num_directions)
# Wh layer[>0] shape (4 * hidden_size, hidden_size)
# Bi layer[>0] shape (4 * hidden_size)
# Bh layer[>0] shape (4 * hidden_size)
# If projection
# Wi layer[0] shape (4 * hidden_size, feature_size)
# Wh layer[0] shape (4 * hidden_size, proj_size)
# Bi layer[0] shape (4 * hidden_size)
# Bh layer[0] shape (4 * hidden_size)
# P layer[0] shape (proj_size, hidden_size)
# Wi layer[>0] shape (4 * hidden_size, proj_size * num_directions)
# Wh layer[>0] shape (4 * hidden_size, proj_size)
# Bi layer[>0] shape (4 * hidden_size)
# Bh layer[>0] shape (4 * hidden_size)
# P layer[>0] shape (proj_size, hidden_size)
# Scalar inputs
has_biases = inputs[3]
num_layers = inputs[4]
dropout_p = inputs[5] # dropout probability, if 0.0 it means there is no dropout
# train = inputs[6]
bidirectional = inputs[7]
batch_first = inputs[8]
num_directions = 1
if bidirectional:
num_directions = 2
rsd = len(_weights) % num_layers
assert rsd == 0, "The number of weights must be a multiple of the number of layers!"
rsd = (len(_weights) / num_layers) % num_directions
assert (
rsd == 0
), "The number of weights in layer must be a multiple of the number of directions!"
has_proj = False
proj_size = 0
weights_num = int(len(_weights) / num_layers / num_directions)
if has_biases:
if weights_num == 5:
has_proj = True
proj_size = _infer_shape(_weights[4])[0]
else:
assert weights_num == 4, "The number of weights per layer is expected to be 4"
else:
if weights_num == 3:
has_proj = True
proj_size = _infer_shape(_weights[2])[0]
else:
assert weights_num == 2, "The number of weights per layer is expected to be 2"
X = _op.transpose(_X, (1, 0, 2)) if batch_first else _X
# TODO (vvchernov): Which data type should be used? from input or weights?
# Instead of it _infer_type(X).checked_type.dtype can be used
X_dtype = input_types[0]
X_shape = _infer_shape(X) # (seq_num, batch, feature_size)
hidden_size = int(_infer_shape(_weights[0])[0] / 4)
batch_size = X_shape[1]
# Initialize hidden states if not provided.
layers_h = []
layers_c = []
hidden_layers_num = num_directions * num_layers
if h_0 is None:
if has_proj:
h_0 = _op.zeros((batch_size, proj_size), X_dtype)
else:
h_0 = _op.zeros((batch_size, hidden_size), X_dtype)
for i in range(hidden_layers_num):
layers_h.append(h_0)
else:
layers_h = unbind(h_0, 0)
if c_0 is None:
c_0 = _op.zeros((batch_size, hidden_size), X_dtype)
for i in range(hidden_layers_num):
layers_c.append(c_0)
else:
layers_c = unbind(c_0, 0)
layer_weights_dicts = []
k = 0 # layer counter
if has_biases:
names = ["hidden_state", "cell_state", "w_inp", "w_hid", "b_inp", "b_hid"]
if bidirectional:
rsd = len(_weights) % (2 * weights_num)
assert rsd == 0, "got an incorrect number of LSTM weights"
for i in range(0, len(_weights), 2 * weights_num):
fw_tensors = [layers_h[2 * k], layers_c[2 * k], *_weights[i : i + 4]]
fw_weights_dict = dict(zip(names, fw_tensors))
if has_proj:
fw_weights_dict["proj"] = _weights[i + 4]
j = i + weights_num
rev_tensors = [layers_h[2 * k + 1], layers_c[2 * k + 1], *_weights[j : j + 4]]
rev_weights_dict = dict(zip(names, rev_tensors))
if has_proj:
rev_weights_dict["proj"] = _weights[j + 4]
layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
k += 1
else:
assert len(_weights) % weights_num == 0, "got an incorrect number of LSTM weights"
for i in range(0, len(_weights), weights_num):
fw_tensors = [layers_h[k], layers_c[k], *_weights[i : i + 4]]
fw_weights_dict = dict(zip(names, fw_tensors))
if has_proj:
fw_weights_dict["proj"] = _weights[i + 4]
layer_weights_dicts.append([fw_weights_dict])
k += 1
else:
names = ["hidden_state", "cell_state", "w_inp", "w_hid"]
if bidirectional:
rsd = len(_weights) % (2 * weights_num)
assert rsd == 0, "got an incorrect number of LSTM weights"
for i in range(0, len(_weights), 2 * weights_num):
fw_tensors = [layers_h[2 * k], layers_c[2 * k], *_weights[i : i + 2]]
fw_weights_dict = dict(zip(names, fw_tensors))
if has_proj:
fw_weights_dict["proj"] = _weights[i + 2]
j = i + weights_num
rev_tensors = [layers_h[2 * k + 1], layers_c[2 * k + 1], *_weights[j : j + 2]]
rev_weights_dict = dict(zip(names, rev_tensors))
if has_proj:
rev_weights_dict["proj"] = _weights[j + 2]
layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
k += 1
else:
assert len(_weights) % weights_num == 0, "got an incorrect number of LSTM weights"
for i in range(0, len(_weights), weights_num):
fw_tensors = [layers_h[k], layers_c[k], *_weights[i : i + 2]]
fw_weights_dict = dict(zip(names, fw_tensors))
if has_proj:
fw_weights_dict["proj"] = _weights[i + 2]
layer_weights_dicts.append([fw_weights_dict])
k += 1
assert (
len(layer_weights_dicts) == num_layers and k == num_layers
), "For stacked LSTM number of weights sets should be the same as number of layers!"
outputs = self.lstm_layers(
X,
layer_weights_dicts,
bidirectional,
dtype=X_dtype,
dropout_p=dropout_p,
)
# output shape = (seq_num, batch, hidden_size) or
# (seq_num, batch, 2 * hidden_size) for bidirectional
output = outputs[0]
hy = []
cy = []
for hidden in outputs[1]:
hy.append(hidden[0])
cy.append(hidden[1])
if batch_first:
output = _op.transpose(output, (1, 0, 2))
return (output, _op.stack(hy, 0), _op.stack(cy, 0))
def all_any_common(self, op, inputs, input_types):
if len(inputs) >= 2:
dim = inputs[1]
else:
dim = None
if len(inputs) >= 3:
keepdim = inputs[2]
else:
keepdim = False
if self.infer_type(inputs[0]).dtype != "bool":
# The input dtype can be uint8.
inp = _op.cast(inputs[0], "bool")
else:
inp = inputs[0]
return op(inp, axis=dim, keepdims=keepdim)
def searchsorted_common(
self, sorted_sequence, values, out_int32, right, side=None, out=None, sorter=None
):
assert side is None and out is None and sorter is None, "unsupported parameters"
dtype = "int32" if out_int32 else "int64"
values_shape = _infer_shape(values)
if len(values_shape) == 0:
values = _op.expand_dims(values, 0)
out = _op.searchsorted(sorted_sequence, values, right=right, dtype=dtype)
if len(values_shape) == 0:
return _op.squeeze(out)
return out
def searchsorted(self, inputs, input_types):
return self.searchsorted_common(*inputs)
def bucketize(self, inputs, input_types):
return self.searchsorted_common(inputs[1], inputs[0], inputs[2], inputs[3])
def roll(self, inputs, input_types):
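"""
Converts aten::roll. For each rolled axis, a 1-D index vector
((arange - shift) mod dim_size) is tiled to the full shape, moved into the
rolled axis with a transpose, and used with gather to select the shifted
elements.
"""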
def slide_axes(inp, shape, ax):
axes = list(range(len(shape)))
axes = axes[:ax] + [-1] + axes[ax:-1]
return _op.transpose(inp, axes)
x = inputs[0]
shifts = inputs[1]
dims = inputs[2]
shape = self.infer_shape(x)
start = _expr.const(0, "int64")
step = _expr.const(1, "int64")
out = x
for i, dim in enumerate(dims):
roll_dim = _expr.const(shape[dim], "int64")
indices_1d = _op.mod(
_op.transform.arange(start, roll_dim, step, "int64")
- _expr.const(shifts[i], "int64")
+ roll_dim,
roll_dim,
)
# First fill in the last axis with roll indices, and then do transpose to
# bring the roll indices into the desired axis.
indices = slide_axes(
_op.tile(indices_1d, shape[:dim] + shape[dim + 1 :] + (1,)),
shape,
dim,
)
out = _op.gather(out, dim, indices)
return out
def einsum(self, inputs, input_types):
equation, data = inputs
return _op.einsum(data, equation)
def dot(self, inputs, _):
lhs, rhs = inputs
return _op.sum(_op.multiply(lhs, rhs))
def mv(self, inputs, _):
lhs, rhs = inputs
# Convert the 1D matrix (vector) into a 2D matrix with the extra
# dimension=1
rhs_matrix = _op.transform.expand_dims(rhs, 0)
# Run multiplication
dense_result = _op.nn.dense(lhs, rhs_matrix, units=None)
# Chop off the extra result dimension
return _op.transform.squeeze(dense_result)
def grid_sampler(self, inputs, input_types):
interpolate_mode = inputs[2]
padding_mode = inputs[3]
align_corners = inputs[4]
data_shape = self.infer_shape_with_prelude(inputs[0])
if len(data_shape) == 4:
layout = "NCHW"
axes = [0, 3, 1, 2]
grid = _op.transform.transpose(inputs[1], axes)
elif len(data_shape) == 5:
layout = "NCDHW"
axes = [0, 4, 1, 2, 3]
grid = _op.transform.transpose(inputs[1], axes)
else:
msg = f"only 4D and 5D are supported."
raise ValueError(msg)
if interpolate_mode == 0:
interpolate_str = "bilinear"
elif interpolate_mode == 1:
interpolate_str = "nearest"
elif interpolate_mode == 2:
interpolate_str = "bicubic"
else:
msg = f"interpolation method {interpolate_mode} is not supported"
raise ValueError(msg)
if padding_mode == 0:
padding_mode_str = "zeros"
elif padding_mode == 1:
padding_mode_str = "border"
elif padding_mode == 2:
padding_mode_str = "reflection"
else:
msg = f"padding_mode {padding_mode} is not supported"
raise ValueError(msg)
return _op.image.grid_sample(
inputs[0], grid, interpolate_str, layout, padding_mode_str, align_corners
)
def trilu(self, inputs, input_types, mode):
data = inputs[0]
k = inputs[1] if inputs[1] else 0
upper = True if mode == "triu" else False
return _op.trilu(data, k, upper)
def multinomial(self, inputs, input_types):
probs = inputs[0]
num_samples = inputs[1]
replacement = inputs[2] if inputs[2] is not None else True
assert not (
replacement is False and num_samples > 1
), "Multinomial without replacement is not yet supported."
# Ideally this seed would be generated by a previous threefry operation.
# Eventually we might want to add a global store for random keys.
seed = np.random.randint(1e6)
key = _op.random.threefry_key(seed)
output = _op.random.multinomial(key, probs, num_samples)
_, indices = _expr.TupleWrapper(output, 2)
return indices
# Operator mappings
def create_convert_map(self):
self.convert_map = {
"aten::is_floating_point": self.is_floating_point,
"aten::pixel_shuffle": self.pixel_shuffle,
"aten::device": self.none,
"prim::device": self.none,
"aten::sub": self.sub,
"aten::max": self.max,
"aten::min": self.min,
"aten::maximum": self.maximum,
"aten::minimum": self.minimum,
"aten::amax": self.max,
"aten::amin": self.min,
"aten::stft": self.stft,
"aten::mul": self.make_elemwise("multiply"),
"aten::pow": self.make_elemwise("power"),
"aten::lerp": self.lerp,
"aten::arange": self.arange,
"aten::meshgrid": self.meshgrid,
"aten::div": self.make_elemwise("divide"),
"aten::floor_divide": self.make_elemwise("floor_divide"),
"aten::true_divide": self.make_elemwise("divide"),
"aten::fmod": self.make_elemwise("trunc_mod"),
"aten::remainder": self.make_elemwise("floor_mod"),
"aten::addcdiv": self.addcdiv,
"aten::addcmul": self.addcmul,
"aten::ones": self.ones,
"aten::ones_like": self.ones_like,
"aten::zeros": self.zeros,
"aten::zero_": self.zero_,
"aten::zeros_like": self.zeros_like,
"aten::new_ones": self.new_ones,
"aten::full": self.full,
"aten::full_like": self.full_like,
"aten::new_full": self.new_full,
"aten::fill_": self.fill_,
"aten::linspace": self.linspace,
"aten::reciprocal": self.reciprocal,
"aten::repeat": self.repeat,
"aten::repeat_interleave": self.repeat_interleave,
"aten::to": self.to,
"aten::squeeze": self.squeeze,
"aten::unsqueeze": self.unsqueeze,
"aten::cat": self.concatenate,
"aten::slice": self.slice,
"aten::narrow": self.narrow,
"aten::split": self.split,
"aten::tensor_split": self.tensor_split,
"aten::split_with_sizes": self.split_with_sizes,
"aten::select": self.select,
"aten::take": self.take,
"aten::where": self.where,
"aten::topk": self.topk,
"aten::relu": self.relu,
"aten::relu6": self.relu6,
"aten::prelu": self.prelu,
"aten::leaky_relu": self.leaky_relu,
"aten::elu": self.elu,
"aten::celu": self.celu,
"aten::gelu": self.gelu,
"aten::selu": self.selu,
"aten::silu": self.silu,
"aten::glu": self.glu,
"aten::log_sigmoid": self.log_sigmoid,
"aten::adaptive_avg_pool1d": functools.partial(
self.adaptive_avg_pool, _op.nn.adaptive_avg_pool1d
),
"aten::adaptive_avg_pool2d": functools.partial(
self.adaptive_avg_pool, _op.nn.adaptive_avg_pool2d
),
"aten::adaptive_avg_pool3d": functools.partial(
self.adaptive_avg_pool, _op.nn.adaptive_avg_pool3d
),
"aten::adaptive_max_pool1d": functools.partial(
self.adaptive_max_pool, _op.nn.adaptive_max_pool1d
),
"aten::adaptive_max_pool2d": functools.partial(
self.adaptive_max_pool, _op.nn.adaptive_max_pool2d
),
"aten::adaptive_max_pool3d": functools.partial(
self.adaptive_max_pool, _op.nn.adaptive_max_pool3d
),
"aten::max_pool2d": self.maxpool_2d,
"aten::max_pool2d_with_indices": self.maxpool_2d_with_indices,
"aten::max_pool1d": self.maxpool_1d,
"aten::max_pool3d": self.maxpool_3d,
"aten::hardtanh": self.hardtanh,
"aten::_convolution": self.convolution,
"aten::softmax": self.softmax,
"aten::threshold": self.threshold,
"aten::contiguous": self.contiguous,
"aten::batch_norm": self.batch_norm,
"aten::instance_norm": self.instance_norm,
"aten::layer_norm": self.layer_norm,
"aten::group_norm": self.group_norm,
"aten::transpose": self.transpose,
"aten::t": self.transpose,
"aten::numpy_T": self.numpy_T,
"aten::flatten": self.flatten,
"aten::addmm": self.addmm,
"aten::size": self.size,
"aten::view": self.view,
"aten::reshape": self.reshape,
"aten::reshape_as": self.reshape_as,
"aten::clone": self.clone,
"aten::log_softmax": self.log_softmax,
"aten::sigmoid": self.sigmoid,
"aten::softplus": self.softplus,
"aten::avg_pool1d": self.make_avg_pool(1),
"aten::avg_pool2d": self.make_avg_pool(2),
"aten::avg_pool3d": self.make_avg_pool(3),
"aten::linear": self.linear,
"aten::dropout": self.dropout,
"aten::feature_dropout": self.dropout,
"aten::alpha_dropout": self.dropout,
"aten::mean": self.mean,
"aten::chunk": self.chunk,
"aten::unsafe_chunk": self.chunk,
"aten::matmul": self.matmul,
"aten::bmm": self.matmul,
"aten::expand": self.expand,
"aten::Int": self.int,
"prim::NumToTensor": self.numtotensor,
"prim::ImplicitTensorToNum": self.tensortonum,
"aten::ScalarImplicit": self.tensortonum,
"aten::pad": self.pad,
"aten::constant_pad_nd": self.constant_pad_nd,
"aten::reflection_pad1d": self.reflection_pad1d,
"aten::reflection_pad2d": self.reflection_pad2d,
"aten::replication_pad1d": self.replication_pad1d,
"aten::replication_pad2d": self.replication_pad2d,
"aten::replication_pad3d": self.replication_pad3d,
"aten::permute": self.transpose,
"aten::sum": self.make_reduce("sum"),
"aten::prod": self.make_reduce("prod"),
"aten::argmin": self.make_reduce("argmin"),
"aten::argmax": self.make_reduce("argmax"),
"aten::norm": self.norm,
"aten::frobenius_norm": self.frobenius_norm,
"aten::std": self.std,
"aten::var": self.variance,
"aten::var_mean": self.var_mean,
"aten::abs": self.make_unary("abs"),
"aten::neg": self.make_unary("negative"),
"aten::cos": self.make_unary("cos"),
"aten::cosh": self.make_unary("cosh"),
"aten::sin": self.make_unary("sin"),
"aten::sinh": self.make_unary("sinh"),
"aten::tan": self.make_unary("tan"),
"aten::tanh": self.make_unary("tanh"),
"aten::acos": self.make_unary("acos"),
"aten::asin": self.make_unary("asin"),
"aten::atan": self.make_unary("atan"),
"aten::log": self.make_unary("log"),
"aten::log2": self.make_unary("log2"),
"aten::log10": self.make_unary("log10"),
"aten::log1p": self.log1p,
"aten::exp": self.make_unary("exp"),
"aten::erf": self.make_unary("erf"),
"aten::trunc": self.make_unary("trunc"),
"aten::sign": self.make_unary("sign"),
"aten::sqrt": self.make_unary("sqrt"),
"aten::rsqrt": self.make_unary("rsqrt"),
"aten::square": self.square,
"aten::tril": functools.partial(self.trilu, mode="tril"),
"aten::triu": functools.partial(self.trilu, mode="triu"),
"aten::ceil": self.make_unary("ceil"),
"aten::floor": self.make_unary("floor"),
"aten::round": self.make_unary("round"),
"aten::isfinite": self.make_unary("isfinite"),
"aten::isinf": self.make_unary("isinf"),
"aten::isnan": self.make_unary("isnan"),
"aten::clamp": self.clamp,
"aten::clamp_min": self.clamp_min,
"aten::clamp_max": self.clamp_max,
"aten::detach": self.identity,
"aten::upsample_bilinear2d": self.make_upsample("linear"),
"aten::upsample_bicubic2d": self.make_upsample("cubic"),
"aten::upsample_nearest2d": self.make_upsample("nearest_neighbor"),
"aten::upsample_trilinear3d": self.make_upsample3d("linear"),
"aten::upsample_nearest3d": self.make_upsample3d("nearest_neighbor"),
"aten::expand_as": self.expand_as,
"aten::broadcast_tensors": self.broadcast_tensors,
"aten::lt": self.make_elemwise("less"),
"aten::gt": self.make_elemwise("greater"),
"aten::le": self.make_elemwise("less_equal"),
"aten::ge": self.make_elemwise("greater_equal"),
"aten::ne": self.make_elemwise("not_equal"),
"aten::eq": self.make_elemwise("equal"),
"aten::logical_not": self.logical_not,
"aten::logical_xor": self.logical_xor,
"aten::bitwise_not": self.bitwise_not,
"aten::bitwise_xor": self.bitwise_xor,
"aten::Bool": self.Bool,
"aten::Float": self.Float,
"aten::rsub": self.rsub,
"aten::embedding": self.embedding,
"aten::one_hot": self.one_hot,
"aten::mm": self.matmul,
"aten::add": self.add,
"aten::stack": self.stack,
"aten::__getitem__": self.list_getitem,
"aten::len": self.list_len,
"aten::type_as": self.type_as,
"aten::gather": self.gather,
"aten::index_select": self.select,
"aten::index": self.index,
"torchvision::nms": self.nms,
"aten::logsumexp": self.logsumexp,
"torchvision::roi_align": self.roi_align,
"torchvision::deform_conv2d": self.deform_conv2d,
"aten::unbind": self.unbind,
"aten::__and__": self.logical_and,
"aten::logical_and": self.logical_and,
"aten::_shape_as_tensor": self.shape_as_tensor,
"aten::nonzero": self.nonzero,
"aten::nonzero_numpy": self.nonzero_numpy,
"aten::scatter": self.scatter,
"aten::index_put": self.index_put,
"aten::scalar_tensor": self.scalar_tensor,
"aten::__interpolate": self.interpolate,
"aten::IntImplicit": self.identity,
"aten::tensor": self.identity, # used for example in tensor(1.0)
"aten::numel": self.numel,
"aten::empty": self.empty,
"aten::empty_like": self.empty_like,
"aten::new_empty": self.new_empty,
"aten::randn": self.randn,
"aten::bincount": self.bincount,
"aten::scatter_add": self.scatter_add,
"aten::__not__": self.logical_not,
"aten::hardswish": self.hard_swish,
"aten::hardsigmoid": self.hard_sigmoid,
"aten::cumsum": self.cumsum,
"aten::masked_fill": self.masked_fill,
"aten::masked_select": self.masked_select,
"aten::argsort": self.argsort,
"aten::sort": self.sort,
"aten::_unique2": self.unique,
"aten::nll_loss": self.nll_loss,
"aten::nll_loss2d": self.nll_loss,
"aten::nll_loss_nd": self.nll_loss,
"aten::cross_entropy_loss": self.cross_entropy_loss_with_logits,
"aten::l1_loss": self.l1_loss,
"aten::mse_loss": self.mse_loss,
"aten::flip": self.flip,
"aten::rnn_tanh": functools.partial(self.rnn, nonlinearity="tanh"),
"aten::rnn_relu": functools.partial(self.rnn, nonlinearity="relu"),
"aten::gru": self.gru,
"aten::lstm": self.lstm,
"aten::all": functools.partial(self.all_any_common, _op.all),
"aten::any": functools.partial(self.all_any_common, _op.any),
"aten::searchsorted": self.searchsorted,
"aten::bucketize": self.bucketize,
"aten::roll": self.roll,
"aten::einsum": self.einsum,
"aten::dot": self.dot,
"aten::mv": self.mv,
"aten::grid_sampler": self.grid_sampler,
"aten::__ior__": self.make_elemwise("bitwise_or"),
"aten::__iand__": self.make_elemwise("bitwise_and"),
"aten::__ixor__": self.make_elemwise("bitwise_xor"),
"aten::__lshift__": self.make_elemwise("left_shift"),
"aten::__rshift__": self.make_elemwise("right_shift"),
"aten::multinomial": self.multinomial,
}
def update_convert_map(self, custom_map):
self.convert_map.update(custom_map)
def report_missing_conversion(self, op_names):
"""Check if all ops in an input graph are supported by TVM"""
known_ops = [
"prim::Constant",
"prim::GetAttr",
"prim::ListConstruct",
"prim::ListUnpack",
"prim::TupleConstruct",
"prim::TupleUnpack",
"prim::RaiseException",
"prim::If",
"prim::Loop",
]
known_ops += list(self.convert_map.keys())
known_ops += list(qnn_torch.convert_map.keys())
missing = []
for op_name in op_names:
# Also take care of in-place variant ops like aten::relu_
if op_name not in known_ops and not (
op_name.endswith("_") and op_name[:-1] in known_ops
):
missing.append(op_name)
if missing:
msg = "The following operators are not implemented: {}".format(missing)
raise NotImplementedError(msg)
def convert_block(self, block, outputs):
"""Translate Torch "Block", used for prim::If and prim::Loop"""
ops = _get_operator_nodes(block.nodes())
ret_names = _get_input_names(block.returnNode())
return self.convert_operators(ops, outputs, ret_names)
def convert_if(self, if_node, outputs):
"""Translate Torch prim::If to Relay If"""
cond = outputs[if_node.inputsAt(0).debugName()]
blocks = list(if_node.blocks())
true_branch = self.convert_block(blocks[0], outputs)
false_branch = self.convert_block(blocks[1], outputs)
assert len(true_branch) == 1 and len(false_branch) == 1
return _expr.If(cond, true_branch[0], false_branch[0])
def convert_loop(self, loop_node, outputs):
"""Translate Torch prim::Loop to Relay while_loop"""
def get_input(index):
ivalue = loop_node.inputsAt(index)
inode = ivalue.node()
if inode.kind() == "prim::Constant":
return _expr.const(_get_constant(inode))
var_name = ivalue.debugName()
assert var_name in outputs
return _wrap_const(outputs[var_name])
# Refer to the spec for prim::Loop below
# https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/OVERVIEW.md#loops
# The first input: %max_trip_count
# The second input: %initial_condition
# The rest of input: loop variables
max_loop_count = get_input(0)
init_cond = get_input(1)
num_loop_var = len(list(loop_node.inputs())) - 2
init_vals = [get_input(i + 2) for i in range(num_loop_var)]
        # A while loop always has max_loop_count equal to int64 max.
        # max_loop_count.data (tvm.runtime.NDArray) is -1, so call _get_constant again.
is_while_loop = (
isinstance(max_loop_count, _expr.Constant)
and _get_constant(loop_node.inputsAt(0).node()) == sys.maxsize
)
if is_while_loop:
loop_iter_dtype = "bool"
# while loop with non input dependent condition such as while i < 10:
# init_cond is int, need to cast to bool to type check
if isinstance(init_cond, _expr.Constant):
init_cond = _op.cast(init_cond, "bool")
init_loop_iter_val = init_cond
else:
loop_iter_dtype = "int32"
# always count from 0
init_loop_iter_val = _expr.const(0, dtype="int32")
body_block = list(loop_node.blocks())[0]
block_input_names = _get_input_names(body_block)
num_block_inputs = len(block_input_names)
name_val_pairs = list(zip(block_input_names, [init_loop_iter_val] + init_vals))
outputs.update(name_val_pairs)
def get_var(name, val):
if val:
checked_type = self.infer_type_with_prelude(val)
if hasattr(checked_type, "shape"):
shape = get_const_tuple(checked_type.shape)
actual_shape = []
for dim in shape:
if isinstance(dim, int) and dim == 0:
actual_shape.append(Any())
else:
actual_shape.append(dim)
return _expr.var(name, shape=actual_shape, dtype=checked_type.dtype)
else:
return _expr.var(name, type_annotation=checked_type)
return _expr.var(name)
loop_iter_var = _expr.var(block_input_names[0], shape=(), dtype=loop_iter_dtype)
loop_vars = [get_var(name, val) for name, val in name_val_pairs[1:]]
# Add non constant free variables to loop variables to prevent code blow up
# Without this, if there are two for loops in a row, which often happens
# if the outer loop is unrolled, the computation corresponding to the first for loop
# is inlined inside loop body, turning O(N) + O(N) computation into O(N^2).
        # This issue was found when converting the Stacked LSTM test. Torch does not add the
        # output of the earlier loop into loop variables of the next loop.
# So the variable corresponding to the first loop output appears free in the second
# loop body.
free_vars = [
var
for var in _get_free_vars_from_block(body_block)
if var in outputs
and not isinstance(outputs[var], (_expr.Constant, int, float, str))
and outputs[var]
]
prev_outputs = {}
for name in free_vars:
prev_output = outputs[name]
new_loop_var = get_var(name, prev_output)
prev_outputs[name] = prev_output
outputs[name] = new_loop_var
loop_vars.append(new_loop_var)
init_vals.append(prev_output)
def cond(*current_vals):
i = current_vals[0]
if is_while_loop:
return _op.equal(i, _expr.const(True, "bool"))
return _op.less(i, max_loop_count)
def body(*current_vals):
# Update loop variables using the prev iteration outputs
assert len(current_vals) == num_block_inputs + len(free_vars)
for (i, val) in enumerate(current_vals):
if i < num_block_inputs:
outputs[block_input_names[i]] = val
else:
outputs[free_vars[i - num_block_inputs]] = val
block_outputs = self.convert_block(body_block, outputs)
block_outputs += [outputs[name] for name in free_vars]
if not is_while_loop:
# iter var increment implicit in torch, so do it manually
# for while loop, block_outputs[0] is already a boolean,
# the result of termination check
incr = _expr.const(1, dtype="int32")
block_outputs[0] = current_vals[0] + incr
return block_outputs
loop = while_loop(cond, [loop_iter_var] + loop_vars, body)
loop_val = loop(init_loop_iter_val, *init_vals)
# restore original output values for free vars
outputs.update(prev_outputs)
# The first element is a loop counter or boolean condition, ignore it
return [_expr.TupleGetItem(loop_val, i + 1) for i in range(num_loop_var)]
def convert_operators(self, operators, outputs, ret_names):
"""Convert each Torch IR operators to Relay equivalent"""
for node_name, op_node in operators:
operator = op_node.kind()
inputs = _get_op_inputs(op_node, outputs)
if operator == "prim::Constant":
outputs[node_name] = _get_constant(op_node)
elif operator == "prim::ListConstruct" and _should_construct_dynamic_list(op_node):
outputs[node_name] = self.convert_to_list_adt(inputs)
elif operator == "prim::ListConstruct":
# This assumes that no more elements will be appended to this list
# In this case, we keep the Python list
outputs[node_name] = inputs
elif operator == "prim::TupleConstruct":
                def _handle_nested_input(inputs):
inputs_list = []
for i, _ in enumerate(inputs):
if isinstance(inputs[i], list):
                            inputs_list.append(_handle_nested_input(inputs[i]))
else:
assert isinstance(inputs[i], _expr.Expr)
inputs_list.append(inputs[i])
return _expr.Tuple(inputs_list)
                outputs[node_name] = _handle_nested_input(inputs)
elif operator in ["prim::ListUnpack", "prim::TupleUnpack"]:
assert len(inputs) == 1
if isinstance(inputs[0], (list, _expr.TupleWrapper)):
unpacked = inputs[0]
else:
unpacked = _unpack_tuple(inputs[0])
outputs.update(zip(_get_output_names(op_node), unpacked))
elif operator == "prim::prim::RaiseException":
logger.warning("raising exceptions is ignored")
outputs[node_name] = None
elif operator == "prim::If":
if_out = self.convert_if(op_node, outputs)
outputs[node_name] = if_out
elif operator == "prim::Loop":
loop_out = self.convert_loop(op_node, outputs)
unpacked_names = _get_output_names(op_node)
assert len(loop_out) == len(unpacked_names)
outputs.update(zip(unpacked_names, loop_out))
else:
if operator not in self.convert_map:
# At this point, the only possible ops that are not in convert_map are
# in-place variant of ops like aten::relu_
assert operator.endswith("_")
logger.warning(
"An in-place op %s found, the result will not be correct "
"if the model depends on side-effects by this op.",
operator,
)
relay_op = self.convert_map[operator[:-1]]
else:
relay_op = self.convert_map[operator]
relay_out = relay_op(
inputs, _get_input_types(op_node, outputs, default_dtype=self.default_dtype)
)
self.record_output_type(relay_out)
if isinstance(relay_out, tuple):
# This is for torch operators that return multiple outputs
# See _adaptive_max_2d above for example
out_names = _get_output_names(op_node)
outputs.update(zip(out_names, relay_out))
else:
assert op_node.outputsSize() == 1
outputs[node_name] = relay_out
return [_wrap_const(outputs[ret_name]) for ret_name in ret_names]
def _pytorch_result_type(dtypes, non_tensor_inputs):
"""This promotes TVM dtypes like PyTorch would"""
import torch
dtype_map = {
"float64": torch.float64,
"float32": torch.float32,
"float16": torch.float16,
"bfloat16": torch.bfloat16,
"int64": torch.int64,
"int32": torch.int32,
"int16": torch.int16,
"int8": torch.int8,
"uint8": torch.uint8,
"bool": torch.bool,
}
if len(dtypes) > 0:
result_type = dtypes[0]
for dt in dtypes[1:]:
            if dt != result_type:  # only promote when dtypes differ; quantized dtypes
                # are not handled here (they cannot be promoted?)
result_type = _convert_data_type(
str(
torch.result_type(
torch.zeros((), dtype=dtype_map[result_type]),
torch.zeros((), dtype=dtype_map[dt]),
)
)
)
else:
result_type = "bool" # this is the smallest type...
for inp in non_tensor_inputs:
result_type = _convert_data_type(
str(torch.result_type(torch.zeros((), dtype=dtype_map[result_type]), inp))
)
return result_type
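# An illustrative sketch of the promotion above (relying on PyTorch's standard
# type-promotion rules via torch.result_type):
#   _pytorch_result_type(["float32", "int64"], [])  -> "float32"
#   _pytorch_result_type(["int32", "int64"], [])    -> "int64"
#   _pytorch_result_type(["int64"], [2.0])          -> "float32"  (a Python float promotes
#                                                     an integral tensor to the default float)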
# Helper functions for operator implementation
def _convert_dtype_value(val):
"""converts a PyTorch the PyTorch numeric type id to a torch scalar type."""
convert_torch_dtype_map = {
11: "torch.bool",
7: "torch.float64",
6: "torch.float32",
5: "torch.float16",
4: "torch.int64",
3: "torch.int32",
2: "torch.int16",
1: "torch.int8",
0: "torch.uint8",
None: "torch.int64",
} # Default is torch.int64
if val in convert_torch_dtype_map:
return _convert_data_type(convert_torch_dtype_map[val])
else:
msg = "Torch data type value %d is not handled yet." % (val)
raise NotImplementedError(msg)
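# For example, following the mapping above:
#   _convert_dtype_value(6)    -> "float32"   # torch.float32
#   _convert_dtype_value(4)    -> "int64"     # torch.int64
#   _convert_dtype_value(None) -> "int64"     # default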
def _convert_data_type(input_type, default_dtype=None):
"""converts the PyTorch scalar type input_type to a TVM dtype.
optionally, default_dtype can be a TVM dtype that is used
if input_type is None (but not when it is unknown)"""
if input_type is None and default_dtype is not None:
return default_dtype
input_type = input_type.lower()
if input_type in ["double", "float64", "torch.float64"]:
return "float64"
elif input_type in ["float", "float32", "torch.float32"]:
return "float32"
elif input_type in ["half", "float16", "torch.float16"]:
return "float16"
elif input_type in ["long", "int64", "torch.int64"]:
return "int64"
elif input_type in ["int", "int32", "torch.int32"]:
return "int32"
elif input_type in ["short", "int16", "torch.int16"]:
return "int16"
elif input_type in ["char", "int8", "torch.int8"]:
return "int8"
elif input_type in ["byte", "uint8", "torch.uint8"]:
return "uint8"
elif input_type in ["quint8", "torch.quint8"]:
return "quint8"
elif input_type in ["qint8", "torch.qint8"]:
return "qint8"
elif input_type in ["qint32", "torch.qint32"]:
return "qint32"
elif input_type in ["bool", "torch.bool"]:
return "bool"
elif input_type in ["str"]:
return "str"
else:
raise NotImplementedError("input_type {} is not handled yet".format(input_type))
return "float32" # Never reached
def _create_typed_const(data, dtype):
"""create a (scalar) constant of given value and dtype.
dtype should be a TVM dtype"""
if dtype == "float64":
typed_data = _expr.const(np.float64(data), dtype=dtype)
elif dtype == "float32":
typed_data = _expr.const(np.float32(data), dtype=dtype)
elif dtype == "float16":
typed_data = _expr.const(np.float16(data), dtype=dtype)
elif dtype == "int64":
typed_data = _expr.const(np.int64(data), dtype=dtype)
elif dtype == "int32":
typed_data = _expr.const(np.int32(data), dtype=dtype)
elif dtype == "int16":
typed_data = _expr.const(np.int16(data), dtype=dtype)
elif dtype == "int8":
typed_data = _expr.const(np.int8(data), dtype=dtype)
elif dtype == "uint8":
typed_data = _expr.const(np.uint8(data), dtype=dtype)
else:
raise NotImplementedError("input_type {} is not handled yet".format(dtype))
return typed_data
def _wrap_const(c):
if not isinstance(c, (_expr.Expr, list, tvm.tir.expr.Any)):
return _expr.const(c)
return c
def _run_jit_passes(graph, enable_lower_all_tuples=True):
"""The inline pass is necessary to unwrap prim::CallMethod"""
# pylint: disable=c-extension-no-member
import torch
if is_version_greater_than("1.5.1"):
# This is required for torchvision detection models from 1.6 above
# It is the same as _jit_pass_inline, except that it has some special
# case behaviors for some ops such as aten::__interpolate()
torch._C._jit_pass_onnx_function_substitution(graph)
else:
torch._C._jit_pass_inline(graph)
if enable_lower_all_tuples:
torch._C._jit_pass_lower_all_tuples(graph)
def _get_tensor_and_var(torch_tensor, name):
tensor = tvm.nd.array(torch_tensor.cpu().numpy())
var = _expr.var(name, shape=tensor.shape, dtype=tensor.dtype)
return tensor, var
def _get_output_name(node):
assert node.outputsSize() == 1
return node.output().debugName()
def _get_output_names(node):
return [output.debugName() for output in node.outputs()]
def _get_input_names(node_or_graph):
return [inp.debugName() for inp in node_or_graph.inputs()]
def _get_op_inputs(op_node, outputs):
return [outputs[name] for name in _get_input_names(op_node)]
def _get_node_type(node):
assert node.outputsSize() == 1
return node.output().type().kind()
def _get_uses(node):
uses = []
for output in node.outputs():
uses += output.uses()
return uses
def _get_users(node):
return [use.user for use in _get_uses(node)]
def _getattr_full_name(getattrs, sep="."):
return sep.join([getattr_attr_name(node) for node in getattrs])
def _get_pytorch_value_type(typ, default_dtype="float32"):
kind = typ.kind()
if kind == "TensorType":
if typ.scalarType() is None:
# Tensor's type can be unknown if we use torch.jit.script(...)
# Defaults can be passed in, if not it is float32
logger.warning("Untyped Tensor found, assume it is %s", default_dtype)
return default_dtype
else:
return _convert_data_type(typ.scalarType())
elif kind == "ListType":
return "ListType"
elif kind in ["IntType", "FloatType", "BoolType", "StringType", "OptionalType"]:
pt_dtype = str(typ).lower()
dtype = pt_dtype if pt_dtype == "OptionalType" else _convert_data_type(pt_dtype)
return dtype
else:
return "UnsupportedType"
def _get_input_types(op_node, outputs, default_dtype="float32"):
"""Returns a TVM dtype for each input nodes derived from the torch type"""
in_types = []
for inp in op_node.inputs():
if inp.node().kind() == "prim::GetAttr":
            # GetAttr nodes always return None when we call scalarType() on them
name = inp.debugName()
assert name in outputs
if isinstance(outputs[name], _expr.Var):
in_types.append(outputs[name].type_annotation.dtype)
else:
# For quantized modules with parameters, here we would get
# "prim::GetAttr[name="_packed_params"]". Since the dtype corresponding to
# _packed_params is not needed by quantized ops, we return an arbitrary type.
in_types.append(default_dtype)
else:
in_types.append(_get_pytorch_value_type(inp.type(), default_dtype=default_dtype))
return in_types
def _get_constant(node):
"""Retrieve a constant associated with this prim::Constant node"""
attribute_names = node.attributeNames()
num_attributes = len(attribute_names)
if num_attributes == 1:
attr_name = attribute_names[0]
ty = node.output().type().kind()
if ty == "IntType":
return node.i(attr_name)
elif ty == "BoolType":
return bool(node.i(attr_name))
elif ty in ["FloatType", "LongType"]:
return node.f(attr_name)
elif ty in ["TensorType", "CompleteTensorType"]:
tensor = node.t(attr_name)
if tensor.is_cuda:
tensor = tensor.cpu()
if len(tensor.shape) == 0: # tensor(0.1)
# TODO(t-vi): When is this needed?
return tensor.item()
return _wrap_const(tensor.numpy())
elif ty in ["DeviceObjType", "StringType"]:
return node.s(attr_name)
elif ty == "FunctionType":
return None
else:
raise NotImplementedError("Unsupported type: %s" % ty)
else:
assert num_attributes == 0
return None
def _get_operator_nodes(nodes):
"""Returns torch IR nodes that need conversion to Relay"""
ops = []
# Traverse nodes and add to graph
for node in nodes:
if node.outputsSize() == 0:
continue
if node.outputsSize() > 1:
node_name = "_".join(_get_output_names(node))
else:
node_name = _get_output_name(node)
if node.kind() != "prim::GetAttr":
ops.append((node_name, node))
return ops
def _get_relay_input_vars(graph, input_infos, prelude, is_module=True, default_dtype="float32"):
"""
Return Relay vars from input shapes and create entries based on
expected graph inputs - to allow translation
"""
graph_inputs = list(graph.inputs())
if is_module:
# a module has "self" as first input, which we do not need/want
graph_inputs = graph_inputs[1:]
if not isinstance(input_infos, list):
msg = "Graph inputs input_infos should be a list"
raise RuntimeError(msg)
if len(graph_inputs) != len(input_infos):
msg = "PyTorch has {} inputs and input_infos lists {}.".format(
len(graph_inputs), len(input_infos)
)
raise RuntimeError(msg)
def get_relay_ty(ishape, itype, pt_type):
if pt_type.kind() == "TensorType":
if not (_is_int_seq(ishape) or len(ishape) == 0):
msg = "Shape for Tensors must be lists of ints"
raise RuntimeError(msg)
if (pt_type.dim() is not None and pt_type.dim() != len(ishape)) or (
pt_type.sizes() is not None
and any([s1 != s2 for s1, s2 in zip(pt_type.sizes(), ishape)])
):
msg = "Shapes of input list and information in the graph do not match"
raise RuntimeError(msg)
pt_dtype = pt_type.scalarType()
if not pt_dtype and itype:
pt_dtype = itype
dtype = _convert_data_type(pt_dtype, default_dtype=default_dtype)
return TensorType(ishape, dtype)
elif pt_type.kind() == "TupleType":
if not isinstance(ishape, tuple):
msg = "Shapes for tuples must be tuples"
raise RuntimeError(msg)
return TupleType(
[get_relay_ty(elem, itype, pt_t) for elem, pt_t in zip(ishape, pt_type.elements())]
)
elif pt_type.kind() == "ListType":
if not isinstance(ishape, list):
msg = "Shapes for lists must be lists"
raise RuntimeError(msg)
pt_elemtype = pt_type.getElementType()
elem_tys = [get_relay_ty(s, itype, pt_elemtype) for s in ishape]
if len(elem_tys) > 0 and not all(map(lambda ty: ty == elem_tys[0], elem_tys)):
msg = "List elements need have identical types"
raise RuntimeError(msg)
rlist, _, _ = prelude.mod.get_type("List")
return rlist(elem_tys[0])
elif pt_type.kind() == "OptionalType":
# we do not support None yet, so we fill in the type
return get_relay_ty(ishape, itype, pt_type.getElementType())
# TODO: scalar inputs
raise NotImplementedError("unsupported input type")
input_vars = {}
new_input_infos = []
for num, inp in enumerate(input_infos):
if not isinstance(inp, tuple):
msg = "Graph input {} is not a tuple".format(num)
raise RuntimeError(msg)
if len(inp) != 2 or not isinstance(inp[0], str):
msg = (
"Graph input {} is not valid,"
" expected ('name', shape) or ('name', (shape, dtype))".format(inp)
)
raise RuntimeError(msg)
if not isinstance(inp[1], tuple) or len(inp[1]) == 0 or not isinstance(inp[1][-1], str):
new_input_infos.append((inp[0], (inp[1], default_dtype)))
else:
new_input_infos.append(inp)
input_types = [
(name, get_relay_ty(info[0], info[1], gi.type()))
for (name, info), gi in zip(new_input_infos, graph_inputs)
]
ir_inputs = [i.debugName() for i in graph_inputs]
for ir_input, (name, itype) in zip(ir_inputs, input_types):
inp = _expr.var(name, type_annotation=itype)
# Translate from graph input to user input name
input_vars[ir_input] = inp
return input_vars
def _unpack_tuple(tup):
def unpack(tup, num_fields):
return [_expr.TupleGetItem(tup, i) for i in range(num_fields)]
if isinstance(tup, _expr.Tuple):
return unpack(tup, len(tup.fields))
elif isinstance(tup.type_annotation, TupleType):
return unpack(tup, len(tup.type_annotation.fields))
# shouldn't happen
assert False
def _get_free_vars_from_block(block):
block_inp_names = _get_input_names(block)
bound_names = block_inp_names
free_vars = set()
for node in block.nodes():
inp_names = _get_input_names(node)
list_diff = [name for name in inp_names if name not in bound_names]
free_vars.update(list_diff)
bound_names += _get_output_names(node)
return free_vars
def get_use_chains(root_node, terminate=lambda _: False):
"""
Track a chain of users of this node forward, returning a list of chains
See get_attr_chains below for its usage
"""
def concat_lists(lists):
return itertools.chain.from_iterable(lists)
def inner(current, accum):
users = _get_users(current)
if not users or terminate(users):
return [accum]
return concat_lists([inner(nxt, accum + [nxt]) for nxt in users])
return inner(root_node, [root_node])
def get_attr_chains(root_getattr_node):
"""Returns chains of attribute access starting from root_getattr_node
For example, given attribute "block", as in "self.block" when "self" points
to the top level torch.nn.Module, it returns lists of attribute "chains",
e.g. ['block', '2'], ['block', '1'], ['block', '0', '_packed_params']
These sets of attributes form full attribute accessors. For example,
"self.block.1", "self.block.2" will return the second and third submodule,
and "self.block.0._packed_params" will return the parameters of the first
submodule.
"""
def terminate(users):
next_attrs = [user for user in users if user.kind() == "prim::GetAttr"]
return len(next_attrs) == 0
return get_use_chains(root_getattr_node, terminate)
def convert_params(graph, state_dict, use_parser_friendly_name=False):
"""
Return Relay vars and TVM NDArrays for input parameters
A chain of prim::GetAttr nodes is processed one at a time
"""
getattr_nodes = graph.findAllNodes("prim::GetAttr", recurse=True)
params = {}
param_tensors = {}
packed_param_map = {}
vars_by_name = {}
seen = set()
attr_name_sep = "_" if use_parser_friendly_name else "."
for node in getattr_nodes:
if _get_output_name(node) in seen:
continue
for getattrs in get_attr_chains(node):
seen.update(map(_get_output_name, getattrs))
full_attr = _getattr_full_name(getattrs, attr_name_sep)
full_attr_node_name = _get_output_name(getattrs[-1])
if full_attr.endswith("_packed_params"): # for quantized models
packed_param_map[full_attr_node_name] = full_attr
elif full_attr in state_dict:
if full_attr in vars_by_name:
var = vars_by_name[full_attr]
else:
torch_tensor = state_dict[full_attr]
tensor, var = _get_tensor_and_var(torch_tensor, full_attr)
param_tensors[full_attr] = tensor
vars_by_name[full_attr] = var
params[full_attr_node_name] = var
return params, param_tensors, packed_param_map
def get_all_op_names(graph):
"""Return all operator names in the input graph"""
nodes = list(graph.nodes())
prim_with_blocks = ["prim::If", "prim::Loop"]
for prim in prim_with_blocks:
prim_nodes = graph.findAllNodes(prim, recurse=True)
for prim_node in prim_nodes:
for block in prim_node.blocks():
nodes += block.nodes()
return set(node.kind() for node in nodes)
def from_pytorch(
script_module,
input_infos,
custom_convert_map=None,
default_dtype="float32",
use_parser_friendly_name=False,
keep_quantized_weight=False,
):
"""Load PyTorch model in the form of a scripted PyTorch model and convert into relay.
The companion parameters will be handled automatically.
Parameters
----------
script_module : TopLevelTracedModule object
TorchScripted PyTorch graph
        Note: We currently only support traces (i.e. torch.jit.trace(model, input))
input_infos : List of tuples
Can be (input name, input shape) or (input name, (input shape, input types))
Graph level input shape and type list
The same input names need to be used for deployment, so choose easy to
remember names (such as: input0, input1)
e.g.
[('input0', (1, 2)), ('input1', (3, 4))]
or
[('input0', ((1, 2), 'int')), ('input1', ((3, 4), 'float'))]
custom_convert_map : Dictionary of str to Relay op
A custom op conversion map in the same format as _convert_map above
    default_dtype : str
The default dtype to use when type information is not provided by PyTorch.
use_parser_friendly_name : bool
        When True, replace '.' with '_' in an original parameter name.
The Relay text parser treats a variable name followed by a period as a tuple element access,
so a variable name like "dense.weight" cannot be parsed correctly.
Use this option when you want to run the AnnotateSpans pass on the imported module.
keep_quantized_weight : bool
Return quantized weights and bias, rather than float ones. PyTorch stores quantized weights
in a custom format, so we cannot directly access 8 bit weights as Numpy arrays. We use
a PyTorch function to unpack quantized weights into float32 arrays and quantization
parameters. By default, we return float32 weights and rely on the QNN lowering and the
Relay constant folding pass to quantize weights at compile time. In BYOC use cases, however,
we cannot apply the constant folding pass on a QNN graph. If keep_quantized_weight is True,
we quantize weights in the frontend using a function that is equivalent to
qnn.op.quantize(...) operating on Numpy arrays.
Returns
-------
mod : tvm.IRModule
The module that optimizations will be performed on.
params : dict of str to tvm.runtime.NDArray
Dict of converted parameters stored in tvm.runtime.ndarray format
"""
import torch
mod = tvm.IRModule()
prelude = Prelude(mod)
enable_lower_all_tuples = True
converter = PyTorchOpConverter(prelude, default_dtype)
graph = script_module.graph.copy()
# Check if lower_all_tuples pass can be enabled
graph_inputs = list(graph.inputs())
for inp in graph_inputs:
if inp.type().kind() == "TupleType" or inp.type().kind() == "ListType":
enable_lower_all_tuples = False
break
_run_jit_passes(graph, enable_lower_all_tuples)
if custom_convert_map:
converter.update_convert_map(custom_convert_map)
op_names = get_all_op_names(graph)
converter.report_missing_conversion(op_names)
is_module = isinstance(script_module, torch.jit.ScriptModule)
params = script_module.state_dict() if is_module else {}
outputs = _get_relay_input_vars(
graph, input_infos, prelude, default_dtype=default_dtype, is_module=is_module
)
if use_parser_friendly_name:
new_names = [key.replace(".", "_") for key in params.keys()]
params = dict(zip(new_names, params.values()))
param_vars, tensors, packed_param_map = convert_params(graph, params, use_parser_friendly_name)
tvm_params = {k: tvm.nd.array(v) for k, v in tensors.items()}
outputs.update(param_vars)
ret_name = _get_input_names(graph.return_node())
# For quantized models
quantized_ops = set(["aten::quantize_per_tensor", "quantized::linear_dynamic"])
if len(quantized_ops.intersection(set(op_names))) > 0:
weight_quant_params = qnn_torch.get_weight_quant_params(
script_module, packed_param_map.values()
)
qnn_torch.inline_input_quant_params_for_fx(graph, tensors)
input_scales_for_bias = qnn_torch.add_input_quant_params_to_op_inputs(graph)
qnn_torch.add_quant_params_to_outputs(
outputs,
packed_param_map,
weight_quant_params,
input_scales_for_bias,
keep_quantized_weight,
)
qnn_torch.add_quant_params(tvm_params, weight_quant_params)
converter.update_convert_map(qnn_torch.convert_map)
outputs = converter.convert_operators(_get_operator_nodes(graph.nodes()), outputs, ret_name)
# ListConstruct kept original python list. Convert to tuple.
outputs = [_expr.Tuple(output) if isinstance(output, list) else output for output in outputs]
if len(outputs) > 1:
ret = _expr.Tuple(outputs)
else:
ret = outputs[0]
# Separate data inputs and parameters to make sure data inputs come first.
func_args = []
data_inputs = []
for arg in _analysis.free_vars(ret):
if arg.name_hint not in tvm_params.keys():
data_inputs.append(arg)
else:
func_args.append(arg)
func_args = data_inputs + func_args
mod["main"] = tvm.relay.Function(func_args, ret)
return transform.RemoveUnusedFunctions()(mod), tvm_params
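# A minimal usage sketch (the model, input name and shape below are hypothetical):
#
#   import torch, torchvision
#   model = torchvision.models.resnet18().eval()
#   inp = torch.randn(1, 3, 224, 224)
#   scripted = torch.jit.trace(model, inp).eval()
#   mod, params = from_pytorch(scripted, [("input0", (1, 3, 224, 224))])
#   # mod and params can then be passed to relay.build(...)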
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/pytorch_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-outside-toplevel, unused-argument, invalid-name
""" Common utilities used by PyTorch frontend """
from .. import expr
from .. import op
from ..dataflow_pattern import (
wildcard,
is_constant,
is_op,
rewrite,
is_tuple,
is_tuple_get_item,
is_if,
DFPatternCallback,
)
def is_version_greater_than(ver):
"""
Returns True if the local PyTorch version is greater
than the one given as an argument.
"""
import torch
from distutils.version import LooseVersion
torch_ver = torch.__version__
# PT version numbers can include +cu[cuda version code]
# and we don't want to include that in the comparison
if "+cu" in torch_ver:
torch_ver = torch_ver.split("+cu")[0]
return LooseVersion(torch_ver) > ver
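# For example, with torch.__version__ == "1.10.0+cu113" the "+cu113" suffix is stripped
# first, so is_version_greater_than("1.5.1") returns True.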
def getattr_attr_name(node):
attribute_names = node.attributeNames()
assert len(attribute_names) == 1
return node.s(attribute_names[0])
def dyn_strided_slice_pattern(inp, end):
"""A pattern to detect dynamic strided slice op."""
zero = is_constant()
cast_like = is_op("cast_like")(zero, is_constant())
less = is_op("less")(is_constant(), cast_like)
shape_of = is_op("shape_of")(inp)
cast_like = is_op("cast_like")(shape_of, is_constant())
add = is_op("add")(is_constant(), cast_like)
where = is_op("where")(less, add, is_constant())
return is_op("dyn.strided_slice")(inp, where, end, is_constant())
def batched_nms_pattern(boxes, scores, idxs, iou_threshold, num_boxes, indices):
"""A pattern to detect batched_nms function in torchvision
The inputs to this function, boxes, scores, idxs, iou_threshold are wildcard
patterns which can be used later in the rewriting to extract matched Relay fragments.
We want to detect the following PyTorch code snippet:
def batched_nms(boxes, scores, idxs, iou_threshold):
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))
boxes_for_nms = boxes + offsets[:, None]
keep = nms(boxes_for_nms, scores, iou_threshold)
return keep
Here is how PyTorch frontend lowers above PyTorch code. For simplicity, Relay ops for
dealing with dynamic strided_slice are omitted. %num_boxes, %indices are complex
expressions, but since we can use the wildcard part for them, we do not need to construct
their patterns.
%2 = expand_dims(%scores, axis=-1);
%3 = cast(%idxs, dtype="float32");
%4 = max(%boxes);
%5 = add(%4, 1f);
%6 = multiply(%3, %5);
%7 = strided_slice(%6, begin=[0], end=[4507], strides=[1]);
%8 = expand_dims(%7, axis=1);
%9 = add(%boxes, %8);
%10 = (%2, %9);
%11 = concatenate(%10, axis=-1);
%12 = expand_dims(%11, axis=0);
...
...
%17 = vision.non_max_suppression(%12, %num_boxes, %indices, -1, 0.7f, ...);
"""
one = is_constant()
# Equivalent PyTorch code from above snippet
# offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))
cast = is_op("cast")(idxs)
mx = is_op("max")(boxes)
add = is_op("add")(mx, one)
mul = is_op("multiply")(cast, add)
shape_of = is_op("shape_of")(mul)
cast = is_op("cast")(shape_of)
# Add offsets to the boxes
expand_dims = is_op("expand_dims")(mul)
add = is_op("add")(boxes, expand_dims)
# The rest of patterns correspond to the PyTorch frontend conversion
# function for torchvision::nms
score_expand_dims = is_op("expand_dims")(scores)
tup = is_tuple([score_expand_dims, add])
concat = is_op("concatenate")(tup)
data = is_op("expand_dims")(concat)
return is_op("vision.non_max_suppression")(
data, num_boxes, indices, is_constant(), iou_threshold
)
def topk_after_batch_nms_pattern(cond, true_branch, data, valid_count, indices, iou_threshold):
"""
Detect the following pattern used in torchvision detection models.
def batched_nms(...):
if boxes.numel() == 0:
return torch.empty((0,), dtype=torch.int64, device=boxes.device)
else:
...
return nms(boxes_for_nms, scores, iou_threshold)
keep = batched_nms(boxes, scores, lvl, self.nms_thresh)
keep = keep[:post_nms_top_k] # keep only topk scoring predictions
An equivalent Relay subgraph:
%1184 = if (%1117) {
...
} else {
...
%1172 = vision.non_max_suppression(%1167, %1168, %1171, -1, 0.7f, ...);
...
%1183 = dyn.strided_slice(%1174, %1180, %1182, ...);
cast(%1183, dtype="int64")
};
%1185 = strided_slice(%1184, begin=[0], end=[1000], strides=[1]);
"""
nms = is_op("vision.non_max_suppression")(
data, valid_count, indices, is_constant(), iou_threshold
)
indices = is_op("squeeze")(is_tuple_get_item(nms, 0))
size = is_op("squeeze")(is_tuple_get_item(nms, 1))
dyn_strided_slice = dyn_strided_slice_pattern(indices, size)
cast_i64 = is_op("cast")(dyn_strided_slice)
batched_nms_result = is_if(cond, true_branch, cast_i64)
return is_op("strided_slice")(batched_nms_result)
class MulticlassNMSRewrite(DFPatternCallback):
"""A callback to rewrite nms and restore batched nms."""
def __init__(self):
super().__init__()
# exprs to extract
self.boxes = wildcard()
self.scores = wildcard()
self.idxs = wildcard()
self.iou_threshold = wildcard()
self.num_boxes = wildcard()
self.indices = wildcard()
self.pattern = batched_nms_pattern(
self.boxes,
self.scores,
self.idxs,
self.iou_threshold,
self.num_boxes,
self.indices,
)
def convert_batched_nms(self, boxes, scores, idxs, iou_thres, num_boxes, indices):
"""Restore class-aware NMS using extracted class indices"""
scores = op.expand_dims(scores, axis=-1, num_newaxis=1)
idxs = op.expand_dims(idxs, axis=-1, num_newaxis=1)
idxs = op.cast(idxs, "float32")
data = op.concatenate([idxs, scores, boxes], -1)
data = op.expand_dims(data, 0, 1)
top_k = max_out_size = -1
out = op.vision.non_max_suppression(
data=data,
valid_count=num_boxes,
indices=indices,
max_output_size=max_out_size,
iou_threshold=iou_thres,
force_suppress=False,
top_k=top_k,
coord_start=2,
score_index=1,
id_index=0,
return_indices=True,
invalid_to_bottom=False,
)
return out.tuple_value
def callback(self, pre, post, node_map):
boxes = node_map[self.boxes][0]
scores = node_map[self.scores][0]
idxs = node_map[self.idxs][0]
iou_thres = node_map[self.iou_threshold][0]
num_boxes = node_map[self.num_boxes][0]
indices = node_map[self.indices][0]
return self.convert_batched_nms(boxes, scores, idxs, iou_thres, num_boxes, indices)
class PostNMSTopKRewrite(DFPatternCallback):
"""A callback to rewrite nms to exploit max_out_size parameter."""
def __init__(self):
super().__init__()
self.cond = wildcard()
self.true_branch = wildcard()
self.data = wildcard()
self.valid_count = wildcard()
self.indices = wildcard()
self.iou_threshold = wildcard()
self.pattern = topk_after_batch_nms_pattern(
self.cond,
self.true_branch,
self.data,
self.valid_count,
self.indices,
self.iou_threshold,
)
def rewrite_batch_nms_with_max_out_size(
self, cond, true_branch, data, valid_count, indices, iou_threshold, post_nms_topk
):
"""Use the detected post NMS topk parameter in NMS op."""
nms_ret = op.vision.non_max_suppression(
data=data,
valid_count=valid_count,
indices=indices,
max_output_size=post_nms_topk,
iou_threshold=iou_threshold,
force_suppress=False,
top_k=-1,
coord_start=2,
score_index=1,
id_index=0,
return_indices=True,
invalid_to_bottom=False,
)
size = op.squeeze(nms_ret[1], axis=[1])
data_slice = op.squeeze(nms_ret[0], axis=[0])
ret = op.strided_slice(data_slice, begin=expr.const([0]), end=size, slice_mode="size")
nms_result = op.cast(ret, "int64")
return expr.If(cond, true_branch, nms_result)
def callback(self, pre, post, node_map):
post_nms_topk = post.attrs.end[0].value
return self.rewrite_batch_nms_with_max_out_size(
node_map[self.cond][0],
node_map[self.true_branch][0],
node_map[self.data][0],
node_map[self.valid_count][0],
node_map[self.indices][0],
node_map[self.iou_threshold][0],
post_nms_topk,
)
def scatter_roi_align_result_pattern(levels, roi_align_results, num_scales):
"""Detect the Relay subgraph corresponding to the following PyTorch code
first_result = roi_align_results[0]
dtype, device = first_result.dtype, first_result.device
res = torch.zeros((levels.size(0), first_result.size(1),
first_result.size(2), first_result.size(3)),
dtype=dtype, device=device)
for level in range(len(roi_align_results)):
index = torch.where(levels == level)[0].view(-1, 1, 1, 1)
index = index.expand(index.size(0),
roi_align_results[level].size(1),
roi_align_results[level].size(2),
roi_align_results[level].size(3))
res = res.scatter(0, index, roi_align_results[level])
return res
"""
def do_where(levels, _):
idx_in_level = is_op("argwhere")(is_op("equal")(levels, is_constant()))
idx_in_level = is_op("split")(idx_in_level)
idx_in_level = is_tuple_get_item(idx_in_level, 0)
idx_in_level = is_op("squeeze")(idx_in_level)
idx_in_level = is_tuple_get_item(is_tuple([idx_in_level]), 0)
return idx_in_level
scatter_res = wildcard()
for i in range(num_scales):
# index = torch.where(levels == level)[0].view(-1, 1, 1, 1)
scatter_indices = do_where(levels, i)
scatter_indices = is_op("reshape")(scatter_indices)
# index = index.expand(index.size(0),
# unmerged_results[level].size(1),
# unmerged_results[level].size(2),
# unmerged_results[level].size(3))
scatter_indices = is_op("repeat")(scatter_indices)
scatter_indices = is_op("repeat")(scatter_indices)
scatter_indices = is_op("repeat")(scatter_indices)
scatter_res = is_op("scatter")(scatter_res, scatter_indices, roi_align_results[i])
return is_op("reshape")(scatter_res)
class ScatterRewrite(DFPatternCallback):
"""A callback to rewrite repeated scatters with a batched gather."""
def __init__(self, num_scales):
super().__init__()
self.num_scales = num_scales
self.levels = wildcard()
self.roi_align_results = []
for _ in range(num_scales):
self.roi_align_results.append(wildcard())
self.pattern = scatter_roi_align_result_pattern(
self.levels, self.roi_align_results, num_scales
)
def convert_scatter_to_gather(self, levels, roi_align_results):
"""Replace the detected scatter loop with the following PyTorch code
indices_per_level = []
for level in range(num_scales):
idx_in_level = torch.where(levels == level)[0]
            indices_per_level.append(idx_in_level)
stacked_features = torch.cat(roi_align_results, dim=0)
stacked_indices = torch.cat(indices_per_level, dim=0)
        argsort_indices = torch.argsort(stacked_indices)
return stacked_features[argsort_indices, :]
"""
        # Collect indices and concat them
indices_per_level = []
for i in range(self.num_scales):
equal = op.equal(levels, expr.const(i, dtype="int64"))
argwhere = op.argwhere(equal)
split = op.split(argwhere, indices_or_sections=1, axis=1)
squeeze = op.squeeze(split[0], axis=[1])
indices = op.cast(squeeze, dtype="int64")
indices_per_level.append(indices)
indices_concat = op.concatenate(indices_per_level, 0)
# Concat roi align results per level, and argsort indices
# To prepare for a batched gather
roi_align_results_concat = op.concatenate(roi_align_results, 0)
argsort_indices = op.cast(op.argsort(indices_concat), dtype="int64")
# Permute rows by argsorted indices
permuted = op.take(roi_align_results_concat, argsort_indices, axis=0)
return op.reshape(permuted, [0, -1, 1, 1])
def callback(self, pre, post, node_map):
levels = node_map[self.levels][0]
roi_align_results = [node_map[feat][0] for feat in self.roi_align_results]
return self.convert_scatter_to_gather(levels, roi_align_results)
def rewrite_nms_to_batched_nms(mod):
"""Rewrite the input graph to replace non maximum surpression
in torchvision that does not take class id into account with the one
that avoids IOU tests between different classes.
"""
mod["main"] = rewrite(MulticlassNMSRewrite(), mod["main"])
return mod
def rewrite_batched_nms_with_max_out_size(mod):
"""Rewrite the input graph to detect slicing after batched nms and
use the slicing size as the parameter max_out_size in NMS.
"""
mod["main"] = rewrite(PostNMSTopKRewrite(), mod["main"])
return mod
def rewrite_scatter_to_gather(mod, num_scales):
"""Rewrite the input graph to replace a repeated scatter loop with
a batched gather. The scatter loop is used in torchvision MultiScaleRoIAlign
to merge roi_align results for all scales. The scatter is used to emulate
inplace updates.
"""
mod["main"] = rewrite(ScatterRewrite(num_scales), mod["main"])
return mod
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/qnn_torch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-outside-toplevel
""" Functions to convert quantized torch models to QNN """
import numpy as np
import tvm
from tvm import relay
from tvm.relay import expr as _expr
from tvm.relay import op as _op
from tvm.relay.frontend.common import infer_shape
from .common import logger
from .pytorch_utils import is_version_greater_than, getattr_attr_name
class QNNParam(object):
"""A placeholder for weight quantization parameters"""
def __init__(self, weight, bias, scale, zero_point):
self.weight = weight
self.bias = None if bias is None else bias.detach().numpy()
self.scale = _expr.const(scale)
self.zero_point = _expr.const(zero_point, dtype="int32")
class ConvPackedParam(QNNParam):
"""A placeholder for quantized conv2d op attributes
As of PyTorch 1.6, attributes of quantized conv2d ops, like
stride, padding etc are stored in ConvPackedParams objects,
together with weights and quantization parameters
"""
def __init__(
self,
weight_np,
bias,
scale,
zero_point,
stride,
padding,
dilation,
groups,
output_padding,
):
super().__init__(weight_np, bias, scale, zero_point)
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
# Used only for conv_transpose2d
self.output_padding = output_padding
def _get_quant_params(qweight):
import torch
weight_np = qweight.dequantize().numpy()
if qweight.qscheme() == torch.per_tensor_affine:
return weight_np, qweight.q_scale(), int(qweight.q_zero_point())
scales = qweight.q_per_channel_scales().numpy()
zero_points = qweight.q_per_channel_zero_points().numpy()
# This is an assumption posed by QNN
msg = "The values of zero points should be all zero for per channel"
assert np.all(zero_points == 0), msg
return weight_np, scales, 0
def make_qnn_param(qweight, bias):
weight_np, scale, zero_point = _get_quant_params(qweight)
return QNNParam(weight_np, bias, scale, zero_point)
def make_conv_packed_param(qweight, bias, packed_params):
weight_np, scale, zero_point = _get_quant_params(qweight)
stride = packed_params.stride()
padding = packed_params.padding()
dilation = packed_params.dilation()
groups = packed_params.groups()
output_padding = packed_params.output_padding()
return ConvPackedParam(
weight_np,
bias,
scale,
zero_point,
stride,
padding,
dilation,
groups,
output_padding,
)
def get_weight_quant_params(script_module, packed_param_names):
"""Retrieve and unpack weight parameters from quantized modules"""
import torch
param_name = "_packed_params"
quant_params = {}
def filter_func(named_module):
m = named_module[1]
return isinstance(m, torch.jit.RecursiveScriptModule) and (
("Conv" in m.original_name) or (m.original_name == "LinearPackedParams")
)
for name, m in filter(filter_func, script_module.named_modules()):
key = name + "." + param_name
state_dict = m.state_dict()
if key not in packed_param_names:
continue
if len(state_dict) == 0 and not hasattr(m, param_name):
# for v1.6 and above
# This case seems to happen if a model is serialized
# and loaded back
# This module can be safely ignored
continue
if len(state_dict) == 0 and hasattr(m, param_name):
# for v1.6 and above
packed_params = m._packed_params
else:
assert len(state_dict) == 1
packed_params = list(state_dict.values())[0]
if "Conv" in m.original_name and len(state_dict) == 0:
qweight, bias = torch.ops.quantized.conv2d_unpack(packed_params)
quant_params[key] = make_conv_packed_param(qweight, bias, packed_params)
elif "Conv" in m.original_name:
qweight, bias = torch.ops.quantized.conv2d_unpack(packed_params)
quant_params[key] = make_qnn_param(qweight, bias)
elif m.original_name == "LinearPackedParams":
qweight, bias = torch.ops.quantized.linear_unpack(packed_params)
quant_params[key] = make_qnn_param(qweight, bias)
return quant_params
def quantize_numpy(weight, scale, zero_point, out_dtype_np):
iinfo = np.iinfo(out_dtype_np)
clip_min = iinfo.min
clip_max = iinfo.max
if len(scale.shape) > 0:
scale = np.reshape(scale, [weight.shape[0]] + [1] * (len(weight.shape) - 1))
transformed = zero_point + weight / scale
return np.clip(np.round(transformed), clip_min, clip_max).astype(out_dtype_np)
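# A small illustration with arbitrary values: quantizing [0.1, -0.2] with scale 0.05 and
# zero point 0 to int8 rounds [2.0, -4.0] and clips to [-128, 127]:
#   quantize_numpy(np.array([0.1, -0.2]), np.float32(0.05), 0, np.int8) -> [2, -4]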
def add_quant_params_to_outputs(
outputs, packed_param_map, quant_params, input_scales_for_bias, keep_quantized_weight=False
):
"""
Add quant params to outputs so that they can be referenced by other
ops later. Weights are quantized here.
"""
for node_name, packed_param_name in packed_param_map.items():
qparam = quant_params[packed_param_name]
weight_scale = _get_numpy(qparam.scale)
param_prefix = packed_param_name[: -len("._packed_params")]
if keep_quantized_weight:
qparam.weight_var = _expr.var(
param_prefix + "_weight", shape=qparam.weight.shape, dtype="int8"
)
qparam.weight = quantize_numpy(
qparam.weight, weight_scale, _get_numpy(qparam.zero_point), np.int8
)
qweight = qparam.weight_var
else:
qparam.weight_var = _expr.var(
param_prefix + "_weight", shape=qparam.weight.shape, dtype="float32"
)
qweight = relay.qnn.op.quantize(
qparam.weight_var, qparam.scale, qparam.zero_point, out_dtype="int8", axis=0
)
if qparam.bias is not None:
float_bias_var = _expr.var(
param_prefix + "_bias", shape=qparam.bias.shape, dtype="float32"
)
if node_name not in input_scales_for_bias:
# This case is for dynamic quantization, where the input activation scale is
# unknown until runtime.
qparam.bias_var = float_bias_var
qbias = qparam.bias_var
elif keep_quantized_weight:
qparam.bias_var = _expr.var(
param_prefix + "_bias", shape=qparam.bias.shape, dtype="int32"
)
qparam.bias = quantize_numpy(
qparam.bias, input_scales_for_bias[node_name] * weight_scale, 0, np.int32
)
qbias = qparam.bias_var
else:
qparam.bias_var = float_bias_var
qbias = relay.qnn.op.quantize(
qparam.bias_var,
_expr.const(input_scales_for_bias[node_name] * weight_scale),
_expr.const(0, "int32"),
out_dtype="int32",
axis=0,
)
else:
qbias = None
quant_params[packed_param_name] = qparam
params = [qweight, qparam.scale, qparam.zero_point, qbias]
if isinstance(quant_params[packed_param_name], ConvPackedParam):
params += [
qparam.stride,
qparam.padding,
qparam.dilation,
qparam.groups,
qparam.output_padding,
]
outputs[node_name] = params
def _get_quant_param_for_input(input_value):
"""
We want to know the input scale and zp of this input_value, since
input quant params are not explicitly passed around in torch (they
are embedded in a QTensor data structure, not visible statically).
We know that it is quantized using output scale and zp
of some previous quantized op. The purpose of this function
is to find that pair of parameters.
"""
# Indices for output scale and zp
# For example, in quantized::conv2d(%input, %1, %2, %3, %4, %5, %6, %7),
# 6th and 7th arg are output scale and zp respectively.
# PyTorch 1.6 changed qconv API
if is_version_greater_than("1.5.1"):
qconv_indices = (2, 3)
else:
qconv_indices = (6, 7)
output_quant_param_indices = {
"aten::quantize_per_tensor": (1, 2),
"quantized::conv2d": qconv_indices,
"quantized::conv2d_relu": qconv_indices,
"quantized::linear": (2, 3),
"quantized::linear_relu": (2, 3),
"quantized::add_relu": (2, 3),
"quantized::add": (2, 3),
"quantized::mul_relu": (2, 3),
"quantized::mul": (2, 3),
"quantized::cat": (2, 3),
"quantized::mul_scalar": (2, 3),
"quantized::add_scalar": (2, 3),
"quantized::hardswish": (1, 2),
"quantized::conv_transpose2d": qconv_indices,
"quantized::leaky_relu": (3, 4),
"aten::sigmoid": (1, 2),
}
def dfs(current_node):
# trace back to find the producer of this input value
current_op = current_node.kind()
if current_op in output_quant_param_indices:
indices = output_quant_param_indices[current_op]
scale = current_node.inputsAt(indices[0])
zp = current_node.inputsAt(indices[1])
return scale, zp
# Trace back earlier nodes, dfs order
# Assume quantized tensor comes earlier in the args
for arg in current_node.inputs():
return dfs(arg.node())
# If input_value is not quantized, we reach here.
return None, None
return dfs(input_value.node())
def _get_add_scalar_output_quant_param(input_scale, input_zero_point, scalar):
"""
Determine the output scale and zp of quantized::add_scalar op
This is used for mobilenet v3
Refer to aten/src/ATen/native/quantized/cpu/qadd.cpp
The names of variables are the same as torch impl
"""
q_min = 0
q_max = 255
s = input_scale
z = input_zero_point
c = scalar
c_q = round(c / s)
if q_min > z - c_q:
s_prime = (float(q_max) - (z - c_q)) / (float(q_max) - q_min) * s
z_prime = q_min
elif q_max < z - c_q:
s_prime = (float(z - c_q) - q_min) / (float(q_max) - q_min) * s
z_prime = q_max
else:
s_prime = s
z_prime = z - c_q
return s_prime, z_prime
def _get_mul_scalar_output_quant_param(input_scale, input_zero_point, scalar):
"""
Determine the output scale and zp of quantized::mul_scalar op
This is used for mobilenet v3
Refer to aten/src/ATen/native/quantized/cpu/qmul.cpp
The names of variables are the same as torch impl
"""
q_min = 0
q_max = 255
self_scale = input_scale
self_zero_point = input_zero_point
other_val = scalar
if other_val > 0.0:
s_prime = other_val * self_scale
z_prime = self_zero_point
elif other_val == 0.0:
s_prime = 1.0
z_prime = 0
else:
s_prime = abs(other_val) * self_scale
z_prime = q_max - (self_zero_point - q_min)
return s_prime, z_prime
def _add_output_quant_params_to_scalar_op(node, graph, input_scale, input_zero_point, scalar):
"""
The output scale and zp of {add,mul}_scalar ops are not explicit in the IR.
They are required for _get_quant_param_for_input above to work correctly.
So we calculate these params the same way torch does, and create new
constant nodes in the input IR. We also add these params to the inputs of
the scalar op.
For example,
%6 : float = prim::Constant[value=3.]()
%input : QUInt8(1, 3, 224, 224) = quantized::add_scalar(%x.1, %6)
becomes
%6 : float = prim::Constant[value=3.]()
%7 : float = prim::Constant[value=0.015686161816120148]()
%8 : int = prim::Constant[value=0]()
%input : UInt8(1, 3, 224, 224) = quantized::add_scalar(%x.1, %6, %7, %8)
%7 and %8 are newly created output scale and zp constant nodes
"""
# pylint: disable=c-extension-no-member
import torch
operator = node.kind()
if operator == "quantized::mul_scalar":
out_scale, out_zero_point = _get_mul_scalar_output_quant_param(
input_scale, input_zero_point, scalar
)
elif operator == "quantized::add_scalar":
out_scale, out_zero_point = _get_add_scalar_output_quant_param(
input_scale, input_zero_point, scalar
)
else:
raise NotImplementedError("unsupported scalar op: %s" % operator)
# create new constant nodes and add them to graph
out_scale_node = graph.create("prim::Constant")
out_zero_point_node = graph.create("prim::Constant")
out_scale_node.insertBefore(node)
out_zero_point_node.insertBefore(node)
out_scale_node.f_("value", out_scale)
out_zero_point_node.i_("value", out_zero_point)
out_scale_node.output().setType(torch._C.FloatType.get())
out_zero_point_node.output().setType(torch._C.IntType.get())
node.addInput(out_scale_node.output())
node.addInput(out_zero_point_node.output())
def _add_output_quant_params_to_sigmoid_op(node, graph):
"""
Refer to aten/src/ATen/native/quantized/cpu/qsigmoid.cpp,
the output scale and zp of sigmoid op are two fixed numbers.
So we need to make two new constant nodes in the input IR and
add these params to the inputs of sigmoid op.
"""
# pylint: disable=c-extension-no-member
import torch
# assume the output dtype is uint8
out_scale = 1.0 / 256
out_zero_point = 0
# create new constant nodes and add them to graph
out_scale_node = graph.create("prim::Constant")
out_zero_point_node = graph.create("prim::Constant")
out_scale_node.insertBefore(node)
out_zero_point_node.insertBefore(node)
out_scale_node.f_("value", out_scale)
out_zero_point_node.i_("value", out_zero_point)
out_scale_node.output().setType(torch._C.FloatType.get())
out_zero_point_node.output().setType(torch._C.IntType.get())
node.addInput(out_scale_node.output())
node.addInput(out_zero_point_node.output())
def add_input_quant_params_to_op_inputs(graph):
"""
In Torch, input quant params are not explicitly passed around
Instead, they are stored in a QTensor data structure, and retrieved
at runtime by each quantized op.
However, they need to be known statically for QNN translation.
To work around this and simplify the translation of inputs, we manually add
input quant params to inputs of Torch quantized operators listed below.
See _quantized_conv2d() below for example of why this is helpful.
For example,
%input : QUInt8(1, 512, 7, 7) = quantized::add(%x.8, %x.9, %434, %435)
becomes
%395 : float = prim::Constant[value=0.036212071776390076]()
%396 : int = prim::Constant[value=0]()
%430 : float = prim::Constant[value=0.16080744564533234]()
%431 : int = prim::Constant[value=42]()
%input : QUInt8(1, 512, 7, 7) = quantized::add(%x.8, %x.9, %434, %435,
%430, %431, %395, %396)
%434, %435 are output scale and zp of quantized::add op
%430, %431, %395, %396 are two pairs of input (scale, zp) for two tensors
added by this function
"""
# How many quantized tensors does each op take as inputs?
# A pair of (scale, zp) for each input quantized tensor will be added
# to the input nodes
num_quantized_inputs = {
"quantized::conv2d": 1,
"quantized::conv2d_relu": 1,
"quantized::linear": 1,
"quantized::linear_relu": 1,
"quantized::add_relu": 2,
"quantized::add": 2,
"quantized::mul_relu": 2,
"quantized::mul": 2,
"aten::dequantize": 1,
"aten::mean": 1,
"aten::sigmoid": 1,
"aten::upsample_nearest2d": 1,
"aten::upsample_bilinear2d": 1,
"aten::relu_": 1,
"aten::relu": 1,
"quantized::add_scalar": 1,
"quantized::mul_scalar": 1,
"quantized::relu6": 1,
"quantized::hardswish": 1,
"aten::hardsigmoid": 1,
"quantized::conv_transpose2d": 1,
"quantized::leaky_relu": 1,
}
need_input_quant_param = set(num_quantized_inputs.keys())
need_input_quant_param.add("quantized::cat")
input_scales_for_bias = {}
for node in graph.nodes():
operator = node.kind()
if operator not in need_input_quant_param:
continue
input_scales = []
input_zero_points = []
if operator == "quantized::cat":
# the number of inputs to concat is not constant
# so handle it separately
inputs = node.inputsAt(0).node().inputs()
for inp in inputs:
scale, zp = _get_quant_param_for_input(inp)
input_scales.append(scale)
input_zero_points.append(zp)
else:
for i in range(num_quantized_inputs[operator]):
scale, zp = _get_quant_param_for_input(node.inputsAt(i))
if scale is not None and zp is not None:
input_scales.append(scale)
input_zero_points.append(zp)
if operator in ["quantized::add_scalar", "quantized::mul_scalar"]:
scalar = node.inputsAt(1).node().f("value")
inp_scale = input_scales[0].node().f("value")
inp_zero_point = input_zero_points[0].node().i("value")
# see the comments in this function above
_add_output_quant_params_to_scalar_op(node, graph, inp_scale, inp_zero_point, scalar)
if operator == "aten::sigmoid":
_add_output_quant_params_to_sigmoid_op(node, graph)
for scale, zp in zip(input_scales, input_zero_points):
node.addInput(scale)
node.addInput(zp)
if "quantized::conv" in operator or "quantized::linear" in operator:
# This is required for quantizing the bias
assert len(input_scales) == 1, "One quantized parameter expected for qconv or qlinear."
input_scales_for_bias[node.inputsAt(1).debugName()] = input_scales[0].node().f("value")
return input_scales_for_bias
def add_quant_params(params, quant_params):
"""Add quant parameters to TVM param map"""
for qparam in quant_params.values():
params[qparam.weight_var.name_hint] = tvm.nd.array(qparam.weight)
if qparam.bias is not None:
params[qparam.bias_var.name_hint] = tvm.nd.array(qparam.bias)
def inline_input_quant_params_for_fx(graph, params):
"""
Canonicalize input scale and zero point access for FX-quantized graphs.
We expect input qparams to aten::quantize_per_tensor to be prim::Constant, but that's
not the case for FX-based quantized models as shown below.
We replace prim::GetAttr with prim::Constant so that FX-based quantized models can be
converted in the same way as eager-mode based quantized models.
Before:
%pan_input_zero_point_1 : Tensor = prim::GetAttr[name="pan_input_zero_point_1"](%backbone)
%pan_input_scale_1 : Tensor = prim::GetAttr[name="pan_input_scale_1"](%backbone)
...
%quantize_per_tensor_2 ... = aten::quantize_per_tensor(...,
%pan_input_scale_1, %pan_input_zero_point_1, ...)
After:
%2402 : int = prim::Constant[value=0]()
%2403 : float = prim::Constant[value=1.]()
%quantize_per_tensor_2 ... = aten::quantize_per_tensor(..., %2403, %2402, ...)
"""
# pylint: disable=c-extension-no-member
import torch
def get_full_attr_name(current):
current_attr = getattr_attr_name(current)
inputs = list(current.inputs())
if len(inputs) == 1 and inputs[0].node().kind() == "prim::GetAttr":
return get_full_attr_name(inputs[0].node()) + "." + current_attr
return current_attr
for node in graph.findAllNodes("prim::GetAttr", recurse=True):
out_name = node.output().debugName()
if "_scale" in out_name or "_zero_point" in out_name:
full_attr = get_full_attr_name(node)
assert full_attr in params, "%s not found in param dict." % full_attr
param_np = params[full_attr].numpy()
new_const_node = graph.create("prim::Constant")
new_const_node.insertBefore(node)
if "_scale" in out_name:
new_const_node.f_("value", param_np)
new_const_node.output().setType(torch._C.FloatType.get())
else:
new_const_node.i_("value", param_np.item())
new_const_node.output().setType(torch._C.IntType.get())
node.replaceAllUsesWith(new_const_node)
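# Helper: run `func` on an int32 upcast of the quantized data, then narrow the
# result back to uint8.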
def apply_with_upcast(data, func):
inp = _op.cast(data, dtype="int32")
out = func(inp)
return _op.cast(out, "uint8")
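# Helper: dequantize -> run the fp32 op -> requantize with the same input qparams.
# Used as a fallback for ops handled in fp32 rather than integer arithmetic.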
def apply_with_fp32_fallback(data, input_scale, input_zero_point, func_fp32):
dequantized = relay.qnn.op.dequantize(data, input_scale, input_zero_point)
out = func_fp32(dequantized)
return relay.qnn.op.quantize(out, input_scale, input_zero_point, out_dtype="uint8", axis=1)
def quantized_relu(data, input_zero_point):
# refer to aten/src/ATen/native/quantized/cpu/qrelu.cpp
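# In the quantized domain relu is max(x, zero_point): values below the zero point
# correspond to negative real values and are clamped to it.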
zp = _op.cast(input_zero_point, dtype="uint8")
return _op.tensor.maximum(data, zp)
def quantized_sigmoid(inputs):
data = inputs[0]
output_scale = _expr.const(inputs[1])
output_zero_point = _expr.const(inputs[2])
input_scale = _expr.const(inputs[3])
input_zero_point = _expr.const(inputs[4])
return relay.qnn.op.sigmoid(
data, input_scale, input_zero_point, output_scale, output_zero_point
)
def _quantize_per_tensor():
def _impl(inputs, _):
dim = len(infer_shape(inputs[0]))
if dim > 1:
axis = 1
else:
axis = 0
return relay.qnn.op.quantize(
inputs[0], _expr.const(inputs[1]), _expr.const(inputs[2]), out_dtype="uint8", axis=axis
)
return _impl
def _dequantize():
def _impl(inputs, _):
assert len(inputs) == 3, "Input quant params not found in op inputs"
inp_scale = _expr.const(inputs[1])
inp_zero_point = _expr.const(inputs[2])
return relay.qnn.op.dequantize(inputs[0], inp_scale, inp_zero_point)
return _impl
def _get_numpy(relay_const_scalar):
return relay_const_scalar.data.numpy()
def _get_scalar(relay_const_scalar):
return _get_numpy(relay_const_scalar).item(0)
def _do_bias_and_requantize(
output, bias, input_scale, weight_scale, output_scale, output_zero_point, with_relu
):
"""Output processing for conv and linear"""
# this is a vector for per channel case
requant_input_scale = _expr.const(_get_numpy(input_scale) * _get_numpy(weight_scale))
# Torch does bias add and requantize scale in fp32
# refer to third_party/fbgemm/include/fbgemm/OutputProcessing-inl.h
# Instead, we do bias add in int32 and use qnn requantize, which needs
# integer input.
# We observed no loss in accuracy doing it this way, and it is better
# for tvm because bias quantization can be done at compile time.
# In contrast, the torch way requires rounding of activations at runtime.
if bias is not None:
requantize_input = _op.nn.bias_add(output, bias)
else:
requantize_input = output
requantized = relay.qnn.op.requantize(
requantize_input,
requant_input_scale,
relay.const(0, "int32"),
output_scale,
output_zero_point,
out_dtype="int32",
axis=1,
)
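# ReLU in the quantized domain is a clip at the output zero point; without relu
# we only clamp to the uint8 range [0, 255].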
clip_min = 0
if with_relu:
clip_min = _get_scalar(output_zero_point)
clip = _op.tensor.clip(requantized, clip_min, 255.0)
return _op.cast(clip, dtype="uint8")
def _quantized_conv2d(with_relu=False):
def _impl(inputs, _):
# refer to src/ATen/native/quantized/cpu/qconv.cpp
# inputs[0]: input tensor
# inputs[1]: (weight, scale, zero_point, bias)
# inputs[2-5]: stride, padding, dilation, groups
# inputs[6]: output_scale
# inputs[7]: output_zero_point
# inputs[8]: input_scale (added manually by frontend)
# inputs[9]: input_zero_point (added manually by frontend)
conv_params = inputs[1]
weight = conv_params[0]
weight_scale = conv_params[1]
weight_zero_point = conv_params[2]
bias = conv_params[3]
if len(conv_params) > 4:
# Torch 1.6 or newer case
strides = conv_params[4]
padding = conv_params[5]
dilation = conv_params[6]
groups = conv_params[7]
output_scale = _expr.const(inputs[2])
output_zero_point = _expr.const(inputs[3])
assert len(inputs) == 6, "Input quant params not found in op inputs"
# These are manually added by add_input_quant_params_to_op_inputs above
# In torch, they are retrieved from QTensor data structure at runtime
input_scale = _expr.const(inputs[4])
input_zero_point = _expr.const(inputs[5])
else:
strides = inputs[2]
padding = inputs[3]
dilation = inputs[4]
groups = inputs[5]
output_scale = _expr.const(inputs[6])
output_zero_point = _expr.const(inputs[7])
assert len(inputs) == 10, "Input quant params not found in op inputs"
input_scale = _expr.const(inputs[8])
input_zero_point = _expr.const(inputs[9])
weight_shape = infer_shape(weight)
kernel_size = (weight_shape[2], weight_shape[3])
out_channels = weight_shape[0]
if padding[0] != 0 or padding[1] != 0:
pad_val = _get_scalar(input_zero_point)
inp = _op.nn.pad(
inputs[0],
pad_width=((0, 0), (0, 0), (padding[0], padding[0]), (padding[1], padding[1])),
pad_value=float(pad_val),
)
else:
inp = inputs[0]
# padding is (0, 0) because we did explicit pad op with
# pad value being zero point above
conv_out = relay.qnn.op.conv2d(
inp,
weight,
input_zero_point,
weight_zero_point,
input_scale,
weight_scale,
kernel_size=kernel_size,
dilation=dilation,
strides=strides,
padding=(0, 0),
groups=groups,
channels=out_channels,
)
return _do_bias_and_requantize(
conv_out, bias, input_scale, weight_scale, output_scale, output_zero_point, with_relu
)
return _impl
def _linear(with_relu=False):
# similar to conv
def _impl(inputs, _):
weight = inputs[1][0]
weight_scale = inputs[1][1]
weight_zero_point = inputs[1][2]
output_scale = _expr.const(inputs[2])
output_zero_point = _expr.const(inputs[3])
assert len(inputs) == 6, "Input quant params not found in op inputs"
# Manually added by add_input_quant_params_to_op_inputs above
input_scale = _expr.const(inputs[4])
input_zero_point = _expr.const(inputs[5])
weight_shape = infer_shape(weight)
dense = relay.qnn.op.dense(
inputs[0],
weight,
input_zero_point,
weight_zero_point,
input_scale,
weight_scale,
units=weight_shape[0],
)
bias_var = inputs[1][3]
return _do_bias_and_requantize(
dense, bias_var, input_scale, weight_scale, output_scale, output_zero_point, with_relu
)
return _impl
def _binop(relay_op, with_relu=False, fp32_piggy_back=False):
def qnn_impl(
lhs,
rhs,
input_scale_lhs,
input_zero_point_lhs,
input_scale_rhs,
input_zero_point_rhs,
output_scale,
output_zero_point,
):
qnn_out = relay_op(
lhs,
rhs,
input_scale_lhs,
input_zero_point_lhs,
input_scale_rhs,
input_zero_point_rhs,
output_scale,
output_zero_point,
)
if with_relu:
clip_min = _get_scalar(output_zero_point)
return _op.tensor.clip(qnn_out, clip_min, 255)
return qnn_out
# refer to aten/src/ATen/native/quantized/cpu/{qadd, qmul}.cpp
# they piggy back to fp32 math: dequantize -> fp32 math -> quantize
def torch_impl(
lhs,
rhs,
input_scale_lhs,
input_zero_point_lhs,
input_scale_rhs,
input_zero_point_rhs,
output_scale,
output_zero_point,
):
if isinstance(lhs, _expr.Call) and lhs.op.name == "qnn.quantize":
lhs = lhs.args[0]
else:
lhs = relay.qnn.op.dequantize(lhs, input_scale_lhs, input_zero_point_lhs)
if isinstance(rhs, _expr.Call) and rhs.op.name == "qnn.quantize":
rhs = rhs.args[0]
else:
rhs = relay.qnn.op.dequantize(rhs, input_scale_rhs, input_zero_point_rhs)
fp32_out = relay_op(lhs, rhs)
if with_relu:
fp32_out = _op.nn.relu(fp32_out)
return relay.qnn.op.quantize(
fp32_out, output_scale, output_zero_point, axis=-1, out_dtype="uint8"
)
def _impl(inputs, _):
lhs = inputs[0]
rhs = inputs[1]
output_scale = _expr.const(inputs[2])
output_zero_point = _expr.const(inputs[3])
assert len(inputs) == 8, "Input quant params not found in op inputs"
# Manually added by add_input_quant_params_to_op_inputs above
input_scale_lhs = _expr.const(inputs[4])
input_zero_point_lhs = _expr.const(inputs[5])
input_scale_rhs = _expr.const(inputs[6])
input_zero_point_rhs = _expr.const(inputs[7])
if fp32_piggy_back:
logger.info("Piggy backing to FP32 op (PyTorch way)")
return torch_impl(
lhs,
rhs,
input_scale_lhs,
input_zero_point_lhs,
input_scale_rhs,
input_zero_point_rhs,
output_scale,
output_zero_point,
)
return qnn_impl(
lhs,
rhs,
input_scale_lhs,
input_zero_point_lhs,
input_scale_rhs,
input_zero_point_rhs,
output_scale,
output_zero_point,
)
return _impl
def _cat(fp32_piggy_back=False):
# refer to aten/src/ATen/native/quantized/cpu/qconcat.cpp
# for concat they also piggy back to fp32(!)
# dequantize -> fp32 math -> quantize
def torch_impl(inputs, input_scales, input_zero_points, output_scale, output_zero_point, axis):
dequantized = []
for inp, inp_scale, inp_zp in zip(inputs, input_scales, input_zero_points):
dequantized.append(relay.qnn.op.dequantize(inp, inp_scale, inp_zp))
concat = _op.tensor.concatenate(dequantized, axis=axis)
return relay.qnn.op.quantize(
concat, output_scale, output_zero_point, axis=axis, out_dtype="uint8"
)
def _impl(inputs, _):
axis = inputs[1]
output_scale = _expr.const(inputs[2])
output_zero_point = _expr.const(inputs[3])
num_inputs = (len(inputs) - 4) // 2
input_scales = []
input_zero_points = []
for i in range(0, num_inputs):
input_scales.append(_expr.const(inputs[4 + i * 2]))
input_zero_points.append(_expr.const(inputs[4 + i * 2 + 1]))
if fp32_piggy_back:
return torch_impl(
inputs[0], input_scales, input_zero_points, output_scale, output_zero_point, axis
)
return relay.qnn.op.concatenate(
inputs[0], input_scales, input_zero_points, output_scale, output_zero_point, axis
)
return _impl
def _add_scalar():
# this is used for mobilenet v3
def _impl(inputs, _):
# refer to aten/src/ATen/native/quantized/cpu/qadd.cpp
assert len(inputs) == 6, "Input quant params not found in op inputs"
s = inputs[4]
z = inputs[5]
c = inputs[1]
c_q = round(c / s)
q_min = 0
q_max = 255
# math for calculating output scale and zp is already done
# during _add_output_quant_params_to_scalar_op above
out_scale = _expr.const(inputs[2])
out_zp = _expr.const(inputs[3])
if q_min > z - c_q or q_max < z - c_q:
# TODO(masahi): Replace this with integer only compute
dequant = relay.qnn.op.dequantize(inputs[0], _expr.const(s), _expr.const(z))
dequantized_add = _op.tensor.add(dequant, _expr.const(c_q * s))
return relay.qnn.op.quantize(
dequantized_add, out_scale, out_zp, axis=1, out_dtype="uint8"
)
# only scale change
return inputs[0]
return _impl
def quantize_scalar(data, scale, zero_point):
# used to quantize 6., in mobilenet v3
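# e.g. quantize_scalar(6.0, 0.1, 0) == 60, clamped to the uint8 range [0, 255]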
transformed = zero_point + data / scale
return max(0, min(round(transformed), 255))
def _relu6():
# refer to src/ATen/native/quantized/cpu/qrelu.cpp
def _impl(inputs, _):
assert len(inputs) == 4, "Input quant params not found in op inputs"
input_scale = inputs[2]
input_zero_point = inputs[3]
six = quantize_scalar(6.0, input_scale, input_zero_point)
return _op.tensor.clip(inputs[0], input_zero_point, six)
return _impl
def _leaky_relu(fp32_piggy_back=False):
# refer to src/ATen/native/quantized/cpu/qrelu.cpp
def _impl_fp32(inputs, _):
alpha = inputs[1]
output_scale = _expr.const(inputs[3])
output_zero_point = _expr.const(inputs[4])
input_scale = _expr.const(inputs[5])
input_zero_point = _expr.const(inputs[6])
dequant = relay.qnn.op.dequantize(inputs[0], input_scale, input_zero_point)
dequantized = _op.nn.leaky_relu(dequant, alpha)
return relay.qnn.op.quantize(
dequantized, output_scale, output_zero_point, out_dtype="uint8"
)
def _impl_int8(inputs, _):
alpha = inputs[1]
output_scale = _expr.const(inputs[3])
output_zero_point = _expr.const(inputs[4])
input_scale = _expr.const(inputs[5])
input_zero_point = _expr.const(inputs[6])
return relay.qnn.op.leaky_relu(
inputs[0], alpha, input_scale, input_zero_point, output_scale, output_zero_point
)
def _impl(inputs, _):
assert len(inputs) == 7, "Input quant params not found in op inputs"
if fp32_piggy_back:
return _impl_fp32(inputs, _)
return _impl_int8(inputs, _)
return _impl
def _mul_scalar():
# this is used for mobilenet v3
def _impl(inputs, _):
# refer to aten/src/ATen/native/quantized/cpu/qmul.cpp
# math for calculating output scale and zp is already done
# during _add_output_quant_params_to_scalar_op above
assert len(inputs) == 6, "Input quant params not found in op inputs"
other_val = inputs[1] # scalar
if other_val > 0.0:
# only scale change
return inputs[0]
if other_val == 0.0:
shape = infer_shape(inputs[0])
return _op.full(_expr.const(0), shape, dtype="uint8")
# negative scale case
q_min = 0
q_max = 255
bias = _expr.const(q_max + q_min, dtype="int8")
int8 = bias - _op.cast(inputs[0], "int8")
return _op.cast(int8, "uint8")
return _impl
def _hswish(fp32_piggy_back=False):
def _impl_fp32(inputs):
# refer to src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp
# They fall back to fp32
def relu6(x):
return _op.tensor.clip(x, 0.0, 6.0)
def hardsigmoid(x):
dtype = "float32"
return relu6(x + _expr.const(3.0, dtype=dtype)) / _expr.const(6.0, dtype=dtype)
output_scale = _expr.const(inputs[1])
output_zero_point = _expr.const(inputs[2])
input_scale = _expr.const(inputs[3])
input_zero_point = _expr.const(inputs[4])
dequant = relay.qnn.op.dequantize(inputs[0], input_scale, input_zero_point, axis=1)
dequantized_hswish = dequant * hardsigmoid(dequant)
return relay.qnn.op.quantize(
dequantized_hswish, output_scale, output_zero_point, out_dtype="uint8"
)
def _impl_int8(inputs):
output_scale = _expr.const(inputs[1])
output_zero_point = _expr.const(inputs[2])
input_scale = _expr.const(inputs[3])
input_zero_point = _expr.const(inputs[4])
return relay.qnn.op.hardswish(
inputs[0], input_scale, input_zero_point, output_scale, output_zero_point
)
def _impl(inputs, _):
assert len(inputs) == 5, "Input quant params not found in op inputs"
if fp32_piggy_back:
return _impl_fp32(inputs)
return _impl_int8(inputs)
return _impl
def _linear_dynamic():
def _calculate_qparam(inp):
# reference ATen/native/quantized/cpu/qlinear_dynamic.cpp
# ChooseQuantizationParams function
mn = _op.min(inp)
mx = _op.max(inp)
# Ensure that the interval contains 0
mn = _op.minimum(mn, _op.const(0.0, dtype="float32"))
mx = _op.maximum(mx, _op.const(0.0, dtype="float32"))
qmax = 255
# reduce_range became True in v1.6
if is_version_greater_than("1.5.1"):
qmax = 127
scale = (mx - mn) / _expr.const(qmax, dtype="float32")
zero_point_from_min = -(mn / scale)
zero_point = _op.cast(_op.round(_op.clip(zero_point_from_min, 0.0, qmax)), "int32")
return scale, zero_point
def _impl(inputs, _):
weight = inputs[1][0]
weight_scale = inputs[1][1]
weight_zero_point = inputs[1][2]
inp = inputs[0]
input_scale, input_zero_point = _calculate_qparam(inp)
qinp = relay.qnn.op.quantize(inp, input_scale, input_zero_point, out_dtype="uint8")
data_shape = infer_shape(inp)
if len(data_shape) > 2:
qinp = _op.reverse_reshape(qinp, [-1, 0])
weight_shape = infer_shape(weight)
units = weight_shape[0]
dense = relay.qnn.op.dense(
qinp,
weight,
input_zero_point,
weight_zero_point,
input_scale,
weight_scale,
units=units,
)
bias_var = inputs[1][3]
dequant_scale = input_scale * weight_scale
dense_out = relay.qnn.op.dequantize(
dense, dequant_scale, input_zero_point=relay.const(0, "int32"), axis=1
)
if len(data_shape) > 2:
new_shape = list(data_shape[:-1])
new_shape.append(units)
dense_out = _op.reshape(dense_out, new_shape)
if bias_var is not None:
return dense_out + bias_var
return dense_out
return _impl
def _quantized_conv_transpose2d(with_relu=False):
def _impl(inputs, _):
# Refer to aten/src/ATen/native/quantized/cpu/qconv.cpp
# Supported in Torch 1.7 or newer
conv_params = inputs[1]
weight = conv_params[0]
weight_scale = conv_params[1]
weight_zero_point = conv_params[2]
bias = conv_params[3]
strides = conv_params[4]
padding = conv_params[5]
dilation = conv_params[6]
groups = conv_params[7]
output_padding = conv_params[8]
output_scale = _expr.const(inputs[2])
output_zero_point = _expr.const(inputs[3])
assert len(inputs) == 6, "Input quant params not found in op inputs"
# These are manually added by add_input_quant_params_to_op_inputs above
# In torch, they are retrieved from QTensor data structure at runtime
input_scale = _expr.const(inputs[4])
input_zero_point = _expr.const(inputs[5])
weight_shape = list(infer_shape(weight))
kernel_size = (weight_shape[2], weight_shape[3])
out_channels = weight_shape[1]
conv_out = relay.qnn.op.conv2d_transpose(
inputs[0],
weight,
input_zero_point,
weight_zero_point,
input_scale,
weight_scale,
kernel_size=kernel_size,
dilation=dilation,
strides=strides,
padding=padding,
groups=groups,
channels=out_channels,
output_padding=output_padding,
out_dtype="int32",
kernel_layout="IOHW",
)
return _do_bias_and_requantize(
conv_out, bias, input_scale, weight_scale, output_scale, output_zero_point, with_relu
)
return _impl
convert_map = {
"aten::quantize_per_tensor": _quantize_per_tensor(),
"quantized::conv2d_relu": _quantized_conv2d(with_relu=True),
"aten::dequantize": _dequantize(),
"quantized::conv2d": _quantized_conv2d(),
"quantized::add_relu": _binop(relay.qnn.op.add, with_relu=True),
"quantized::add": _binop(relay.qnn.op.add),
"quantized::mul_relu": _binop(relay.qnn.op.mul, with_relu=True),
"quantized::mul": _binop(relay.qnn.op.mul),
"quantized::linear": _linear(),
"quantized::linear_relu": _linear(with_relu=True),
"quantized::cat": _cat(),
"quantized::add_scalar": _add_scalar(),
"quantized::mul_scalar": _mul_scalar(),
"quantized::relu6": _relu6(),
"quantized::leaky_relu": _leaky_relu(),
"quantized::linear_dynamic": _linear_dynamic(),
"quantized::hardswish": _hswish(fp32_piggy_back=False),
"quantized::conv_transpose2d": _quantized_conv_transpose2d(),
}
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/tensorflow.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument, too-many-lines, len-as-condition, broad-except
# pylint: disable=import-outside-toplevel, redefined-builtin
"""TF: Tensorflow frontend."""
import warnings
from collections import defaultdict
# Numpy support
import numpy as np
import tvm
from tvm.ir import IRModule
from tvm.relay.prelude import Prelude
from tvm.relay.transform import InferType
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from ..ty import Any
from ..expr_functor import ExprMutator, ExprVisitor
from .common import get_relay_op
from .common import infer_type as _infer_type
from .common import infer_shape as _infer_shape
from .common import infer_value as _infer_value
from .tensorflow_ops import _convert_map
from .tensorflow_ops import _need_prelude_for_shape_inference
from .tensorflow_ops import _get_more_static_shape
__all__ = ["from_tensorflow"]
# The default configurations of Relay TensorFlow frontend.
TF_DEFAULT_CONFIGS = {
# By default, TVM converts `tf.matmul` to `transpose(weight) + nn.dense`, which introduces
# unnecessary overhead in weight transpose. Change this flag to False to directly convert to
# `nn.matmul` to get rid of the overhead.
# However, please note that `nn.matmul` is experimental, so it may have some performance
# issues.
"use_dense": True,
# By default, TVM converts `tf.batch_matmul` to `transpose(weight) + nn.batch_matmul_NT`.
# Change this flag to False to directly convert to `nn.batch_matmul`.
# Note that `nn.batch_matmul` with a format other than NT is experimental; it may have some
# performance issues.
"use_nt_batch_matmul": True,
}
# compatible operators that do NOT require any conversion.
_identity_list = []
# Operators that get pruned away when the complete graph is frozen.
# These operators are not needed for inference.
_freezed_graph_pruned_op_list = [
"ReadVariableOp",
"ResourceGather",
"Variable",
"VariableV2",
"VarHandleOp",
"Assign",
"AssignVariableOp",
]
# An internal list to contain all the control flow primitives used in Tensorflow
# 1.x.
_control_flow_nodes = ["Merge", "Switch", "NextIteration", "Exit", "Enter", "LoopCond"]
# A map to record tensor array write ops and input ta/tensor indices
# Value is (index of tensor array, index of written node)
_tensor_array_write_ops = {
"TensorArrayWrite": (3, 2),
"TensorArrayScatter": (0, 2),
"TensorArraySplit": (0, 1),
}
def is_tensor_array_constuctor(tf_node):
"""Check whether is tensor array constructor node."""
is_ta = False
ta_start = "TensorArrayV"
if tf_node.op.startswith(ta_start):
is_ta = tf_node.op[len(ta_start)].isnumeric()
return is_ta
def find_parent_loop_name(node_name, while_loop_name_set):
"""Find name of direct parent while loop."""
ploop_name = ""
name_prefix = node_name.rsplit("/", 1)[0]
if name_prefix.startswith("^"):
name_prefix = name_prefix[1:]
for lname in while_loop_name_set:
if name_prefix.startswith(lname) and len(ploop_name) < len(lname):
ploop_name = lname
if len(ploop_name) == 0:
ploop_name = name_prefix
return ploop_name
def _in_while_loop(control_flow_node_map, op_name):
"""
Check if a given control flow operator is part of a while loop execution
frame. This is based on the fact that there is only one occurrence of
`LoopCond` for a loop execution frame and it is only present in the loop
construct.
Parameters
----------
control_flow_node_map : Dict[str, Set[str]]
A dictionary mapping each unique control flow execution frame name to
a set of primitive operators.
op_name : str
The name of a control flow primitive.
Returns
-------
ret : bool
Return true if the operator is in a while loop execution frame,
otherwise, return false.
"""
return op_name in control_flow_node_map and "LoopCond" in control_flow_node_map[op_name]
class RewriteSubgraph(ExprMutator):
"""
A helper class to rewrite exprs in a while loop function to variables.
Parameters
----------
rewrite_map : Dict[expr, expr]
A dictionary containing expr-to-var mappings.
"""
def __init__(self, rewrite_map):
ExprMutator.__init__(self)
self.rewrite_map = rewrite_map
def visit(self, expr):
if expr in self.rewrite_map:
return self.rewrite_map[expr]
return super().visit(expr)
def rewrite_subgraph(expr, rewrites):
"""Rewrite loop body."""
return RewriteSubgraph(rewrites).visit(expr)
class Branch:
"""A class contains the components that are used to build up a Relay if
node.
Parameters
----------
cond : tvm.relay.Expr
The condition of an if node.
true_branch : tvm.relay.Expr
The body of the true branch of an if expression.
false_branch: tvm.relay.Expr
The body of the false branch of an if expression.
_if : tvm.relay.Expr
An internal variable indicating whether an if expression has already been created
for a matched TF condition construct.
Examples
--------
The following is a cond statement written in TensorFlow:
.. code-block:: python
def vanilla_cond():
i = tf.constant(1)
j = tf.constant(4)
def f1():
return tf.multiply(1, 17)
def f2():
return tf.add(4, 23)
r = tf.cond(tf.less(i, j), f1, f2)
This condition statement should be converted into Relay in the following
form:
.. code-block:: python
fn (%Const: Tensor[(1,), int32],
%Const_1: Tensor[(1,), int32],
%cond/Mul/x: Tensor[(1,), int32],
%cond/Mul/y: Tensor[(1,), int32],
%cond/Add/x: Tensor[(1,), int32],
%cond/Add/y: Tensor[(1,), int32]) {
%0 = less(%Const, %Const_1) # ty=Tensor[(1,), bool]
%1 = min(%0)
if (%1) {
%2 = multiply(%cond/Mul/x, %cond/Mul/y)
%2
} else {
%3 = add(%cond/Add/x, %cond/Add/y)
%3
}
}
"""
def __init__(self):
self._if = None
self.cond = None
self.true_branch = None
self.false_branch = None
def _if_node(self):
"""An internal API to create a relay if node from the matched TF
condition construct.
"""
# `cond` returns a tensor that contains boolean values. We add a `min`
# operator to check if there is any false value. If so, this condition
# does not hold.
cond = tvm.relay.op.min(self.cond)
return tvm.relay.If(cond, self.true_branch, self.false_branch)
def if_node(self):
"""Create an tvm.relay.If node if it hasn't been created yet."""
if self._if is None:
self._if = self._if_node()
return self._if
class VarChecker(ExprVisitor):
"""Check whether a Variable is used in loop body.
Parameters
----------
var : relay.expr.Var
Relay Variable to be checked.
"""
def __init__(self, var):
ExprVisitor.__init__(self)
self._var = var
self.used = False
def visit(self, expr):
if self._var == expr:
self.used = True
super().visit(expr)
class Loop:
"""
A class containing the components used to build up a Relay
recursive call.
Parameters
----------
mod : tvm.IRModule
Module for current parsed IR.
loop_name : str
Name prefix of while loop in TensorFlow graph.
lvar2expr : dict from str to dict from Relay.expr.Var to Relay.expr
A dictionary recording all loop vars and corresponding
relay expression.
Examples
--------
The following is a vanilla loop from TensorFlow:
.. code-block:: python
i = tf.constant(0)
c = lambda i: tf.less(i, 10)
b = lambda i: tf.add(i, 1)
r = tf.while_loop(c, b, [i])
It will be converted to the following recursive call in Relay:
.. code-block:: python
fn (%while/Less/y: Tensor[(1,), int32],
%while/Add/y: Tensor[(1,), int32],
%Const: Tensor[(1,), int32]) {
%0 = fn(%loop_var0: Tensor[(1,), int32]) {
%1 = less(%loop_var0, %while/Less/y)
%2 = min(%1)
if (%2) {
%3 = add(%loop_var0, %while/Add/y)
free_var %while_loop
%4 = %while_loop(%3)
%4
} else {
%5 = (%loop_var0,)
%5
}
}
let %while_loop1 = %0
%6 = %while_loop1(%Const)
%6
}
"""
def __init__(self, mod, loop_name, lvar2expr):
self.cond = None
self.body = []
self._loop = None
self._mod = mod
self._loop_name = loop_name
self._lvar2expr = lvar2expr
self.loop_vars = []
self.aligned = False
def _while_loop(self):
"""An internal API to create a Relay recursive call for a matched TF
`while_loop` construct.
"""
bind_map = {}
wl = tvm.relay.var("while_loop")
sb = tvm.relay.scope_builder.ScopeBuilder()
lv_list = []
expr_list = []
extra_vars = []
for i, lv in enumerate(self.loop_vars):
if self._loop_name not in self._lvar2expr:
self._lvar2expr[self._loop_name] = {}
# Handle the case when loop var is not properly lifted.
# This can happen when the loop var node name accidentally
# begins with the loop name.
if lv not in self._lvar2expr[self._loop_name]:
var_name = "{}_loop_var_{}".format(self._loop_name, i)
var_type = _infer_type(lv, self._mod).checked_type
loop_var = tvm.relay.var(var_name, type_annotation=var_type)
self._lvar2expr[self._loop_name][loop_var] = lv
bind_map[lv] = loop_var
self.loop_vars[i] = loop_var
lv = loop_var
lv_list.append(lv)
expr_list.append(self._lvar2expr[self._loop_name][lv])
if bind_map:
self.cond = rewrite_subgraph(self.cond, bind_map)
self.body = [rewrite_subgraph(b, bind_map) for b in self.body]
cond = tvm.relay.op.min(self.cond)
for lv, exp in self._lvar2expr[self._loop_name].items():
if lv not in self.loop_vars:
var_checker = VarChecker(lv)
for bd in self.body + [cond]:
var_checker.visit(bd)
if var_checker.used:
lv_list.append(lv)
expr_list.append(exp)
extra_vars.append(lv)
break
with sb.if_scope(cond):
sb.ret(wl(*list(self.body + extra_vars)))
with sb.else_scope():
sb.ret(tvm.relay.Tuple(lv_list))
loop_fn = tvm.relay.Function(lv_list, sb.get())
sb = tvm.relay.scope_builder.ScopeBuilder()
sb.let(wl, loop_fn)
loop_ret = wl(*expr_list)
sb.ret(loop_ret)
ret = sb.get()
return ret
def while_loop(self):
"""Instantiate a while loop if it has not been created yet."""
if self._loop is None:
self._loop = self._while_loop()
return self._loop
return self._loop
class GraphProto(object):
"""A helper class for handling relay graph copying from Tensorflow GraphDef.
Definition:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/graph.proto
"""
def __init__(self):
self._nodes = {}
self._tf_node_map = {}
self._params = {}
self._input_shapes = {}
self._output_shapes = {}
self._num_rnn_layer = False
self._input_shapes = {}
self._loops = {}
self._branches = {}
self._mod = IRModule({})
self._prelude = Prelude(self._mod)
self._control_flow_node_map = defaultdict(set)
self._loop_body_order = {}
self._loop_var_order = {}
self._lvar2expr = {}
self._lname_map = {}
self._sorted_cf_node_names = []
self._while_loop_name_set = set()
self._main_graph_proto = self
self._tensor_array_shapes = {}
self._tensor_array_shape_nodes = {}
def _get_relay_func(self, graph, layout="NHWC", shape=None, outputs=None):
"""Construct relay nodes from tensorflow graph definition - GraphDef.
Follow the tensorflow graph definition to parse and convert it to Relay.
Some of the assumptions are listed below.
-> All Placeholders are considered as graph input.
-> All Const nodes are params.
-> Last node is assumed as graph output.
-> _output_shapes : Graph should be frozen with add_shapes=True.
Or user can pass input shape dictionary optionally.
-> DecodeJpeg, ResizeBilinear: These are dummy operators.
Hence user should handle preprocessing outside.
-> CheckNumerics: No implementation as of now for this.
Just copies input to output.
Parameters
----------
graph : tensorflow graph definition object
The loaded tensorflow GraphDef
layout : target layout to be used (Optional)
Currently only NCHW is supported, to enable NHWC models on GPU.
shape : Dictionary of input dimensions (Optional)
Graph level input shape dictionary.
outputs : List of output tensor names (Optional)
if not specified then the last node is assumed as graph output.
Returns
-------
mod : tvm.IRModule
The module that optimizations will be performed on.
params : dict
A dict of name: tvm.nd.array pairs, used as pretrained weights
"""
try:
from tensorflow.python.framework import tensor_util
except ImportError as e:
raise ImportError("Unable to import tensorflow which is required {}".format(e))
missing_operators = self._parse_import_prerequisites(graph)
control_flow_nodes = []
ta_write_nodes = []
ta_gather_nodes = []
ta_construct_nodes = []
self._in_shape = shape
self._layout = layout
self._graph = graph
if missing_operators:
freezed_ops = [op for op in missing_operators if op in _freezed_graph_pruned_op_list]
if freezed_ops:
raise Exception(
"Graph is not frozen. Provide a frozen graph. "
"Found operators {}".format(freezed_ops)
)
raise NotImplementedError(
"The following operators are not implemented: {}".format(missing_operators)
)
for node in graph.node:
node_name_prefix = node.name.rsplit("/", 1)[0]
self._control_flow_node_map[node_name_prefix].add(node.op)
self._tf_node_map[node.name] = node
# Parse output_shapes attribute
parsed_attr = self._parse_attr(node.attr)
if "_output_shapes" in parsed_attr:
self._output_shapes[node.name] = [
tensor_util.TensorShapeProtoToList(tshape)
for tshape in parsed_attr["_output_shapes"]
]
else:
self._output_shapes[node.name] = [None]
# Parse placeholder and const here since input shape info is required.
if node.op == "Placeholder" or node.op == "PlaceholderWithDefault":
# Give priority to user argument.
if shape and node.name in shape:
self._input_shapes[node.name] = list(shape[node.name])
else:
self._input_shapes[node.name] = tensor_util.TensorShapeProtoToList(
node.attr["shape"].shape
)
for idx, dim in enumerate(self._input_shapes[node.name]):
if dim < 0:
self._input_shapes[node.name][idx] = Any()
self._output_shapes[node.name] = [self._input_shapes[node.name]]
attr = self._parse_attr(node.attr)
self._nodes[node.name] = [
_expr.var(
node.name, shape=self._input_shapes[node.name], dtype=attr["dtype"].name
)
]
# Ignore user's input shape for Non placeholder
elif node.op == "Const":
tensor_value = node.attr["value"].tensor
self._input_shapes[node.name] = tensor_util.TensorShapeProtoToList(
tensor_value.tensor_shape
)
self._output_shapes[node.name] = [self._input_shapes[node.name]]
if shape and node.name in shape:
warnings.warn(
"Ignore the passed shape. Shape in graphdef "
"will be used for operator %s." % node.name
)
for key, value in node.attr.items():
self._parse_param(key, value, node.name, self._in_shape)
elif node.op in _control_flow_nodes:
# We assume that the direct parent node of Exit is a while loop block
if node.op == "Exit":
self._while_loop_name_set.add(node_name_prefix)
control_flow_nodes.append(node)
elif node.op.startswith("TensorArray"):
if is_tensor_array_constuctor(node):
ta_construct_nodes.append(node)
else:
for ta_write_name, idx in _tensor_array_write_ops.items():
if node.op.startswith(ta_write_name):
ta_write_nodes.append((node, idx))
break
if node.op.startswith("TensorArrayGather"):
ta_gather_nodes.append(node)
# Use tensor array gather to infer static tensor array shape
for gather_node in ta_gather_nodes:
input_ta_name = gather_node.input[0]
input_ta_node = self._tf_node_map[input_ta_name]
if is_tensor_array_constuctor(input_ta_node):
gather_attr = self._parse_attr(gather_node.attr)
if "element_shape" not in gather_attr:
continue
raw_elem_shape = tensor_util.TensorShapeProtoToList(gather_attr["element_shape"])
elem_shape = []
for dim in raw_elem_shape:
if dim < 0:
elem_shape.append(Any())
else:
elem_shape.append(int(dim))
self._tensor_array_shapes[input_ta_node.name] = elem_shape
# Fetch node contains static tensor array shape
for item in ta_write_nodes:
wnode = item[0]
ta_idx, inode_idx = item[1]
stack = [self._tf_node_map[wnode.input[ta_idx].split(":")[0]]]
while stack:
cnode = stack.pop(0)
if not cnode.op.startswith("TensorArray"):
for iname in cnode.input:
stack.append(self._tf_node_map[iname.split(":")[0]])
elif cnode.name != wnode.name:
if is_tensor_array_constuctor(cnode):
inode = self._tf_node_map[wnode.input[inode_idx].split(":")[0]]
tn = wnode.input[inode_idx].split(":")
output_index = int(tn[1]) if len(tn) > 1 else 0
self._tensor_array_shape_nodes[cnode.name] = (inode, wnode.op, output_index)
break
# First, parse all control flow nodes.
# Convert tf.cond to Branch and tf.while_loop to Loop.
sorted_cf_nodes = []
exit_pos_map = {}
ordered_prefix = []
# Sort control flow nodes to move all Exit nodes to the end
# of corresponding while_loop block.
for node in control_flow_nodes:
loop_name = find_parent_loop_name(node.name, self._while_loop_name_set)
if node.op == "Exit":
if loop_name not in exit_pos_map:
ordered_prefix.append(loop_name)
exit_pos_map[loop_name] = len(sorted_cf_nodes)
sorted_cf_nodes.append(node)
elif loop_name in self._while_loop_name_set:
if loop_name not in exit_pos_map:
sorted_cf_nodes.append(node)
else:
sorted_cf_nodes.insert(exit_pos_map[loop_name], node)
for j in range(ordered_prefix.index(loop_name), len(ordered_prefix)):
exit_pos_map[ordered_prefix[j]] += 1
else:
sorted_cf_nodes.append(node)
for node in sorted_cf_nodes:
self._sorted_cf_node_names.append(node.name)
for node in sorted_cf_nodes:
self._backtrack_construct(node.name)
# Second, parse other nodes to re-create TF graph using Relay operators.
for node in graph.node:
self._backtrack_construct(node.name)
out = []
if outputs is None:
last_node = graph.node[-1]
op = self._nodes[last_node.name.split(":")[0]]
if last_node.op == "Exit":
out = [op[0].tuple_value]
else:
out = op
else:
for out_name in outputs:
if ":" in out_name:
out_name, out_num = out_name.split(":")
out_num = int(out_num)
out.append(self._nodes[out_name][out_num])
else:
out.append(self._nodes[out_name][0])
if isinstance(out, _expr.TupleWrapper):
out = out.tuple_value
else:
out = out[0] if len(out) == 1 else _expr.Tuple(out)
fvars = analysis.free_vars(out)
func = _function.Function(fvars, out)
final_params = {}
for fv in fvars:
if fv.name_hint in self._params:
final_params[fv.name_hint] = self._params[fv.name_hint]
self._params = final_params
return func
def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None):
"""Wrapper to _get_relay_func which converts Tensorflow graph to Relay function
which is used as main function for the Relay module
"""
func = self._get_relay_func(graph, layout=layout, shape=shape, outputs=outputs)
self._mod["main"] = func
return self._mod, self._params
def _parse_import_prerequisites(self, graph):
"""Calculate the named preconditions from TensorFlow `graph`.
Return prerequisites for parsing:
a. Set of operator names which don't have their mapping in TVM, i.e.
which are not supported
"""
missing_operators = set()
from tensorflow.python.framework import op_def_registry
for node in graph.node:
getOpDef = (
op_def_registry._registered_ops.get
if hasattr(op_def_registry, "_registered_ops")
else op_def_registry.get
)
op_def = getOpDef(node.op)
if node.op == "Placeholder" or node.op == "PlaceholderWithDefault":
pass
elif node.op == "Const":
pass
elif node.op in ["PartitionedCall", "StatefulPartitionedCall"]:
pass
else:
if any([node.op in t for t in [_identity_list, _convert_map, _control_flow_nodes]]):
pass
elif op_def is not None and op_def.is_stateful:
missing_operators.add(node.op)
else:
missing_operators.add(node.op)
return missing_operators
def _parse_param(self, key, value, name, shape):
try:
from tensorflow.python.framework import tensor_util
except ImportError as e:
raise ImportError("Unable to import tensorflow which is required {}".format(e))
if key == "value":
np_array = tensor_util.MakeNdarray(value.tensor)
if np_array.dtype == np.dtype(object):
# Object types are generally tensorflow DT_STRING (DecodeJpeg op).
# Just leave it as placeholder.
if shape and name in shape:
var_shape = shape[name]
else:
var_shape = tensor_util.TensorShapeProtoToList(value.tensor.tensor_shape)
self._nodes[name] = [_expr.var(name, shape=var_shape, dtype="uint8")]
return
array_ndim = len(np_array.shape)
if array_ndim == 0:
self._nodes[name] = [tvm.relay.const(np_array, np_array.dtype)]
else:
self._params[name] = tvm.nd.array(np_array)
self._nodes[name] = [
_expr.var(name, shape=self._params[name].shape, dtype=self._params[name].dtype)
]
else:
if key not in ("dtype", "_output_shapes", "_class"):
raise NotImplementedError(
"Other attributes for a Const(param) Node {} ? .".format(key)
)
def _get_attr(self, buf):
"""Returns the value of the attr of this buf with the given `name`.
Args:
buf: attrvalue protobuf.
Returns:
The value of the attr, as a Python object.
Note:
An empty or missing oneof value yields an empty list.
"""
fields = ["s", "i", "f", "b", "type", "shape", "tensor", "func"]
x = buf
ret = []
try:
from tensorflow.python.framework import dtypes
except ImportError as e:
raise ImportError("Unable to import tensorflow which is required {}".format(e))
# Treat an empty oneof value as an empty list.
if not x.WhichOneof("value"):
return ret
if x.HasField("list"):
for f in fields:
if getattr(x.list, f):
if f == "type":
ret += [dtypes.as_dtype(x) for x in list(getattr(x.list, f))]
else:
ret += list(getattr(x.list, f))
else:
for f in fields:
if x.HasField(f):
if f == "type":
ret = dtypes.as_dtype(getattr(x, f))
else:
ret = getattr(x, f)
return ret
def _parse_attr(self, attr_proto):
"""Convert a list of AttributeProto to a dict, with names as keys."""
attrs = {}
for key, value in attr_proto.items():
attrs[key] = self._get_attr(value)
return attrs
def _convert_control_flow_operator(self, node, inputs, attrs, control_flow_node_map):
"""
Convert the Relay control flow primitive into corresponding component
of a Relay control flow construct, i.e. `tf.cond` and `tf.while_loop`
are converted into a Relay `If` and a recursive call, respectively.
Parameters
----------
node: TensorFlow graph node object.
A TensorFlow graph node object.
inputs : List[tvm.relay.Expr]
List of input symbols.
attrs : Dict[tvm.Attrs]
Dict of operator attributes.
control_flow_node_map : Dict[str, Set[str]]
A dictionary contains the execution frame name to primitives
mapping.
Returns
-------
op : tvm.relay.Expr
Converted relay expression.
"""
node_name_prefix = node.name.rsplit("/", 1)[0]
plname = find_parent_loop_name(node.name, self._while_loop_name_set)
if node.op == "Merge":
if _in_while_loop(self._control_flow_node_map, node_name_prefix):
op = self._licm_construct(plname, node.input[0])
if node_name_prefix not in self._loops:
self._loops[node_name_prefix] = Loop(self._mod, plname, self._lvar2expr)
else:
if node_name_prefix not in self._branches:
switch_prefix = node_name_prefix + "/Switch"
merge_idx = self._sorted_cf_node_names.index(node.name)
for i in range(merge_idx - 1, -1, -1):
cf_name = self._sorted_cf_node_names[i]
if cf_name.startswith(switch_prefix):
self._backtrack_construct(cf_name)
break
branch = self._branches[node_name_prefix]
false_br = self._licm_construct(plname, node.input[0])
true_br = self._licm_construct(plname, node.input[1])
branch.true_branch = true_br
branch.false_branch = false_br
op = branch.if_node()
if node_name_prefix not in self._while_loop_name_set:
try:
cond_val = np.all(
_infer_value(branch.cond, self._params, self._mod).numpy()
)
if cond_val:
op = branch.true_branch
else:
op = branch.false_branch
except Exception:
op = branch.if_node()
elif node.op == "Exit":
loop = self._loops[node_name_prefix]
# Check whether the order of loop variables aligns
# with loop body. If not, create new loop variable list
# with correct order.
if not loop.aligned:
loop_vars = []
for i in self._loop_body_order[node_name_prefix]:
for j, k in enumerate(self._loop_var_order[node_name_prefix]):
if k == i:
loop_vars.append(loop.loop_vars[j])
loop.loop_vars = loop_vars
loop.aligned = True
exit_name = node.name.split("/")[-1]
if "_" in exit_name:
exit_number = int(exit_name[5:])
else:
exit_number = 0
expr = loop.while_loop()
body_pos = exit_number
for i, j in enumerate(self._loop_body_order[node_name_prefix]):
if exit_number == j:
body_pos = i
break
op = _expr.TupleGetItem(expr, body_pos)
elif node.op == "Enter":
op = self._licm_construct(plname, node.input[0])
elif node.op == "LoopCond":
op = self._licm_construct(plname, node.input[0])
self._loops[node_name_prefix].cond = op
elif node.op == "Switch":
op = self._licm_construct(plname, node.input[0])
cond = self._licm_construct(plname, node.input[1])
if _in_while_loop(self._control_flow_node_map, node_name_prefix):
if node_name_prefix not in self._loop_var_order:
self._loop_var_order[node_name_prefix] = []
if node.name.endswith("Switch"):
self._loop_var_order[node_name_prefix].append(0)
else:
self._loop_var_order[node_name_prefix].append(
int(node.name.split("Switch_")[-1])
)
self._loops[node_name_prefix].loop_vars.append(op)
else:
if node_name_prefix not in self._branches:
self._branches[node_name_prefix] = Branch()
self._branches[node_name_prefix].cond = cond
elif node.op == "NextIteration":
if node_name_prefix not in self._loop_body_order:
self._loop_body_order[node_name_prefix] = []
if node.name.endswith("NextIteration"):
self._loop_body_order[node_name_prefix].append(0)
else:
self._loop_body_order[node_name_prefix].append(
int(node.name.split("NextIteration_")[-1])
)
op = self._licm_construct(plname, node.input[0])
self._loops[node_name_prefix].body.append(op)
else:
raise Exception("Cannot identify control flow operator: " + "{}".format(node.op))
return op
def _partition_call_operator(self, inputs, attr):
"""
Convert the Relay Partition call ops into Relay Function calls and
function definitions from Tensorflow graph library attribute to Relay global
functions
Parameters
----------
node: TensorFlow graph node object.
A TensorFlow graph node object.
inputs : List[tvm.relay.Expr]
List of input symbols.
attrs : Dict[tvm.Attrs]
Dict of operator attributes.
Returns
-------
op : tvm.relay.Expr
Converted relay expression.
"""
try:
from tensorflow.python.framework import function_def_to_graph
except ImportError as e:
raise ImportError("Unable to import tensorflow which is required {}".format(e))
main_graph_proto = self._main_graph_proto
outer_graph_def = main_graph_proto._graph
node_func_name = attr.get("f").name
func = next(
(f for f in outer_graph_def.library.function if f.signature.name == node_func_name),
None,
)
if func:
devices = set(node.device for node in func.node_def)
if len(devices) > 1:
raise Exception(
"Found inconsistent Device assignment in the "
"Stateful Partitioned SubGraph. Rejecting "
"the subgraph "
)
# Convert function definition to graph
func_input_shapes = func.attr["_input_shapes"].list.shape
subgraph, _ = function_def_to_graph.function_def_to_graph_def(func, func_input_shapes)
# Computing subgraph's input shape dictionary
subgraph_shape_dict, input_expr_dict = {}, {}
for f_arg, input in zip(func.signature.input_arg, inputs):
input_expr_dict[f_arg.name] = input
subgraph_shape_dict[f_arg.name] = _infer_shape(input, main_graph_proto._mod)
func_name = "func_{}".format(func.signature.name)
try:
global_func = main_graph_proto._mod[func_name]
sub_func = global_func
sub_params = main_graph_proto._params
except ValueError:
# Construct relay nodes from the subgraph
g1 = SubGraphProto(main_graph_proto)
sub_func, sub_params = g1.from_tensorflow(subgraph, shape=subgraph_shape_dict)
main_graph_proto._params.update(sub_params)
func_expr = _function.Function(sub_func.params, sub_func.body)
global_func = tvm.relay.GlobalVar(func_name)
main_graph_proto._mod[global_func] = func_expr
main_graph_proto._mod = InferType()(main_graph_proto._mod)
param_exprs = []
for param_expr in sub_func.params:
# sub_params is subset of sub_func.params
param_name = param_expr.vid.name_hint
if param_name in input_expr_dict.keys():
param_exprs.append(input_expr_dict[param_name])
elif param_name in sub_params.keys():
param_exprs.append(param_expr)
else:
raise Exception("Input parameter {} not found".format(param_name))
sb = tvm.relay.scope_builder.ScopeBuilder()
loop_ret = global_func(*param_exprs)
sb.ret(loop_ret)
ret = sb.get()
else:
raise Exception("Function not found - {}".format(node_func_name))
return ret
def _convert_operator(
self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None
):
"""Convert from Tensorflow operator to relay operator.
The converter must specify conversions explicitly for incompatible name, and
apply handlers to operator attributes.
Parameters
----------
op_name : str
Operator name, such as Conv2D, AvgPool
inputs : list of relay.op
List of input symbols.
attrs : dict
Dict of operator attributes
identity_list : list
List of operators that don't require conversion
convert_map : dict
Dict of name : callable, where name is the op's name that
require conversion to relay, callable are functions which
take attrs and return (new_op_name, new_attrs)
Returns
-------
sym : relay.op
Converted relay operator
"""
identity_list = identity_list if identity_list else _identity_list
convert_map = convert_map if convert_map else _convert_map
if op_name in identity_list:
sym = get_relay_op(op_name)(*inputs, **attrs)
elif op_name in convert_map:
if _need_prelude_for_shape_inference(op_name):
sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)
else:
sym = convert_map[op_name](inputs, attrs, self._params, self._mod)
elif op_name in ["PartitionedCall", "StatefulPartitionedCall"]:
sym = self._partition_call_operator(inputs, attrs)
else:
raise NotImplementedError("Operator {} not implemented.".format(op_name))
sym = self._set_span(sym, node_name)
return sym
@staticmethod
def _set_span(sym, node_name):
span = tvm.relay.Span(tvm.relay.SourceName(node_name), 0, 0, 0, 0)
if isinstance(sym, _expr.Call) and sym.span is None:
sym = _expr.Call(sym.op, sym.args, sym.attrs, sym.type_args, span)
elif isinstance(sym, _expr.TupleWrapper):
tuple_value = sym.tuple_value
if isinstance(tuple_value, _expr.Call) and tuple_value.span is None:
tuple_value = _expr.Call(
tuple_value.op, tuple_value.args, tuple_value.attrs, tuple_value.type_args, span
)
sym = _expr.TupleWrapper(tuple_value, sym.size)
return sym
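# Note on the following method (_licm_construct, loop-invariant code motion):
# when a node referenced from inside a while loop is actually defined outside
# that loop, the converted expression is replaced by a fresh loop variable and
# the original expression is recorded in self._lvar2expr, so it can be passed
# in once as a loop input instead of being re-evaluated on every iteration.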
def _licm_construct(self, loop_name, node_name):
"""Construct a node by considering whether it is
loop invariant with the given while loop. If yes, we
generate a loop Variable. Otherwise, return regular
converted relay expression.
Parameters
----------
loop_name : str
TensorFlow while loop name to be checked.
node_name : str
TensorFlow node name.
Returns
-------
out : relay.Expr or relay.Var
Converted relay expression or loop var.
"""
actual_expr = self._backtrack_construct(node_name)
tn = node_name.split(":")
node_name = tn[0].split("^")[-1]
cloop_name = find_parent_loop_name(node_name, self._while_loop_name_set)
if loop_name in self._while_loop_name_set and not cloop_name.startswith(loop_name):
if loop_name not in self._lvar2expr:
self._lvar2expr[loop_name] = {}
if loop_name not in self._lname_map:
self._lname_map[loop_name] = {}
if node_name not in self._lname_map[loop_name]:
var_name = "{}_loop_var".format(node_name)
var_type = _infer_type(actual_expr, self._mod).checked_type
loop_var = tvm.relay.var(var_name, type_annotation=var_type)
try:
extra_param = _infer_value(actual_expr, self._params, self._mod)
self._params[var_name] = extra_param
except Exception:
pass
self._lvar2expr[loop_name][loop_var] = actual_expr
self._lname_map[loop_name][node_name] = loop_var
ret = loop_var
else:
ret = self._lname_map[loop_name][node_name]
else:
ret = actual_expr
return ret
def _backtrack_construct(self, node_name):
"""Convert a specific tensorflow node to relay expression.
If any of its ancestor node is not converted yet, backtrack as
far as input node and covert all nodes on the path.
This is required when parsing control flow nodes, since the parsing
order may not follow the original graph def.
Parameters
----------
node_name : str
TensorFlow node name.
Returns
-------
op : relay.Expr
Converted relay expression
"""
try:
from tensorflow.python.framework import tensor_util
except ImportError as e:
raise ImportError("Unable to import tensorflow which is required {}".format(e))
input_op_name = node_name.split(":")[0].split("^")[-1]
if input_op_name not in self._nodes:
node = self._tf_node_map[input_op_name]
attr = self._parse_attr(node.attr)
if node.op in _control_flow_nodes:
attr = self._parse_attr(node.attr)
op = self._convert_control_flow_operator(
node, [], attr, self._control_flow_node_map
)
else:
attr["_output_shapes"] = self._output_shapes[input_op_name]
attr["_node_name"] = node.name
attr["_target_layout"] = self._layout
inputs = [self._backtrack_construct(iname) for iname in node.input]
plname = find_parent_loop_name(node_name, self._while_loop_name_set)
# For TensorArrayV3 op, we need to infer shape first
if is_tensor_array_constuctor(node):
raw_elem_shape = tensor_util.TensorShapeProtoToList(attr["element_shape"])
elem_shape = []
for dim in raw_elem_shape:
if dim < 0:
elem_shape.append(Any())
else:
elem_shape.append(dim)
if elem_shape:
attr["shape"] = elem_shape
if attr["identical_element_shapes"] or elem_shape:
shape_node, wnode_op, output_index = self._tensor_array_shape_nodes[
node.name
]
name = shape_node.name
if output_index > 0:
name += ":" + str(output_index)
converted = self._backtrack_construct(name)
shape = _infer_shape(converted, self._mod)
if wnode_op.startswith("TensorArraySplit"):
shape = (Any(),) + shape[1:]
elif wnode_op.startswith("TensorArrayScatter"):
shape = shape[1:]
if node.name in self._tensor_array_shapes:
preset_shape = self._tensor_array_shapes[node.name]
shape = _get_more_static_shape(shape, preset_shape)
if "shape" in attr:
attr["shape"] = _get_more_static_shape(shape, attr["shape"])
else:
attr["shape"] = shape
# LICM
if plname in self._while_loop_name_set:
for i, iname in enumerate(node.input):
actual_input = self._licm_construct(plname, iname)
inputs[i] = actual_input
op = self._convert_operator(node.op, node.name, inputs, attr)
if isinstance(op, np.ndarray):
self._params[node.name] = tvm.nd.array(op)
op = [
_expr.var(
node.name,
shape=self._params[node.name].shape,
dtype=self._params[node.name].dtype,
)
]
elif isinstance(op, (_expr.Expr, _expr.TupleGetItem)):
op = [op]
self._nodes[input_op_name] = op
out = self._nodes[input_op_name]
if isinstance(out, _expr.TupleWrapper):
tn = node_name.split(":")
tensor_slot = int(tn[1]) if len(tn) > 1 else 0
return out[tensor_slot]
return out[0]
class SubGraphProto(GraphProto):
"""A helper class for handling relay subgraph copying from Tensorflow GraphDef."""
def __init__(self, main_graph_proto):
super().__init__()
self._main_graph_proto = main_graph_proto # holds main graph proto object
def from_tensorflow(self, graph, layout="NHWC", shape=None, outputs=None):
"""Wrapper to _get_relay_func which converts Tensorflow graph to Relay function.
Return Relay function and params
"""
func = self._get_relay_func(graph, layout=layout, shape=shape, outputs=outputs)
return func, self._params
def from_tensorflow(graph, layout="NHWC", shape=None, outputs=None, convert_config=None):
"""Load tensorflow graph which is a python tensorflow graph object into relay.
The companion parameters will be handled automatically.
Parameters
----------
graph : GraphDef object
Tensorflow GraphDef
layout : target layout to be used (Optional)
Only "NCHW" is currently supported as an alternative, to enable NHWC models on GPU.
shape : Dictionary of input dimensions (Optional)
Graph level input shape dictionary.
outputs : List of output tensor names (Optional)
if not specified then the last node is assumed as graph output.
convert_config : Optional[Dict[str, Any]]
Default config:
use_dense : bool = True
True to convert `tf.matmul` to `nn.dense`, else to `nn.matmul`.
The `nn.dense` op requires the data tensor to be non-transposed and the weight tensor
to be transposed, so extra `transpose` ops may be inserted into the original graph.
use_nt_batch_matmul : bool = True
True to convert `tf.batch_matmul` to `nn.batch_matmul` strictly in NT format
(transpose_a=False, transpose_b=True).
Returns
-------
mod : tvm.IRModule
The module that optimizations will be performed on.
params : dict of str to tvm.nd.NDArray
Dict of converted parameters stored in tvm.nd.NDArray format
"""
global TF_DEFAULT_CONFIGS
if convert_config is not None:
TF_DEFAULT_CONFIGS.update(convert_config)
g = GraphProto()
mod, params = g.from_tensorflow(graph, layout, shape, outputs)
return mod, params
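# A minimal usage sketch for the converter above (illustrative only; the
# frozen `graph_def`, the input name "input" and its shape are assumptions):
#
#   import tvm
#   from tvm import relay
#   from tvm.relay.frontend.tensorflow import from_tensorflow
#
#   mod, params = from_tensorflow(graph_def, layout="NHWC", shape={"input": (1, 224, 224, 3)})
#   with tvm.transform.PassContext(opt_level=3):
#       lib = relay.build(mod, target="llvm", params=params)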
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/tensorflow2.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, too-many-lines, len-as-condition, broad-except, too-many-nested-blocks
"""Tensorflow2.x graph to relay converter.
If the model is constructed using the tf2.x API, then use this converter:
from tvm.relay.frontend.tensorflow2 import from_tensorflow
Otherwise use the tf1.x converter:
from tvm.relay.frontend.tensorflow import from_tensorflow
"""
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import function_def_to_graph, tensor_util, dtypes
import tvm
from tvm.relay.transform import InferType
from tvm.relay.prelude import Prelude
from tvm.ir import IRModule
from .. import expr as _expr
from .. import analysis
from .. import function as _function
from ..loops import while_loop as _while_loop
from .common import infer_type as _infer_type
from .tensorflow_ops import _convert_map as _convert_map_common
from .tensorflow_ops import _get_more_static_shape_rank
from .tensorflow2_ops import _convert_map as _convert_map_tf2
from .tensorflow2_ops import _need_prelude_for_shape_inference
from ..ty import Any
__all__ = ["from_tensorflow"]
# A map to record tensor list write ops and input tl/tensor indices
# Value is (index of tensor list, index of written node)
_tensor_list_write_ops = {
"TensorListSetItem": (0, 2),
}
def _infer_type_with_prelude(val, prelude):
body = _infer_type(val, prelude.mod)
return body.checked_type
def set_span(sym, node_name):
"""set span of symbol"""
span = tvm.relay.Span(tvm.relay.SourceName(node_name), 0, 0, 0, 0)
if isinstance(sym, _expr.Call):
sym = _expr.Call(sym.op, sym.args, sym.attrs, sym.type_args, span)
elif isinstance(sym, _expr.TupleWrapper):
tuple_value = sym.tuple_value
if isinstance(tuple_value, _expr.Call):
tuple_value = _expr.Call(
tuple_value.op, tuple_value.args, tuple_value.attrs, tuple_value.type_args, span
)
sym = _expr.TupleWrapper(tuple_value, sym.size)
return sym
def is_tensor_list_constuctor(tf_node):
"""Check whether is tensor list constructor node."""
return tf_node.op == "TensorListReserve"
def convert_const_node(node, shape):
"""convert tf const node into relay const or var"""
# get the value of the constant
tensor_value = node.attr["value"].tensor
np_array = tensor_util.MakeNdarray(tensor_value)
if np_array.dtype == np.dtype(object):
if shape and node.name in shape:
var_shape = shape[node.name]
else:
var_shape = tensor_util.TensorShapeProtoToList(tensor_value.tensor_shape)
param = None
sym = [_expr.var(node.name, shape=var_shape, dtype="uint8")]
return sym, param
if len(np_array.shape) == 0:
param = None
sym = [tvm.relay.const(np_array, np_array.dtype)]
else:
param = tvm.nd.array(np_array)
sym = [_expr.var(node.name, shape=param.shape, dtype=param.dtype)]
return sym, param
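# Illustrative sketch of the behaviour above (values are assumptions): a
# scalar constant node "add/y" holding 1.0 becomes [relay.const(1.0, "float32")]
# with no parameter entry, while a (2, 2) float constant becomes a free var
# [_expr.var("add/y", shape=(2, 2), dtype="float32")] plus a params entry
# mapping "add/y" to a tvm.nd.array of the constant values.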
def get_attr(buf):
"""convert value of a node attribute. node attribute is part of a node in a graph.
Parameters
----------
buf: attrvalue protobuf. <class 'tensorflow.core.framework.attr_value_pb2.AttrValue'>
Returns
-------
The value of the attr, as a Python object.
Raises:
-------
ValueError: If this op does not have an attr with the given `name`.
"""
fields = ["s", "i", "f", "b", "type", "shape", "tensor", "func"]
ret = []
if not buf.WhichOneof("value"):
return ret
if buf.HasField("list"):
for f in fields:
if getattr(buf.list, f):
if f == "type":
ret += [dtypes.as_dtype(x) for x in list(getattr(buf.list, f))]
else:
ret += list(getattr(buf.list, f))
else:
for f in fields:
if buf.HasField(f):
if f == "type":
ret = dtypes.as_dtype(getattr(buf, f))
else:
ret = getattr(buf, f)
return ret
def parse_attr(attr_proto):
"""Convert node attributes (a serialized map of key-value pairs) in a node to a dict
Parameters
----------
attr_proto: <class 'google.protobuf.pyext._message.MessageMapContainer'>
Returns
-------
Dict {string: python object}
"""
attrs = {}
for key, value in attr_proto.items():
attrs[key] = get_attr(value)
return attrs
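# Illustrative sketch (assumed attr values): for a NodeDef attr map such as
# {"dtype": DT_FLOAT, "shape": <TensorShapeProto [2, 2]>}, parse_attr returns a
# plain dict like {"dtype": tf.float32, "shape": <TensorShapeProto>}, with
# list-valued attrs flattened into Python lists by get_attr above.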
def convert_placeholder(shape, node, in_type=None):
"""convert tf placeholder into relay var.
Example
--------
a tf placeholder with name "x" is converted to [Var(x, ty=TensorType([], float32))]
"""
if shape and node.name in shape:
input_shape = list(shape[node.name])
else:
input_shape = tensor_util.TensorShapeProtoToList(node.attr["shape"].shape)
for idx, dim in enumerate(input_shape):
if dim < 0:
input_shape[idx] = Any()
attr = parse_attr(node.attr)
if in_type is not None:
sym = [_expr.var(node.name, type_annotation=in_type)]
else:
sym = [_expr.var(node.name, shape=input_shape, dtype=attr["dtype"].name)]
return input_shape, sym
class RelayModule:
"""states related to the entire relay module (multiple functions)
after converted from tf graphdef"""
def __init__(self):
self.mod = IRModule({})
self.params = {}
self.prelude = Prelude(self.mod)
class GraphProto:
"""Capturing states when converting a tf graph to a single relay function."""
def __init__(self, module):
self._module = module
self._prelude = self._module.prelude
self._params = {}
self._nodes = {}
self._input_shapes = {}
self._output_shapes = {}
self._tf_node_map = {}
self._gdef_lib = {}
self._tensor_list_shapes = {}
self._tensor_list_shape_nodes = {}
self._sub_map = {}
self._sub_input_idx_map = {}
def from_tensorflow(
self, graph, layout="NHWC", shape=None, outputs=None, input_types=None, gdef_lib=None
):
"""Wrapper to _get_relay_func which converts Tensorflow graph to Relay function
which is used as main function for the Relay module
"""
if input_types is None:
input_types = {}
if gdef_lib is None:
gdef_lib = {}
self._gdef_lib = gdef_lib
func = self._get_relay_func(
graph, layout=layout, shape=shape, outputs=outputs, input_types=input_types
)
return func, self._params
def _analysis_tensor_list_op(
self,
graph,
node,
tl_write_nodes,
tl_stack_nodes,
tl_construct_nodes,
sub_func_name="",
root_node="",
):
if sub_func_name and sub_func_name not in self._sub_input_idx_map:
self._sub_input_idx_map[sub_func_name] = {}
if node.op == "Placeholder":
# record placeholder node in sub functions
self._sub_map[sub_func_name] = node
self._sub_input_idx_map[sub_func_name][node.name] = len(
self._sub_input_idx_map[sub_func_name]
)
if node.op.startswith("TensorList"):
if is_tensor_list_constuctor(node):
tl_construct_nodes.append(node)
else:
for tl_write_name, idx in _tensor_list_write_ops.items():
if node.op.startswith(tl_write_name):
tl_write_nodes.append((node, idx, sub_func_name, root_node))
if node.op.startswith("TensorListStack"):
tl_stack_nodes.append(node)
elif node.op.startswith("StatelessWhile"):
root_node = node.name
cond_fn_name, body_fn_name = [
parse_attr(node.attr).get(x).name for x in ["cond", "body"]
]
for fn_name in [cond_fn_name, body_fn_name]:
subfunction = self._gdef_lib[fn_name]
sub_func_name = fn_name
for sub_node in subfunction.node:
# bypass const node
if sub_node.op == "Const":
continue
self._tf_node_map[sub_node.name] = sub_node
self._analysis_tensor_list_op(
subfunction,
sub_node,
tl_write_nodes,
tl_stack_nodes,
tl_construct_nodes,
sub_func_name=sub_func_name,
root_node=root_node,
)
def _infer_static_shape_stack_node(self, tl_stack_nodes):
for stack_node in tl_stack_nodes:
if len(stack_node.input) < 2:
# Stack node does not have shape
continue
input_shape_name = stack_node.input[1].split(":")[0]
input_shape_node = self._tf_node_map[input_shape_name]
stack = [self._tf_node_map[stack_node.input[0].split(":")[0]]]
in_idx = -1
while stack:
cnode = stack.pop(0)
if not cnode.op.startswith("TensorList"):
if in_idx and cnode.op.startswith("StatelessWhile"):
stack.append(self._tf_node_map[cnode.input[in_idx].split(":")[0]])
else:
for iname in cnode.input:
if self._tf_node_map[iname.split(":")[0]].op.startswith(
"StatelessWhile"
):
# identify input index based on output index
if iname.split(":")[1]:
in_idx = int(iname.split(":")[1])
stack.append(self._tf_node_map[iname.split(":")[0]])
# identify the corresponding constructor node and add shape to _tensor_list_shapes
elif cnode.name != stack_node.name:
if is_tensor_list_constuctor(cnode):
shape_attr = parse_attr(input_shape_node.attr)
if "value" not in shape_attr:
continue
raw_elem_shape = tensor_util.MakeNdarray(shape_attr["value"])
elem_shape = []
for dim in raw_elem_shape:
if dim < 0:
elem_shape.append(Any())
else:
elem_shape.append(int(dim))
self._tensor_list_shapes[cnode.name] = elem_shape
break
def _infer_static_shape_write_node(self, tl_write_nodes):
for item in tl_write_nodes:
wnode = item[0]
ta_idx, inode_idx = item[1]
sub_func_name = item[2]
root_name = item[3]
stack = [self._tf_node_map[wnode.input[ta_idx].split(":")[0]]]
while stack:
cnode = stack.pop(0)
if not cnode.op.startswith("TensorList"):
if cnode.op == "Placeholder" and sub_func_name:
# need to map subfunction
input_idx = self._sub_input_idx_map[sub_func_name][cnode.name]
stack.append(
self._tf_node_map[
self._tf_node_map[root_name].input[input_idx].split(":")[0]
]
)
else:
for iname in cnode.input:
stack.append(self._tf_node_map[iname.split(":")[0]])
# identify the corresponding constructor node and add it to _tensor_list_shape_nodes
elif cnode.name != wnode.name:
if is_tensor_list_constuctor(cnode):
inode = self._tf_node_map[wnode.input[inode_idx].split(":")[0]]
tn = wnode.input[inode_idx].split(":")
output_index = int(tn[1]) if len(tn) > 1 else 0
self._tensor_list_shape_nodes[cnode.name] = (inode, wnode.op, output_index)
break
def _get_relay_func(self, graph, layout="NHWC", shape=None, outputs=None, input_types=None):
if input_types is None:
input_types = {}
tl_write_nodes = []
tl_stack_nodes = []
tl_construct_nodes = []
self._layout = layout
for node in graph.node:
name = node.name
self._tf_node_map[name] = node
if node.op == "Placeholder":
in_type = None
if node.name in input_types:
in_type = input_types[node.name]
self._input_shapes[name], self._nodes[name] = convert_placeholder(
shape, node, in_type
)
elif node.op == "Const":
sym, param = convert_const_node(node, shape)
self._nodes[node.name] = sym
if param:
self._params[node.name] = param
# recursively analyze tensor list ops when a while loop is seen
else:
self._analysis_tensor_list_op(
graph, node, tl_write_nodes, tl_stack_nodes, tl_construct_nodes
)
# Use tensor list stack to infer static tensor list shape
self._infer_static_shape_stack_node(tl_stack_nodes)
# Fetch node contains static tensor list shape
self._infer_static_shape_write_node(tl_write_nodes)
for node in graph.node:
self._backtrack_construct(graph, node.name)
return self._func(graph, outputs)
def _func(self, graph, outputs):
out = []
if outputs is None:
last_node = graph.node[-1]
op = self._nodes[last_node.name.split(":")[0]]
if last_node.op == "Exit":
out = [op[0].tuple_value]
else:
out = op
else:
for out_name in outputs:
if ":" in out_name:
out_name = out_name.split(":")
out_name, out_num = out_name[0], out_name[-1]
out_num = int(out_num)
out.append(self._nodes[out_name][out_num])
else:
out.append(self._nodes[out_name][0])
if isinstance(out, _expr.TupleWrapper):
out = out.astuple()
else:
out = out[0] if len(out) == 1 else _expr.Tuple(out)
fvars = analysis.free_vars(out)
func = _function.Function(fvars, out)
final_params = {}
for fv in fvars:
if fv.name_hint in self._params:
final_params[fv.name_hint] = self._params[fv.name_hint]
self._params = final_params
return func
def _convert_operator(self, graph, op_name, node_name, inputs, attrs):
"""Convert from Tensorflow operator to relay operator.
The converter must specify conversions explicitly for incompatible names, and
apply handlers to operator attributes.
Parameters
----------
graph: <class 'tensorflow.core.framework.graph_pb2.GraphDef'>
TF2 frozen graph def
op_name : str
Operator name, such as Conv2D, AvgPool
node_name: str
Name of the node in TF2 graph, such as Identity:0
inputs : list of relay.op
List of input symbols.
attrs : dict
Dict of operator attributes
Returns
-------
sym : relay.op
Converted relay operator
"""
if op_name in ["PartitionedCall", "StatefulPartitionedCall"]:
sym = _partition_call_operator(
self._module,
graph,
inputs,
attrs,
self._prelude,
gdef_lib=self._gdef_lib,
)
elif op_name in ["StatelessIf", "If"]:
sym = _convert_if(
self._module, graph, inputs, attrs, self._prelude, gdef_lib=self._gdef_lib
)
elif op_name in ["StatelessWhile", "While"]:
sym = _convert_loop(
self._module,
graph,
inputs,
attrs,
node_name,
self._tf_node_map,
self._prelude,
gdef_lib=self._gdef_lib,
)
elif op_name in _convert_map_common:
# assert the common and tf2-specific op maps are exclusive
assert not set(_convert_map_common.keys()) & set(_convert_map_tf2.keys())
if _need_prelude_for_shape_inference(op_name):
sym = _convert_map_common[op_name](inputs, attrs, self._params, self._prelude)
else:
sym = _convert_map_common[op_name](inputs, attrs, self._params, self._module.mod)
elif op_name in _convert_map_tf2:
if _need_prelude_for_shape_inference(op_name):
sym = _convert_map_tf2[op_name](inputs, attrs, self._params, self._prelude)
else:
sym = _convert_map_tf2[op_name](inputs, attrs, self._params, self._module.mod)
else:
raise NotImplementedError("Operator {} not implemented.".format(op_name))
sym = set_span(sym, node_name)
return sym
def _parse_element_shape(self, elem_shape, shape_attr):
if "value" in shape_attr:
raw_elem_shape = tensor_util.MakeNdarray(shape_attr["value"])
if raw_elem_shape.size == 1 and raw_elem_shape == -1:
elem_shape.append(Any())
else:
for dim in raw_elem_shape:
if dim < 0:
elem_shape.append(Any())
else:
elem_shape.append(dim)
def _backtrack_construct(self, graph, node_name):
"""Convert a specific tensorflow node to relay expression.
If any of its ancestor node is not converted yet, backtrack as
far as input node and covert all nodes on the path. resurion is used here.
This is required when parsing control flow nodes, since the parsing
order may not follow the original graph def.
to discover input node, current tf node's input is iterated:
tensorflow/core/framework/node_def.proto
message NodeDef {
repeated string input = 3;
}
A node has many inputs (other nodes). Each input has the following format:
a data input is "node:src_output", where node is the string name;
a control input is "^node".
Parameters
----------
graph : <class 'tensorflow.core.framework.graph_pb2.GraphDef'>
TF2 frozen graph def
node_name : str
node name
Returns
-------
op : relay.Expr
Converted relay expression.
Examples
--------
tf expression "x+1" is converted to relay expression:
CallNode(Op(add), [Var(x, ty=TensorType([], float32)), Constant(1.0)], (nullptr), [])
"""
input_op_name = node_name.split(":")[0].split("^")[-1]
if input_op_name not in self._nodes:
node = self._tf_node_map[input_op_name]
attr = parse_attr(node.attr)
if "_output_shapes" in attr:
self._output_shapes[node.name] = [
tensor_util.TensorShapeProtoToList(tshape) for tshape in attr["_output_shapes"]
]
else:
self._output_shapes[node.name] = [None]
attr["_output_shapes"] = self._output_shapes[input_op_name]
attr["_node_name"] = node.name
attr["_target_layout"] = self._layout
inputs = [self._backtrack_construct(graph, iname) for iname in node.input]
# infer shape for TensorList op
if is_tensor_list_constuctor(node):
input_shape_name = (
node.input[1] if "TensorListFromTensor" in node.op else node.input[0]
)
input_shape_name = input_shape_name.split(":")[0]
input_shape_node = self._tf_node_map[input_shape_name]
shape_attr = parse_attr(input_shape_node.attr)
elem_shape = []
self._parse_element_shape(elem_shape, shape_attr)
if elem_shape:
attr["shape"] = elem_shape
if (
"identical_element_shapes" in attr and attr["identical_element_shapes"]
) or elem_shape:
shape = elem_shape
if node.name in self._tensor_list_shapes:
preset_shape = self._tensor_list_shapes[node.name]
shape = _get_more_static_shape_rank(shape, preset_shape)
attr["shape"] = shape
op = self._convert_operator(graph, node.op, node.name, inputs, attr)
if isinstance(op, np.ndarray):
self._params[node.name] = tvm.nd.array(op)
op = [
_expr.var(
node.name,
shape=self._params[node.name].shape,
dtype=self._params[node.name].dtype,
)
]
elif isinstance(op, (_expr.Expr, _expr.TupleGetItem)):
op = [op]
self._nodes[input_op_name] = op
out = self._nodes[input_op_name]
if isinstance(out, _expr.TupleWrapper):
tn = node_name.split(":")
tensor_slot = int(tn[1]) if len(tn) > 1 else 0
return out[tensor_slot]
return out[0]
def _partition_call_operator(module, graph, inputs, attr, prelude, gdef_lib):
"""convert tf PartitionedCall node to a relay function call"""
node_func_name = attr.get("f").name
return _convert_function(
module, graph, inputs, attr, node_func_name, prelude, gdef_lib=gdef_lib
)
def _convert_if(module, graph, inputs, attr, prelude, gdef_lib):
"""Convert tf If/StatelessIf to Relay If"""
cond_expr = inputs[0]
branch_names = [attr.get(x).name for x in ["then_branch", "else_branch"]]
then_fn, else_fn = [
_convert_function(module, graph, inputs[1:], attr, name, prelude, gdef_lib=gdef_lib)
for name in branch_names
]
out = _expr.If(cond_expr, then_fn, else_fn)
return out
def _convert_loop(module, graph, inputs, attr, node_name, nodes, prelude, gdef_lib):
"""convert tf while_loop to Relay loop"""
input_size = len(inputs)
cond_fn_name, body_fn_name = [attr.get(x).name for x in ["cond", "body"]]
def convert_vars(loop_inputs, input_signature):
"""convert inputs to relay vars to be used as loop variables
Loop inputs are packed as:
[iteration_number, max_iterations, loop_variables...]
"""
new_vars = []
for i, v in enumerate(loop_inputs):
if isinstance(v, _expr.Constant):
vtype = _infer_type(v).checked_type.dtype
new_vars.append(_expr.var(input_signature[i].name, shape=(), dtype=vtype))
else:
vtype = _infer_type_with_prelude(v, prelude)
new_vars.append(_expr.var(input_signature[i].name, type_annotation=vtype))
return new_vars
while_func = next(
(f for f in graph.library.function if f.signature.name == attr["body"].name),
None,
)
loop_inputs = convert_vars(inputs, while_func.signature.input_arg)
def cond_fn(*loop_inputs):
return _convert_function(
module, graph, loop_inputs, attr, cond_fn_name, prelude, gdef_lib=gdef_lib
)
# Define the loop body; in this function we need to unpack loop inputs,
# convert the loop subgraph, and pack outputs for the next iteration.
def body_fn(*loop_inputs):
# Increment loop iteration counter
loop_count = loop_inputs[0] + _expr.const(1, dtype="int32")
max_count = loop_inputs[1]
fn = _convert_function(
module, graph, loop_inputs, attr, body_fn_name, prelude, gdef_lib=gdef_lib
)
# Repack loop variables
out = [loop_count, max_count] + [_expr.TupleGetItem(fn, i) for i in range(2, input_size)]
return out
loop = _while_loop(cond_fn, loop_inputs, body_fn)
outputs = loop(*inputs)
outputs = _expr.TupleWrapper(
_expr.Tuple([_expr.TupleGetItem(outputs, i) for i in range(input_size)]), input_size
)
return outputs
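# Sketch of the loop-variable packing used above: a TF2 StatelessWhile carries
# its state as [iteration_number, max_iterations, loop_variables...], so
# body_fn re-emits the incremented counter and max_count explicitly and pulls
# the remaining state out of the converted body with TupleGetItem before the
# next iteration.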
def _convert_function(
module, graph, inputs, attr, node_func_name, prelude, gdef_lib, in_shapes=None
):
"""Convert given tf node to a relay function call
Parameters
----------
module : IRModule
where converted function is stored
graph: <class 'tensorflow.core.framework.graph_pb2.GraphDef'>
top level tf graphdef
inputs : List[tvm.relay.Expr]
List of input symbols. Parameters for the function.
attrs : Dict[tvm.Attrs]
Dict of operator attributes.
node_func_name : str
Name of the tf2 graph library function to be converted
Returns
-------
op : tvm.relay.Expr
<class 'tvm.relay.expr.Call'>
Examples
--------
a tf function "x+1", is implemented as a subgraph in the library section of the graph.
this subgraph is converted to a relay function such as
fn (%x: float32) {
add(%x, 1f) /* Identity */
}
The subgraph has a function name such as __inference_add_95.
The tf function call operator is returned as a relay expression, such as:
free_var %x: float32;
@func___inference_add_95(%x)
"""
func = next(
(f for f in graph.library.function if f.signature.name == node_func_name),
None,
)
if func is None:
raise Exception("Function not found - {}".format(node_func_name))
devices = set(node.device for node in func.node_def)
if len(devices) > 1:
raise Exception(
"node_def in function {} contains > 1 types of devices {}".format(
node_func_name, devices
)
)
subgraph = gdef_lib[node_func_name]
# preserve library functions in subgraphs to make them available to nested functions
for fn in graph.library.function:
subgraph.library.function.add().CopyFrom(fn)
# Computing subgraph's input shape and type dictionaries
input_expr_dict = {}
input_types = {}
for f_arg, input_ in zip(func.signature.input_arg, inputs):
input_expr_dict[f_arg.name] = input_
input_types[f_arg.name] = _infer_type_with_prelude(input_, prelude)
func_name = "func_{}".format(func.signature.name)
try:
global_func = module.mod[func_name]
sub_func = global_func
sub_params = module.params
except ValueError:
# Construct relay nodes from the subgraph
g1 = GraphProto(module)
output_sig = [func.ret[f.name] for f in func.signature.output_arg]
# TODO: unify prelude and main IRModules
sub_func, sub_params = g1.from_tensorflow(
subgraph, outputs=output_sig, input_types=input_types, gdef_lib=gdef_lib
)
module.params.update(sub_params)
func_expr = _function.Function(sub_func.params, sub_func.body)
global_func = tvm.relay.GlobalVar(func_name)
module.mod[global_func] = func_expr
module.mod = InferType()(module.mod)
prelude.mod = module.mod
param_exprs = []
for param_expr in sub_func.params:
# sub_params is subset of sub_func.params
param_name = param_expr.vid.name_hint
if param_name in input_expr_dict.keys():
param_exprs.append(input_expr_dict[param_name])
elif param_name in sub_params.keys():
param_exprs.append(param_expr)
else:
raise Exception("Input parameter {} not found".format(param_name))
sb = tvm.relay.scope_builder.ScopeBuilder()
loop_ret = global_func(*param_exprs)
sb.ret(loop_ret)
ret = sb.get()
return ret
def from_tensorflow(graph_def, layout="NHWC", shape=None, outputs=None):
"""convert tensorflow2.x graph into relay function.
Parameters
----------
graph_def : must be a frozen graph (no variables allowed).
Placeholders are assumed to be inputs to the graph.
tensorflow/core/framework/graph.proto
message GraphDef {
repeated NodeDef node = 1;
FunctionDefLibrary library = 2;
}
tensorflow/core/framework/function.proto
message FunctionDef {
repeated NodeDef node_def = 3;
}
layout : str
The layout for the model.
shape : Dict[str, List[int]]
Input shapes for the model. It is a mapping from input name to shape vector. Applies to placeholders.
outputs : List[str]
The list of output nodes. The last node is treated as the output if not
specified.
Returns
-------
mod : tvm.IRModule
The module that optimizations will be performed on.
params : dict of str to tvm.nd.NDArray
Dict of converted parameters stored in tvm.nd.NDArray format.
Examples
--------
"x+1" tf module where x has a shape of (2,2) is converted as follows:
mod : tvm.IRModule
def @func___inference_add_95(%x: Tensor[(2, 2), float32], %add/y: Tensor[(2, 2), float32])
-> Tensor[(2, 2), float32] {
add(%x, %add/y) /* Identity */ /* ty=Tensor[(2, 2), float32] */
}
def @main(%x1: Tensor[(2, 2), float32], %add/y1: Tensor[(2, 2), float32]) {
@func___inference_add_95(%x1, %add/y1) /* Identity */
}
params : dict of str to tvm.nd.NDArray
{'add/y': <tvm.nd.NDArray shape=(2, 2), cpu(0)>}
"""
with tf.Graph().as_default():
tf.import_graph_def(graph_def, name="")
# Subgraph graph_defs are cached here to avoid a TF error when parsing after prelude init
graph_def_library = {}
for func in graph_def.library.function:
inshape = func.attr["_input_shapes"].list.shape
(
graph_def_library[func.signature.name],
_,
) = function_def_to_graph.function_def_to_graph_def(func, inshape)
module = RelayModule()
g = GraphProto(module)
func, params = g.from_tensorflow(
graph_def, layout, shape, outputs, gdef_lib=graph_def_library
)
module.mod["main"] = func
module.params.update(params)
return module.mod, module.params
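# A minimal usage sketch (illustrative; `graph_def` is assumed to be a frozen
# TF2 GraphDef, e.g. obtained from a ConcreteFunction via
# convert_variables_to_constants_v2, and the input name/shape are assumptions):
#
#   from tvm.relay.frontend.tensorflow2 import from_tensorflow
#   mod, params = from_tensorflow(graph_def, layout="NHWC", shape={"x": (2, 2)})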
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/tensorflow2_ops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, too-many-lines, len-as-condition, broad-except
"""Tensorflow2.x to relay converter ops and helper"""
import tvm
from tvm.relay.prelude import StaticTensorArrayOps, get_tensor_array_shape
from .. import op as _op
from ..ty import Any
from .common import infer_value as _infer_value
from .common import infer_type as _infer_type
from .tensorflow_ops import _get_more_static_shape_rank
def _infer_type_with_prelude(val, prelude):
body = _infer_type(val, prelude.mod)
return body.checked_type
def _need_prelude_for_shape_inference(op):
return "TensorList" in op or "TensorArray" in op
def _tensorlist_reserve():
def _impl(inputs, attr, params, prelude):
dtype_str = attr.get("element_dtype").name
elem_shape = _infer_value(inputs[0], params, prelude.mod)
elem_shape = tuple(elem_shape.numpy().astype("int32").flatten())
if elem_shape or "shape" in attr:
shape = attr["shape"] if "shape" in attr else elem_shape
static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, shape)
static_tensor_array_ops.register()
tensor_array_constructor = static_tensor_array_ops.get_global_var("tensor_array")
tensor_array = tensor_array_constructor(inputs[1])
else:
tensor_array_constructor = prelude.get_global_var("tensor_array", dtype_str)
tensor_array = tensor_array_constructor(inputs[1])
return tensor_array
return _impl
def _tensorlist_set_item():
def _impl(inputs, attr, params, prelude):
dtype_str = attr.get("element_dtype").name
input_ta = inputs[0]
input_ta_shape = get_tensor_array_shape(input_ta, dtype_str, prelude)
input_t_shape = _infer_type_with_prelude(inputs[2], prelude).shape
input_rank = len(input_t_shape)
if input_ta_shape is None:
tensor_name = "tensor{}".format(input_rank)
tensor_func = prelude.get_tensor_ctor(tensor_name, dtype_str)
v = tensor_func(inputs[2])
write_func = prelude.get_global_var("tensor_array_write", dtype_str)
out = write_func(input_ta, inputs[1], v)
else:
static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, input_ta_shape)
static_tensor_array_ops.register()
tensor_func = static_tensor_array_ops.get_ctor("tensor_constructor")
v = tensor_func(inputs[2])
# Write tensor with more static shape
# convert shape with -1 to any()
input_ta_shape_a = []
for dim in input_ta_shape:
if isinstance(dim, (int, tvm.tir.expr.IntImm)):
if dim < 0:
input_ta_shape_a.append(Any())
else:
input_ta_shape_a.append(dim)
else:
input_ta_shape_a.append(dim)
actual_shape = _get_more_static_shape_rank(input_t_shape, input_ta_shape_a)
if actual_shape != input_ta_shape_a:
new_shape = []
num_any_dim = 0
for dim in actual_shape:
if not isinstance(dim, int):
num_any_dim += 1
new_shape.append(dim if isinstance(dim, int) else -1)
if num_any_dim <= 1:
v = tensor_func(_op.reshape(inputs[2], new_shape))
write_func = prelude.get_global_var_static(
"tensor_array_write", dtype_str, input_ta_shape_a
)
out = write_func(input_ta, inputs[1], v)
return out
return _impl
def _tensorlist_get_item():
def _impl(inputs, attr, params, prelude):
dtype_str = attr["element_dtype"].name
input_shape = get_tensor_array_shape(inputs[0], dtype_str, prelude)
if input_shape is None:
read_func = prelude.get_global_var("tensor_array_read", dtype_str)
out = read_func(inputs[0], _op.take(inputs[1], tvm.relay.const(0)))
else:
static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, input_shape)
static_tensor_array_ops.register()
read_func = static_tensor_array_ops.get_global_var("tensor_array_read")
out_tensor = read_func(inputs[0], _op.take(inputs[1], tvm.relay.const(0)))
get_data_func = static_tensor_array_ops.get_global_var("tensor_get_data")
out = get_data_func(out_tensor)
return out
return _impl
def _tensorlist_stack():
def _impl(inputs, attr, params, prelude):
dtype_str = attr["element_dtype"].name
input_ta_shape = get_tensor_array_shape(inputs[0], dtype_str, prelude)
if input_ta_shape is None:
stack_func = prelude.get_global_var("tensor_array_stack", dtype_str)
out = stack_func(inputs[0])
else:
if "num_elements" in attr:
num_elements = attr["num_elements"]
static_tensor_array_ops = StaticTensorArrayOps(
prelude, dtype_str, input_ta_shape, num_elements
)
static_tensor_array_ops.register()
stack_func = prelude.get_global_var_static(
"tensor_array_stack", dtype_str, input_ta_shape, num_elements
)
out_tensor = stack_func(inputs[0])
out_shape = (
(num_elements,) + input_ta_shape
if num_elements and num_elements == 1
else (Any(),) + input_ta_shape
)
static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, out_shape)
static_tensor_array_ops.register()
get_data_func = prelude.get_global_var_static("tensor_get_data", dtype_str, out_shape)
out = get_data_func(out_tensor)
return out
return _impl
def _tensorlist_from_tensor():
def _impl(inputs, attr, params, prelude):
dtype_str = attr["element_dtype"].name
input_ta_shape = _infer_type_with_prelude(inputs[0], prelude).shape
if input_ta_shape is None:
unstack_func = prelude.get_global_var("tensor_array_unstack", dtype_str)
out = unstack_func(inputs[0])
else:
static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, input_ta_shape)
static_tensor_array_ops.register()
unstack_func = prelude.get_global_var_static(
"tensor_array_unstack", dtype_str, input_ta_shape
)
out = unstack_func(inputs[0])
return out
return _impl
_convert_map = {
"TensorListFromTensor": _tensorlist_from_tensor(),
"TensorListGetItem": _tensorlist_get_item(),
"TensorListReserve": _tensorlist_reserve(),
"TensorListSetItem": _tensorlist_set_item(),
"TensorListStack": _tensorlist_stack(),
}
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/tensorflow_ops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument, too-many-lines, len-as-condition, broad-except
# pylint: disable=import-outside-toplevel, redefined-builtin
"""TF: Tensorflow frontend."""
import warnings
from collections import deque
# Numpy support
import numpy as np
import tvm
from tvm.relay.prelude import StaticTensorArrayOps, get_tensor_array_shape
from tvm.topi.utils import get_const_tuple
from .. import expr as _expr
from .. import op as _op
from ..ty import Any
from .common import AttrCvt, get_relay_op
from .common import infer_type as _infer_type
from .common import infer_shape as _infer_shape
from .common import infer_channels as _infer_channels
from .common import infer_value as _infer_value
def check_symbolic_shape(shape):
return not all([isinstance(dim, (int, tvm.tir.IntImm)) for dim in shape])
def list_shape_of(tensor, ndim):
shape_tensor = _op.shape_of(tensor)
return [
_op.strided_slice(shape_tensor, begin=[i], end=[i + 1], strides=[1]) for i in range(ndim)
]
def _get_pad_pair(input1d, kernel1d, stride1d):
if isinstance(input1d, tvm.tir.Any) and stride1d != 1:
raise tvm.error.OpAttributeUnImplemented(
"SAME padding is not supported in combination with dynamic height or width when stride"
" is not 1."
)
if stride1d == 1 or input1d % stride1d == 0:
pad = max(kernel1d - stride1d, 0)
else:
pad = max(kernel1d - (input1d % stride1d), 0)
pad_before = pad // 2
pad_after = pad - pad_before
return [pad_before, pad_after]
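# Worked example for the SAME-padding helper above: with input1d=224,
# kernel1d=3 and stride1d=2, the input divides evenly by the stride, so
# pad = max(3 - 2, 0) = 1, giving pad_before = 0 and pad_after = 1, i.e. [0, 1].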
def _math_name_picker(surfix):
def _impl(attr):
return "broadcast_" + surfix
return _impl
def _dimension_picker(prefix, surfix=""):
def _impl(attr):
kernel = attr["kernel_shape"]
if len(kernel) == 2:
return prefix + "2d" + surfix
if len(kernel) == 3:
return prefix + "3d" + surfix
raise tvm.error.OpAttributeInvalid(
"Only 2D or 3D kernels are supported for operator {}".format(prefix + "2d or 3d")
)
return _impl
def _dimension_constraint():
def _dim_check(attrs):
if len(attrs["kernel_shape"]) in (2, 3):
return True
return False
return _dim_check, "Only 2d or 3d kernel supported."
def _get_param(params, input_node):
if isinstance(input_node, _expr.Constant):
return np.atleast_1d(input_node.data.numpy())
return params[input_node.name_hint].numpy()
def _get_num_param(params, input_node):
return _get_param(params, input_node).item()
def _get_list_param(params, input_node, mod):
try:
return _get_param(params, input_node).tolist()
except (IndexError, KeyError, AttributeError):
return _infer_value(input_node, params, mod).numpy().tolist()
def _get_tuple_param(params, input_node):
return tuple(_get_param(params, input_node))
def _need_prelude_for_shape_inference(op):
return "TensorArray" in op
def _get_more_static_shape(shape0, shape1):
"""Compare two shapes with the same rank,
and return the one with fewer symbolic dimensions.
"""
assert len(shape0) == len(shape1)
num_sym_dim0 = 0
num_sym_dim1 = 0
for dim0, dim1 in zip(list(shape0), list(shape1)):
if not isinstance(dim0, int):
num_sym_dim0 += 1
if not isinstance(dim1, int):
num_sym_dim1 += 1
if num_sym_dim0 < num_sym_dim1:
return shape0
return shape1
def _get_more_static_shape_rank(shape0, shape1):
"""Compare two shapes with different rank,
and return the one with fewer symbolic dimension.
"""
num_sym_dim0 = sum([not isinstance(dim, (int, tvm.tir.expr.IntImm)) for dim in list(shape0)])
num_sym_dim1 = sum([not isinstance(dim, (int, tvm.tir.expr.IntImm)) for dim in list(shape1)])
if num_sym_dim0 < num_sym_dim1:
return shape0
return shape1
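# Worked example for the two shape-comparison helpers above: given
# shape0 = (Any(), 3, 224, 224) and shape1 = (1, 3, 224, 224), shape1 has fewer
# symbolic (non-int) dimensions and is returned; when both shapes have the same
# number of symbolic dimensions, shape1 is returned by default.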
def _rsqrt():
def _impl(inputs, attr, params, mod):
inputs.append(tvm.relay.const(-0.5, attr["T"].name))
return AttrCvt(op_name="power")(inputs, attr)
return _impl
def _argx(func, func_name):
"""A common wrapper for argmin and argmax operations"""
def _impl(inputs, attr, params, mod):
try:
# In Tensorflow, the `axis` argument is a Tensor, not an attribute. We
# support the case where it comes from a scalar constant.
axis_input_value = [_get_num_param(params, inputs[1])]
except (IndexError, KeyError):
raise TypeError(
"Unsupported argument for `{}` : `axis` should be a constant".format(func_name)
)
out = func(inputs[0], axis=axis_input_value, keepdims=False)
dtype = attr["output_type"].name
if dtype != "int32":
out = _op.cast(out, dtype=dtype)
return out
return _impl
def _elemwise(name):
def _impl(inputs, attr, params, mod):
assert len(inputs) == 2, "{} take 2 inputs, {} given".format(name, len(inputs))
return get_relay_op(name)(*inputs)
return _impl
def _pool3d(name):
def _impl(inputs, attr, params, mod):
attr["data_format"] = attr["data_format"].decode("utf-8")
flip_layout = False
input_shape = _infer_shape(inputs[0], mod)
if attr["data_format"] == "NDHWC":
attr["kernel_shape"] = (attr["ksize"][1], attr["ksize"][2], attr["ksize"][3])
attr["strides"] = (attr["strides"][1], attr["strides"][2], attr["strides"][3])
elif attr["data_format"] == "NCDHW":
attr["kernel_shape"] = (attr["ksize"][2], attr["ksize"][3], attr["ksize"][4])
attr["strides"] = (attr["strides"][2], attr["strides"][3], attr["strides"][4])
else:
msg = 'Value {} of attribute "data_format" of operator Pooling ' "is not valid."
raise tvm.error.OpAttributeInvalid(msg.format(attr["data_format"]))
if attr["data_format"] == "NDHWC":
input_shape = [_infer_shape(inputs[0], mod)[i] for i in (0, 4, 1, 2, 3)]
inputs[0] = _op.transpose(inputs[0], axes=(0, 4, 1, 2, 3))
attr["data_format"] = "NCDHW"
flip_layout = True
attr["padding"] = attr["padding"].decode("utf-8")
if attr["padding"] == "VALID":
attr["padding"] = [0, 0, 0, 0, 0, 0]
elif attr["padding"] == "SAME":
stride_d, stride_h, stride_w = attr["strides"]
kernel_d, kernel_h, kernel_w = attr["kernel_shape"]
if attr["data_format"] == "NDHWC":
in_d = input_shape[1]
in_h = input_shape[2]
in_w = input_shape[3]
else:
in_d = input_shape[2]
in_h = input_shape[3]
in_w = input_shape[4]
pad_d = _get_pad_pair(in_d, kernel_d, stride_d)
pad_v = _get_pad_pair(in_h, kernel_h, stride_h)
pad_h = _get_pad_pair(in_w, kernel_w, stride_w)
attr["padding"] = [pad_d[0], pad_v[0], pad_h[0], pad_d[1], pad_v[1], pad_h[1]]
else:
msg = 'Value {} in attribute "padding" of operator Pooling is ' "not valid."
raise tvm.error.OpAttributeInvalid(msg.format(attr["padding"]))
if name == "avg_pool":
attr["count_include_pad"] = False
attr["ceil_mode"] = False
out = AttrCvt(
op_name=name,
transforms={"kernel_shape": "pool_size", "data_format": "layout"},
ignores=["ksize"],
)(inputs, attr)
if flip_layout:
out = _op.transpose(out, axes=(0, 2, 3, 4, 1))
return out
return _impl
def _pooling(name):
def _impl(inputs, attr, params, mod):
attr["data_format"] = attr["data_format"].decode("utf-8")
flip_layout = False
input_shape = _infer_shape(inputs[0], mod)
if attr["data_format"] == "NHWC":
attr["kernel_shape"] = (attr["ksize"][1], attr["ksize"][2])
attr["strides"] = (attr["strides"][1], attr["strides"][2])
elif attr["data_format"] == "NCHW":
attr["kernel_shape"] = (attr["ksize"][2], attr["ksize"][3])
attr["strides"] = (attr["strides"][2], attr["strides"][3])
else:
msg = 'Value {} of attribute "data_format" of operator Pooling ' "is not valid."
raise tvm.error.OpAttributeInvalid(msg.format(attr["data_format"]))
if attr["_target_layout"] == "NCHW" and attr["data_format"] == "NHWC":
tmp_shape = _infer_shape(inputs[0], mod)
input_shape = [tmp_shape[ii] for ii in (0, 3, 1, 2)]
inputs[0] = _op.transpose(inputs[0], axes=(0, 3, 1, 2))
attr["data_format"] = "NCHW"
flip_layout = True
# Fix padding
attr["padding"] = attr["padding"].decode("utf-8")
if attr["padding"] == "VALID":
attr["padding"] = [0, 0]
elif attr["padding"] == "SAME":
stride_h, stride_w = attr["strides"]
kernel_h, kernel_w = attr["kernel_shape"]
if attr["data_format"] == "NHWC":
in_h = input_shape[1]
in_w = input_shape[2]
else:
in_h = input_shape[2]
in_w = input_shape[3]
pad_v = _get_pad_pair(in_h, kernel_h, stride_h)
pad_h = _get_pad_pair(in_w, kernel_w, stride_w)
attr["padding"] = [pad_v[0], pad_h[0], pad_v[1], pad_h[1]]
elif attr["padding"] == "EXPLICIT":
paddings = attr["explicit_paddings"]
assert len(paddings) == 8
if flip_layout or attr["data_format"] == "NHWC":
attr["padding"] = [paddings[2], paddings[4], paddings[3], paddings[5]]
else:
attr["padding"] = [paddings[4], paddings[6], paddings[5], paddings[7]]
else:
msg = 'Value {} in attribute "padding" of operator Pooling is ' "not valid."
raise tvm.error.OpAttributeInvalid(msg.format(attr["padding"]))
if name == "avg_pool":
attr["count_include_pad"] = False
out = AttrCvt(
op_name=_dimension_picker(name),
transforms={"kernel_shape": "pool_size", "data_format": "layout"},
ignores=["ksize", "explicit_paddings"],
extras={"ceil_mode": False},
custom_check=_dimension_constraint(),
)(inputs, attr)
if flip_layout:
out = _op.transpose(out, axes=(0, 2, 3, 1))
return out
return _impl
def _conv(opname):
def _impl(inputs, attr, params, mod):
attr["data_format"] = attr["data_format"].decode("utf-8")
flip_layout = False
if opname == "conv_transpose" and attr["data_format"] == "NHWC":
# transform to NCHW for TVM backend compatibility and set 'flip_layout'
# so the output is flipped back to NHWC
inputs[2] = _op.transpose(inputs[2], axes=(0, 3, 1, 2))
attr["strides"][1], attr["strides"][2], attr["strides"][3] = (
attr["strides"][3],
attr["strides"][1],
attr["strides"][2],
)
attr["data_format"] = "NCHW"
# Check whether output shapes attribute is set and not None
if (
opname == "conv_transpose"
and len(attr["_output_shapes"]) > 0
and attr["_output_shapes"][0]
):
tmp_shape = attr["_output_shapes"][0]
tmp_shape = [tmp_shape[ii] for ii in (0, 3, 1, 2)]
attr["_output_shapes"][0] = tmp_shape
flip_layout = True
inputs_data = inputs[0] if opname != "conv_transpose" else inputs[2]
# NCHW layout requires a weights transpose
weights_shape = _infer_shape(inputs[1], mod)
if attr["data_format"] == "NCHW":
tmp_shape = weights_shape
if opname in ["conv", "conv_transpose"]:
tmp_shape = [tmp_shape[ii] for ii in (3, 2, 0, 1)]
inputs[1] = _op.transpose(inputs[1], axes=(3, 2, 0, 1))
else:
tmp_shape = [tmp_shape[ii] for ii in (2, 3, 0, 1)]
inputs[1] = _op.transpose(inputs[1], axes=(2, 3, 0, 1))
weights_shape = tmp_shape
input_shape = _infer_shape(inputs_data, mod)
if attr["_target_layout"] == "NCHW" and attr["data_format"] == "NHWC":
input_shape = [input_shape[ii] for ii in (0, 3, 1, 2)]
inputs_data = _op.transpose(inputs_data, axes=(0, 3, 1, 2))
if opname in ["conv", "conv_transpose"]:
weights_shape = [weights_shape[ii] for ii in (3, 2, 0, 1)]
inputs[1] = _op.transpose(inputs[1], axes=(3, 2, 0, 1))
else:
weights_shape = [weights_shape[ii] for ii in (2, 3, 0, 1)]
inputs[1] = _op.transpose(inputs[1], axes=(2, 3, 0, 1))
attr["data_format"] = "NCHW"
attr["strides"] = [attr["strides"][ii] for ii in (0, 3, 1, 2)]
flip_layout = True
if attr["data_format"] == "NHWC":
in_channels = input_shape[3]
kernel_h, kernel_w, _, depth_mult = weights_shape
attr["kernel_shape"] = (weights_shape[0], weights_shape[1])
if opname == "conv":
attr["channels"] = weights_shape[3]
elif opname == "conv_transpose":
attr["channels"] = weights_shape[2]
else:
attr["channels"] = input_shape[3] * depth_mult
if "dilations" in attr:
attr["dilations"] = (attr["dilations"][1], attr["dilations"][2])
attr["strides"] = (attr["strides"][1], attr["strides"][2])
elif attr["data_format"] == "NCHW":
in_channels = input_shape[1]
_, depth_mult, kernel_h, kernel_w = weights_shape
attr["kernel_shape"] = (weights_shape[2], weights_shape[3])
if opname == "conv":
attr["channels"] = weights_shape[0]
elif opname == "conv_transpose":
attr["channels"] = weights_shape[1]
else:
attr["channels"] = input_shape[1] * depth_mult
if attr["channels"] < 0:
attr["channels"] *= -1
if "dilations" in attr:
attr["dilations"] = (attr["dilations"][2], attr["dilations"][3])
attr["strides"] = (attr["strides"][2], attr["strides"][3])
else:
msg = 'Value {} in attribute "data_format" of operator Conv is ' "not valid."
raise tvm.error.OpAttributeInvalid(msg.format(attr["data_format"]))
if opname == "depthwise":
attr["groups"] = in_channels
# Fix padding
attr["padding"] = attr["padding"].decode("utf-8")
if attr["padding"] == "VALID":
attr["padding"] = [0, 0]
elif attr["padding"] == "SAME":
stride_h, stride_w = attr["strides"]
kernel_h, kernel_w = attr["kernel_shape"]
pdata_shape = input_shape
# Check whether output shapes attribute is set and not None
if (
opname == "conv_transpose"
and len(attr["_output_shapes"]) > 0
and attr["_output_shapes"][0]
):
pdata_shape = attr["_output_shapes"][0]
if attr["data_format"] == "NHWC":
in_h = pdata_shape[1]
in_w = pdata_shape[2]
else:
in_h = pdata_shape[2]
in_w = pdata_shape[3]
dilation_h = attr["dilations"][0]
dilation_w = attr["dilations"][1]
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_v = _get_pad_pair(in_h, dilated_kernel_h, stride_h)
pad_h = _get_pad_pair(in_w, dilated_kernel_w, stride_w)
attr["padding"] = [pad_v[0], pad_h[0], pad_v[1], pad_h[1]]
elif attr["padding"] == "EXPLICIT":
paddings = attr["explicit_paddings"]
assert len(paddings) == 8
if flip_layout or attr["data_format"] == "NHWC":
attr["padding"] = [paddings[2], paddings[4], paddings[3], paddings[5]]
else:
attr["padding"] = [paddings[4], paddings[6], paddings[5], paddings[7]]
else:
msg = 'Value {} in attribute "padding" of operator Conv is not ' "valid."
raise tvm.error.OpAttributeInvalid(msg.format(attr["padding"]))
if "kernel_layout" not in attr:
if opname == "conv":
attr["kernel_layout"] = "HWIO" if attr["data_format"] == "NHWC" else "OIHW"
elif opname == "conv_transpose":
# conv_transpose in TVM expects weights in IOHW layout for NCHW
attr["kernel_layout"] = "HWIO" if attr["data_format"] == "NHWC" else "IOHW"
else:
attr["kernel_layout"] = "HWOI" if attr["data_format"] == "NHWC" else "OIHW"
# Ignore the new attributes from TF2.0, for now.
out = AttrCvt(
op_name=_dimension_picker(
"conv", surfix="_transpose" if opname == "conv_transpose" else ""
),
ignores=["explicit_paddings"],
transforms={
"kernel_shape": "kernel_size",
"data_format": "data_layout",
"dilations": ("dilation", (0, 0)),
"group": ("groups", 1),
},
custom_check=_dimension_constraint(),
)([inputs_data, inputs[1]], attr)
if flip_layout:
out = _op.transpose(out, axes=(0, 2, 3, 1))
return out
return _impl
# Dilation2d
def _dilation2d():
def _impl(inputs, attr, params, mod):
if "data_format" not in attr:
attr["data_format"] = "NHWC"
input_shape = _infer_shape(inputs[0], mod)
weights_shape = _infer_shape(inputs[1], mod)
if attr["_target_layout"] == "NCHW" and attr["data_format"] == "NHWC":
input_shape = [input_shape[ii] for ii in (0, 3, 1, 2)]
inputs[0] = _op.transpose(inputs[0], axes=(0, 3, 1, 2))
weights_shape = [weights_shape[ii] for ii in (2, 0, 1)]
inputs[1] = _op.transpose(inputs[1], axes=(2, 0, 1))
attr["data_format"] = "NCHW"
if attr["data_format"] in ["NHWC", "NCHW"]:
if "rates" in attr:
attr["dilations"] = attr["rates"]
if "dilations" in attr:
attr["dilations"] = (attr["dilations"][1], attr["dilations"][2])
attr["strides"] = (attr["strides"][1], attr["strides"][2])
else:
msg = 'Value {} in attribute "data_format" of operator Dilation2D is ' "not valid."
raise tvm.error.OpAttributeInvalid(msg.format(attr["data_format"]))
attr["padding"] = attr["padding"].decode("utf-8")
if attr["padding"] == "VALID":
attr["padding"] = [0, 0]
elif attr["padding"] == "SAME":
stride_h, stride_w = attr["strides"]
if attr["data_format"] == "NHWC":
kernel_h, kernel_w = weights_shape[0], weights_shape[1]
else:
kernel_h, kernel_w = weights_shape[1], weights_shape[2]
if attr["data_format"] == "NHWC":
in_h = input_shape[1]
in_w = input_shape[2]
else:
in_h = input_shape[2]
in_w = input_shape[3]
dilation_h = attr["dilations"][0]
dilation_w = attr["dilations"][1]
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_v = _get_pad_pair(in_h, dilated_kernel_h, stride_h)
pad_h = _get_pad_pair(in_w, dilated_kernel_w, stride_w)
if attr["data_format"] == "NHWC":
inputs[0] = _op.nn.pad(
data=inputs[0],
pad_width=((0, 0), (pad_v[0], pad_v[1]), (pad_h[0], pad_h[1]), (0, 0)),
)
else:
inputs[0] = _op.nn.pad(
data=inputs[0],
pad_width=((0, 0), (0, 0), (pad_v[0], pad_v[1]), (pad_h[0], pad_h[1])),
)
attr["padding"] = [0, 0]
else:
msg = 'Value {} in attribute "padding" of operator Dilation2d is not ' "valid."
raise tvm.error.OpAttributeInvalid(msg.format(attr["padding"]))
attr["kernel_layout"] = "HWI" if attr["data_format"] == "NHWC" else "IHW"
out = AttrCvt(
op_name="dilation2d",
ignores=["explicit_paddings", "rates"],
transforms={
"data_format": "data_layout",
},
)([inputs[0], inputs[1]], attr)
if attr["_target_layout"] == "NCHW":
out = _op.transpose(out, axes=(0, 2, 3, 1))
return out
return _impl
def _conv3d(opname):
def _impl(inputs, attr, params, mod):
attr["data_format"] = attr["data_format"].decode("utf-8")
flip_layout = False
inputs_data = inputs[0] if opname != "conv_transpose" else inputs[2]
# NCDHW layout requires a weights transpose
weights_shape = _infer_shape(inputs[1], mod)
if attr["data_format"] == "NCDHW":
tmp_shape = weights_shape
tmp_shape = [tmp_shape[ii] for ii in (4, 3, 0, 1, 2)]
inputs[1] = _op.transpose(inputs[1], axes=(4, 3, 0, 1, 2))
weights_shape = tmp_shape
input_shape = _infer_shape(inputs_data, mod)
if attr["_target_layout"] == "NCDHW" and attr["data_format"] == "NDHWC":
input_shape = [input_shape[ii] for ii in (0, 4, 1, 2, 3)]
inputs_data = _op.transpose(inputs_data, axes=(0, 4, 1, 2, 3))
weights_shape = [weights_shape[ii] for ii in (4, 3, 0, 1, 2)]
inputs[1] = _op.transpose(inputs[1], axes=(4, 3, 0, 1, 2))
attr["data_format"] = "NCDHW"
attr["strides"] = [attr["strides"][ii] for ii in (0, 4, 1, 2, 3)]
flip_layout = True
if attr["data_format"] == "NDHWC":
kernel_d, kernel_h, kernel_w, _, _ = weights_shape
attr["kernel_shape"] = (kernel_d, kernel_h, kernel_w)
if opname == "conv":
attr["channels"] = weights_shape[4]
elif opname == "conv_transpose":
attr["channels"] = weights_shape[3]
if "dilations" in attr:
attr["dilations"] = (
attr["dilations"][1],
attr["dilations"][2],
attr["dilations"][3],
)
attr["strides"] = (attr["strides"][1], attr["strides"][2], attr["strides"][3])
elif attr["data_format"] == "NCDHW":
_, _, kernel_d, kernel_h, kernel_w = weights_shape
attr["kernel_shape"] = (kernel_d, kernel_h, kernel_w)
if opname == "conv":
attr["channels"] = weights_shape[0]
elif opname == "conv_transpose":
attr["channels"] = weights_shape[1]
if "dilations" in attr:
attr["dilations"] = (
attr["dilations"][2],
attr["dilations"][3],
attr["dilations"][4],
)
attr["strides"] = (attr["strides"][2], attr["strides"][3], attr["strides"][4])
else:
msg = 'Value {} in attribute "data_format" of operator Conv3D is not valid.'
raise tvm.error.OpAttributeInvalid(msg.format(attr["data_format"]))
# Fix padding
attr["padding"] = attr["padding"].decode("utf-8")
if attr["padding"] == "VALID":
attr["padding"] = [0, 0, 0]
elif attr["padding"] == "SAME":
stride_d, stride_h, stride_w = attr["strides"]
kernel_d, kernel_h, kernel_w = attr["kernel_shape"]
pdata_shape = input_shape
if opname == "conv_transpose" and len(attr["_output_shapes"]) > 0:
pdata_shape = attr["_output_shapes"][0]
if attr["data_format"] == "NDHWC":
in_d = pdata_shape[1]
in_h = pdata_shape[2]
in_w = pdata_shape[3]
else:
in_d = pdata_shape[2]
in_h = pdata_shape[3]
in_w = pdata_shape[4]
dilation_d = attr["dilations"][0]
dilation_h = attr["dilations"][1]
dilation_w = attr["dilations"][2]
dilated_kernel_d = (kernel_d - 1) * dilation_d + 1
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_d = _get_pad_pair(in_d, dilated_kernel_d, stride_d)
pad_v = _get_pad_pair(in_h, dilated_kernel_h, stride_h)
pad_h = _get_pad_pair(in_w, dilated_kernel_w, stride_w)
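# The 6-element padding below is ordered as
# (depth_before, height_before, width_before, depth_after, height_after, width_after).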
attr["padding"] = [pad_d[0], pad_v[0], pad_h[0], pad_d[1], pad_v[1], pad_h[1]]
elif attr["padding"] == "EXPLICIT":
paddings = attr["explicit_paddings"]
assert len(paddings) == 10
if flip_layout or attr["data_format"] == "NDHWC":
attr["padding"] = [
paddings[2],
paddings[4],
paddings[6],
paddings[3],
paddings[5],
paddings[7],
]
else:
attr["padding"] = [
paddings[4],
paddings[6],
paddings[8],
paddings[5],
paddings[7],
paddings[9],
]
else:
msg = 'Value {} in attribute "padding" of operator Conv3D is not valid.'
raise tvm.error.OpAttributeInvalid(msg.format(attr["padding"]))
if "kernel_layout" not in attr:
attr["kernel_layout"] = "DHWIO" if attr["data_format"] == "NDHWC" else "OIDHW"
use_bias = len(inputs) == (3 if opname != "conv_transpose" else 4)
channel_axis = 1 if attr["data_format"] == "NCDHW" else 4
# Ignore the new attributes from TF2.0, for now.
out = AttrCvt(
op_name=_dimension_picker(
"conv", surfix="_transpose" if opname == "conv_transpose" else ""
),
ignores=["explicit_paddings", "Tshape"],
transforms={
"kernel_shape": "kernel_size",
"data_format": "data_layout",
"dilations": ("dilation", (0, 0)),
"group": ("groups", 1),
},
custom_check=_dimension_constraint(),
)([inputs_data, inputs[1]], attr)
if use_bias:
out = _op.nn.bias_add(
out, inputs[2] if opname != "conv_transpose" else inputs[3], axis=channel_axis
)
if flip_layout:
out = _op.transpose(out, axes=(0, 2, 3, 4, 1))
return out
return _impl
def _nms(return_scores=False):
def _impl(inputs, attr, params, mod):
# Get parameter values
try:
max_output_size = int(np.atleast_1d(inputs[2].data.numpy().astype("int64"))[0])
except Exception:
try:
max_output_size = (
_infer_value(inputs[2], params, mod).numpy().astype("int64").tolist()[0]
)
except Exception:
max_output_size = inputs[2]
iou_threshold = np.atleast_1d(inputs[3].data.numpy())[0]
# score_threshold was introduced in V3
score_threshold = np.atleast_1d(inputs[4].data.numpy())[0] if len(inputs) > 4 else 0.0
pad_output = "pad_to_max_output_size"
# Generate data with shape (1, num_anchors, 5)
scores = AttrCvt(
op_name="expand_dims",
ignores=["T_threshold", pad_output],
extras={"axis": -1, "num_newaxis": 1},
)([inputs[1]], attr)
data = get_relay_op("concatenate")([scores, inputs[0]], -1)
data = get_relay_op("expand_dims")(data, 0, 1)
# get_valid_counts is used here to improve inference performance
ct, data, indices = get_relay_op("get_valid_counts")(
data, score_threshold=score_threshold, id_index=-1, score_index=0
)
# TensorFlow NMS doesn't have parameter top_k
top_k = -1
# TF doesn't have class id for nms input
score_index = 0
nms_ret = get_relay_op("non_max_suppression")(
data=data,
valid_count=ct,
indices=indices,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
force_suppress=True,
top_k=top_k,
coord_start=1,
score_index=score_index,
id_index=-1,
return_indices=True,
invalid_to_bottom=False,
)
if pad_output in attr and attr[pad_output]:
return nms_ret
# squeeze it, TF NMS is not batched
size = get_relay_op("squeeze")(nms_ret[1], axis=[1])
data_slice = get_relay_op("squeeze")(nms_ret[0], axis=[0])
# slice to get the dynamic result
ret = get_relay_op("strided_slice")(
data_slice, begin=_expr.const([0]), end=size, slice_mode="size"
)
# NonMaxSuppressionV5 returns scores. pad_output is always False for NMSv5.
if return_scores:
if "soft_nms_sigma" in attr and attr["soft_nms_sigma"] != 0.0:
raise tvm.error.OpAttributeUnImplemented(
"soft_nms_sigma for NonMaxSuppressionV5 is not supported"
)
ret_scores = _op.take(inputs[1], ret, axis=0)
return _expr.TupleWrapper(_expr.Tuple([ret, ret_scores, size]), 3)
return ret
return _impl
def convert_combined_nms_with_all_class_nms(
batch_size,
max_output_boxes_per_batch,
num_class,
boxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
max_total_size,
clip_boxes,
):
"""Converts TF combined_nms using Relay all_class_max_suppression op"""
(selected_indices, selected_scores, num_detections,) = _op.vision.all_class_non_max_suppression(
boxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
output_format="tensorflow",
)
box_range = _op.arange(
_op.const(0, dtype="int64"), _op.const(max_total_size, dtype="int64"), dtype="int64"
)
assert isinstance(batch_size, int), "dynamic batch size not supported yet."
tile_batch_reps = _op.const([batch_size, 1])
box_range_2d = _op.tile(box_range, tile_batch_reps)
valid_mask = _op.cast(
_op.less(box_range_2d, _op.expand_dims(num_detections, axis=1)), "float32"
)
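# valid_mask[b, i] is 1.0 when i < num_detections[b]; it is used below to zero out
# scores and boxes in the padded positions.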
def select_topk(do_zero_pad):
def true_branch():
arange = _op.arange(
_op.const(0, dtype="int64"),
_op.const(max_output_boxes_per_batch, dtype="int64"),
dtype="int64",
)
pad = _op.full(
_op.const(0, dtype="int64"), (max_total_size - max_output_boxes_per_batch,)
)
topk_indices = _op.tile(_op.concatenate([arange, pad], 0), tile_batch_reps)
nmsed_scores = _op.gather(selected_scores, 1, topk_indices)
nmsed_scores = nmsed_scores * valid_mask
return nmsed_scores, topk_indices
def false_branch():
if isinstance(max_output_boxes_per_class, int):
# Do topk on smaller input if possible
slice_mx = _op.const([max_output_boxes_per_class * num_class], dtype="int64")
selected_scores_slice = _op.strided_slice(
selected_scores, begin=_op.const([0], dtype="int64"), end=slice_mx, axes=[1]
)
else:
selected_scores_slice = selected_scores
return _op.topk(selected_scores_slice, k=max_total_size, axis=1, ret_type="both")
# TODO(masahi): support dynamic num_boxes
# return _expr.If(do_zero_pad, true_branch(), false_branch())
return true_branch() if do_zero_pad else false_branch()
assert isinstance(max_output_boxes_per_batch, int), "dynamic number of boxes not supported yet."
nmsed_scores, topk_indices = select_topk(max_output_boxes_per_batch < max_total_size)
indices = _op.take(selected_indices, topk_indices, axis=1, batch_dims=1)
nmsed_box_indices = _op.take(indices, _op.const(1), axis=2)
nmsed_classes = _op.take(indices, _op.const(0), axis=2)
nmsed_classes = _op.cast(nmsed_classes, "float32")
nmsed_boxes = _op.take(boxes, nmsed_box_indices, axis=1, batch_dims=1)
num_detections = _op.minimum(num_detections, _op.const(max_total_size, dtype="int64"))
if clip_boxes:
nmsed_boxes = _op.maximum(nmsed_boxes, _expr.const(0, dtype="float32"))
nmsed_boxes = _op.minimum(nmsed_boxes, _expr.const(1, dtype="float32"))
nmsed_boxes = nmsed_boxes * _op.expand_dims(valid_mask, axis=2)
return _expr.TupleWrapper(
_expr.Tuple([nmsed_boxes, nmsed_scores, nmsed_classes, num_detections]), 4
)
def _combined_nms():
def _impl(inputs, attr, params, mod):
# Get parameter values
boxes = inputs[0]
scores = inputs[1]
try:
max_output_size = int(np.atleast_1d(inputs[2].data.numpy().astype("int64"))[0])
except Exception:
try:
max_output_size = (
_infer_value(inputs[2], params, mod).numpy().astype("int64").tolist()[0]
)
except Exception:
max_output_size = inputs[2]
max_total_size = inputs[3]
iou_threshold = np.atleast_1d(inputs[4].data.numpy())[0]
score_threshold = np.atleast_1d(inputs[5].data.numpy())[0]
if attr["pad_per_class"]:
raise tvm.error.OpAttributeUnImplemented(
"pad_per_class for CombinedNonMaxSuppression is not supported"
)
boxes_shape = _infer_shape(inputs[0], mod)
scores_shape = _infer_shape(inputs[1], mod)
batch_size = boxes_shape[0]
num_anchors = boxes_shape[1]
q = boxes_shape[2]
num_classes = scores_shape[2]
assert isinstance(batch_size, int) and isinstance(
num_anchors, int
), "Dynamic inputs not supported yet"
if q == 1:
boxes = _op.squeeze(boxes, axis=[2])
scores_trans = _op.transpose(scores, [0, 2, 1])
max_output_boxes_per_batch = num_anchors * num_classes
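# Upper bound on the number of boxes NMS can select per batch:
# every anchor could in principle be selected once for every class.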
return convert_combined_nms_with_all_class_nms(
batch_size,
max_output_boxes_per_batch,
num_classes,
boxes,
scores_trans,
max_output_size,
iou_threshold,
score_threshold,
max_total_size.data.numpy().item(),
attr["clip_boxes"],
)
boxes = _op.reshape(boxes, newshape=[batch_size, num_anchors * num_classes, 4])
scores = _op.reshape(scores, newshape=[batch_size, num_anchors * num_classes, 1])
# In TF, class is specified by memory layout only.
ids = _op.arange(_op.const(num_classes, dtype="float32"))
ids = _op.broadcast_to(ids, (batch_size, num_anchors, num_classes))
ids = _op.reshape(ids, newshape=[batch_size, num_anchors * num_classes, 1])
data = _op.concatenate([ids, scores, boxes], -1)
ct, data, indices = _op.vision.get_valid_counts(
data, score_threshold=score_threshold, id_index=0, score_index=1
)
nms_ret = _op.vision.non_max_suppression(
data=data,
valid_count=ct,
indices=indices,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
force_suppress=False,
top_k=-1,
coord_start=2,
score_index=1,
id_index=0,
return_indices=False,
invalid_to_bottom=True,
)
# Dynamic slice to max_total_size
neg_one = _expr.const([-1])
slice_end = _op.concatenate(
[neg_one, _op.expand_dims(max_total_size, axis=0), neg_one], axis=0
)
nms_ret = _op.strided_slice(
nms_ret, begin=[0, 0, 0], end=slice_end, strides=[1, 1, 1], slice_mode="size"
)
# Slice output into boxes, scores, classes
nmsed_boxes = _op.strided_slice(
nms_ret, begin=[0, 0, 2], end=[-1, -1, 4], slice_mode="size"
)
if attr["clip_boxes"]:
nmsed_boxes = _op.maximum(nmsed_boxes, _expr.const(0, dtype="float32"))
nmsed_boxes = _op.minimum(nmsed_boxes, _expr.const(1, dtype="float32"))
nmsed_scores = _op.strided_slice(
nms_ret, begin=[0, 0, 1], end=[-1, -1, 1], slice_mode="size"
)
nmsed_scores = _op.squeeze(nmsed_scores, axis=[2])
nmsed_classes = _op.strided_slice(
nms_ret, begin=[0, 0, 0], end=[-1, -1, 1], slice_mode="size"
)
nmsed_classes = _op.squeeze(nmsed_classes, axis=[2])
# Get number of valid boxes
nms_count = _op.sum(
_op.cast(_op.greater(nmsed_scores, _expr.const(0, dtype="float32")), "int32"), axis=1
)
# TVM uses -1 for invalid outputs while TF uses 0
box_range = _op.arange(_expr.const(0, dtype="int32"), max_total_size, dtype="int32")
shape = _op.strided_slice(_op.shape_of(nmsed_boxes), begin=[0], end=[2])
box_range = _op.broadcast_to(box_range, shape)
valid_mask = _op.cast(_op.less(box_range, _op.expand_dims(nms_count, axis=1)), "float32")
nmsed_boxes = nmsed_boxes * _op.expand_dims(valid_mask, axis=2)
# Could instead use mask for scores, classes if negative values are possible.
nmsed_scores = _op.maximum(nmsed_scores, _expr.const(0, dtype="float32"))
nmsed_classes = _op.maximum(nmsed_classes, _expr.const(0, dtype="float32"))
return _expr.TupleWrapper(
_expr.Tuple([nmsed_boxes, nmsed_scores, nmsed_classes, nms_count]), 4
)
return _impl
def _decode_image():
def _impl(inputs, attr, params, mod):
# Image decode wrapper: the user is expected to feed already-decoded input to the next layer, so drop this layer.
warnings.warn("DecodeJpeg: It's a pass through, please handle preprocessing before input")
return inputs[0]
return _impl
def _unravel_index():
def _impl(inputs, attr, params, mod):
return _op.unravel_index(inputs[0], inputs[1])
return _impl
def _crop_and_resize():
def _impl(inputs, attr, params, mod):
# input image is a 4-D tensor of shape [batch, image_height, image_width, depth]
# boxes is a 2-D tensor of shape [num_boxes, 4], 4 is for [y1, x1, y2, x2]
crop_size = _get_list_param(params, inputs[3], mod)
method = attr["method"].decode()
method = "nearest_neighbor" if method == "nearest" else method
if method not in ["bilinear", "nearest_neighbor"]:
raise tvm.error.OpAttributeUnImplemented("Method {} is not supported".format(method))
layout = attr["layout"] if "layout" in attr else "NHWC"
extrapolation_value = attr["extrapolation_value"]
return get_relay_op("crop_and_resize")(
inputs[0], inputs[1], inputs[2], crop_size, layout, method, extrapolation_value
)
return _impl
def _cast():
def _impl(inputs, attr, params, mod):
return inputs[0].astype(attr["DstT"].name)
return _impl
def _expand_dims():
def _impl(inputs, attr, params, mod):
dim_input = inputs.pop(1)
axis = _get_num_param(params, dim_input)
return AttrCvt(
op_name="expand_dims",
ignores=["Tdim", "N"],
extras={"axis": int(axis), "num_newaxis": 1},
)(inputs, attr)
return _impl
def _expm1():
# op description: https://www.tensorflow.org/api_docs/python/tf/math/expm1
def _impl(inputs, attr, params, mod):
exp_out = get_relay_op("exp")(inputs[0])
return exp_out - tvm.relay.const(1.0)
return _impl
def _resize(method):
def _impl(inputs, attr, params, mod):
if attr["_output_shapes"][0] is not None:
size = attr["_output_shapes"][0][1:3]
# It is important that the size is fully defined. If any axis is not, we need to
# infer what the shape should be.
if -1 in size:
size = _infer_value(inputs[1], params, mod).numpy().reshape([-1]).tolist()
else:
size = _infer_value(inputs[1], params, mod).numpy().reshape([-1]).tolist()
attr["size"] = size
inputs.pop(1)
# NHWC
attr["layout"] = "NHWC"
if attr.pop("align_corners") is True:
attr["coordinate_transformation_mode"] = "align_corners"
else:
attr["coordinate_transformation_mode"] = "asymmetric"
# Ignore the new attributes from TF2.0, for now.
return AttrCvt(
op_name="resize2d",
ignores=["Tdim", "half_pixel_centers"],
extras={"method": method, "roi": None},
)(inputs, attr)
return _impl
def _check_numerics():
def _impl(inputs, attr, params, mod):
# Make a copy node, assuming there is no need to verify the values
return AttrCvt(op_name="copy", ignores=["message"])(inputs, attr)
return _impl
def _assert():
# ToDo: In general people want asserts to be gone from TensorFlow graphs
# when they are optimizing them, so converting it to a no-op is
# reasonable. However, it would be nice to have the option to keep them
# once Relay gets a Halt or Assert op.
return _no_op()
def _no_op():
def _impl(inputs, attr, params, mod):
# ToDo: This should really be an op that returns nothing, which could
# be represented as an empty tuple. It turns out that TVM
# infrastructure doesn't like running functions that return None and
# also doesn't like running functions that return an empty tuple. So it
# doesn't work, but it should be made to work and then this could be
# improved. In the meantime, it is hard to imagine a case where it
# matters in any real way that a no-op is converted to a constant 0.
return tvm.relay.const(0)
return _impl
def _matmul():
def _impl(inputs, attr, params, mod):
from .tensorflow import TF_DEFAULT_CONFIGS
channels = _infer_channels(inputs[1], not attr["transpose_b"])
if TF_DEFAULT_CONFIGS["use_dense"]:
if attr["transpose_a"]:
inputs[0] = _op.transpose(inputs[0], axes=(1, 0))
if not attr["transpose_b"]:
inputs[1] = _op.transpose(inputs[1], axes=(1, 0))
return AttrCvt(
op_name="dense",
extras={"units": channels},
ignores=["transpose_a", "transpose_b", "T"],
)(inputs, attr)
return AttrCvt(
op_name="matmul",
extras={"units": channels},
ignores=["T"],
)(inputs, attr)
return _impl
def _batch_matmul():
def _impl(inputs, attr, params, mod):
from .tensorflow import TF_DEFAULT_CONFIGS
input_x = inputs[0]
input_y = inputs[1]
orig_shape_x = _infer_shape(input_x, mod)
orig_shape_y = _infer_shape(input_y, mod)
ndim = len(orig_shape_x)
ndim_y = len(orig_shape_y)
is_static = not check_symbolic_shape(orig_shape_x)
# reshape n-dimensional batch matmul into 3d
if ndim > 3:
outer_dims = [orig_shape_x[i] for i in range(0, len(orig_shape_x) - 2)]
if is_static:
num_outer_elts = np.prod(outer_dims)
new_shape_x = (num_outer_elts, orig_shape_x[-2], orig_shape_x[-1])
if ndim_y > 2:
new_shape_y = (num_outer_elts, orig_shape_y[-2], orig_shape_y[-1])
elif ndim_y == 2:
new_shape_y = (1, orig_shape_y[-2], orig_shape_y[-1])
else: # handle dynamic shape (dyn.reshape op)
shape_of_x = list_shape_of(inputs[0], ndim)
shape_of_y = list_shape_of(inputs[1], ndim)
new_shape_x = [_op.const(1), shape_of_x[-2], shape_of_x[-1]]
new_shape_y = [_op.const(1), shape_of_y[-2], shape_of_y[-1]]
for i in range(ndim - 2):
new_shape_x[0] *= shape_of_x[i]
new_shape_y[0] *= shape_of_y[i]
new_shape_x = _op.concatenate(_op.Tuple(new_shape_x), axis=0)
new_shape_y = _op.concatenate(_op.Tuple(new_shape_y), axis=0)
input_x = _op.reshape(input_x, newshape=new_shape_x)
input_y = _op.reshape(input_y, newshape=new_shape_y)
elif ndim_y == 2:
input_y = _op.reshape(input_y, (1, orig_shape_y[-2], orig_shape_y[-1]))
adj_x = attr["adj_x"]
adj_y = attr["adj_y"]
if TF_DEFAULT_CONFIGS["use_nt_batch_matmul"]:
# Strictly convert all batch_matmul to NT format
input_x = _op.transpose(input_x, axes=[0, 2, 1]) if adj_x else input_x
input_y = _op.transpose(input_y, axes=[0, 2, 1]) if not adj_y else input_y
ret = get_relay_op("batch_matmul")(input_x, input_y)
else:
ret = get_relay_op("batch_matmul")(
input_x, input_y, transpose_a=adj_x, transpose_b=adj_y
)
# reshape result back to n-dimensional
if ndim > 3:
if is_static:
final_shape = list(orig_shape_x)
final_shape[-2] = orig_shape_x[-1] if adj_x else orig_shape_x[-2]
final_shape[-1] = orig_shape_y[-2] if adj_y else orig_shape_y[-1]
else:
# calculate the resulting shape = [shape[:-2], 0, 0]
final_shape = list(shape_of_x)
final_shape[-2] = shape_of_x[-1] if adj_x else shape_of_x[-2]
final_shape[-1] = shape_of_y[-2] if adj_y else shape_of_y[-1]
final_shape = _op.concatenate(_op.Tuple(final_shape), axis=0)
ret = _op.reshape(ret, newshape=final_shape)
return ret
return _impl
def _sparse_tensor_dense_matmul():
def _impl(inputs, attr, params, mod):
# Loading this by default causes TVM to not be loadable from other languages.
# Sparse utility from scipy
from scipy.sparse import csr_matrix
assert len(inputs) == 4, "There should be 4 input tensors"
indices_tensor = _infer_value(inputs[0], params, mod).numpy()
values_tensor = _infer_value(inputs[1], params, mod).numpy()
dense_shape_tensor = _infer_value(inputs[2], params, mod).numpy()
data = inputs[3]
rows = [x[0] for x in indices_tensor]
cols = [x[1] for x in indices_tensor]
# Create scipy sparse Tensor(CSR)
weight_sp = csr_matrix(
(values_tensor, (rows, cols)), shape=tuple(dense_shape_tensor.tolist())
)
# As per the TensorFlow implementation, we have 4 possible input combinations,
# and the first input (A) is always sparse while the second input (B) is always dense.
# Case 1: A , B , adjoint_a=False, adjoint_b=False --> A * B
# Case 2: A , B , adjoint_a=True, adjoint_b=False --> A.T * B
# Case 3: A , B , adjoint_a=False, adjoint_b=True --> A * B.T
# Case 4: A , B , adjoint_a=True, adjoint_b=True --> A.T * B.T
#
# The Topi implementation of sparse_dense (matmul) has 2 possible input
# combinations, where the first input (A) is always dense
# and the second input (B) is always sparse.
# Case 1: A , B, sparse_lhs = False --> A * B.T
# Case 2: A , B, sparse_lhs = True --> B * A.T
#
# The mapping would be as below:
# TF Case 1: A , B , adjoint_a=False, adjoint_b=False
# --> In TF: A * B --> In Topi: A * B.T.T
# --> sparse_dense(transpose(B), A, sparse_lhs=True)
#
# TF Case 2: A , B , adjoint_a=True, adjoint_b=False
# --> In TF: A.T * B --> In Topi: A.T * B.T.T
# --> sparse_dense(transpose(B), transpose(A), sparse_lhs=True)
#
# TF Case 3: A , B , adjoint_a=False, adjoint_b=True
# --> In TF: A * B.T --> In Topi: A * B
# --> sparse_dense(B, A, sparse_lhs=True)
#
# TF Case 4: A , B , adjoint_a=True, adjoint_b=True
# --> In TF: A.T * B.T --> In Topi: (B * A.T).T
# --> transpose(sparse_dense(B, transpose(A), sparse_lhs=False))
# By default, in TensorFlow the first input, i.e. data, is sparse
sparse_lhs = True
# TF Case 1:
if not attr.get("adjoint_a") and not attr.get("adjoint_b"):
data = _op.transpose(data)
# TF Case 2:
elif attr.get("adjoint_a") and not attr.get("adjoint_b"):
data = _op.transpose(data)
weight_sp = csr_matrix(weight_sp.transpose())
# TF Case 3:
elif not attr.get("adjoint_a") and attr.get("adjoint_b"):
pass
# TF Case 4:
# attr.get("adjoint_a") and attr.get("adjoint_b"):
else:
sparse_lhs = False
weight_sp = csr_matrix(weight_sp.transpose())
weight_data = _expr.const(weight_sp.data, weight_sp.data.dtype)
weight_indptrs = _expr.const(weight_sp.indptr, weight_sp.indptr.dtype)
weight_indices = _expr.const(weight_sp.indices, weight_sp.indices.dtype)
ret = _op.nn.sparse_dense(data, [weight_data, weight_indices, weight_indptrs], sparse_lhs)
if not sparse_lhs:
# TF Case 4
ret = _op.transpose(ret)
return ret
return _impl
def _sparse_fill_empty_rows():
def _impl(inputs, attr, params, mod):
assert len(inputs) == 4, "There should be 4 input tensors"
sparse_indices = inputs[0]
sparse_values = inputs[1]
sparse_indices_num_cols = _infer_shape(sparse_indices, mod)[1]
first_column = _op.split(sparse_indices, sparse_indices_num_cols, axis=1)[0]
sorted_indices = _op.argsort(_op.squeeze(first_column))
sorted_sparse_indices = _op.take(sparse_indices, sorted_indices, axis=0)
sorted_sparse_values = _op.take(sparse_values, sorted_indices, axis=0)
new_sparse_indices, new_sparse_values, empty_row_indicator = _op.sparse_fill_empty_rows(
sorted_sparse_indices, sorted_sparse_values, inputs[2], inputs[3]
)
return _expr.TupleWrapper(
_expr.Tuple([new_sparse_indices, new_sparse_values, empty_row_indicator]),
3,
)
return _impl
def _sparse_reshape():
def _impl(inputs, attr, params, mod):
assert len(inputs) == 3, "There should be 3 input tensors"
new_indices, new_shape = get_relay_op("sparse_reshape")(inputs[0], inputs[1], inputs[2])
return _expr.TupleWrapper(_expr.Tuple([new_indices, new_shape]), 2)
return _impl
def _math_segment_sum():
def _impl(inputs, attr, params, mod):
assert len(inputs) == 2, "There should be 2 input tensors"
return get_relay_op("segment_sum")(inputs[0], inputs[1])
return _impl
def _sparse_segment_sum():
def _impl(inputs, attr, params, mod):
assert len(inputs) == 3, "There should be 3 input tensors"
data = _op.take(inputs[0], inputs[1], axis=0)
return _op.segment_sum(data, inputs[2])
return _impl
def _sparse_segment_sum_with_num_segments():
def _impl(inputs, attr, params, mod):
assert len(inputs) == 4, "There should be 4 input tensors"
data = _op.take(inputs[0], inputs[1], axis=0)
num_segments = int(inputs[3].data.numpy().item())
return _op.segment_sum(data, inputs[2], num_segments)
return _impl
def row_wise_divide(multi_dim_tensor, one_dim_vector):
"""
This function enables row-wise division of multi_dim_tensor and one_dim_vector.
To achieve this, one_dim_vector is first tiled to the matching shape and then divided elementwise.
"""
multi_dim_tensor_offrow_shape = _op.strided_slice(
_op.shape_of(multi_dim_tensor, "int32"), [1], [-1], slice_mode="size"
)
one_dim_vector_tiled_shape = _op.concatenate(
[_op.reverse(multi_dim_tensor_offrow_shape, 0), _expr.const([1])], axis=0
)
one_dim_vector_tiled = _op.transpose(_op.tile(one_dim_vector, one_dim_vector_tiled_shape))
return _op.divide(multi_dim_tensor, one_dim_vector_tiled)
def count_all_indices(segment_ids, counts_dtype, num_segments=None):
"""
This snippet calculates the count of each index among all valid indices.
Valid indices are from 0 to max of [segment ids, num_segments]
"""
max_segments = _op.reshape(_op.max(segment_ids), -1) + _expr.const([1])
if num_segments:
max_segments = _op.maximum(max_segments, _expr.const([num_segments]))
max_ones = _op.maximum(max_segments, _op.shape_of(segment_ids))
counts = _op.segment_sum(
_op.ones(max_ones, counts_dtype), segment_ids, num_segments=num_segments
)
real_counts = _op.clip(counts, 1, 2147483647)  # clamp to [1, int32 max]; the lower bound avoids division by zero for empty segments
return real_counts
def _sparse_segment_sum_sqrtn():
def _impl(inputs, attr, params, mod):
assert len(inputs) == 3, "There should be 3 input tensors"
data = _op.take(inputs[0], inputs[1], axis=0)
real_counts = count_all_indices(inputs[2], attr["T"].name)
real_sqrt_counts = _op.sqrt(_op.cast_like(real_counts, data))
# Calculate regular segment sum
segment_sum = _op.segment_sum(data, inputs[2])
return row_wise_divide(segment_sum, real_sqrt_counts)
return _impl
def _sparse_segment_sum_sqrtn_with_num_segments():
def _impl(inputs, attr, params, mod):
assert len(inputs) == 4, "There should be 4 input tensors"
data = _op.take(inputs[0], inputs[1], axis=0)
num_segments = int(inputs[3].data.numpy().item())
real_counts = count_all_indices(inputs[2], attr["T"].name, num_segments=num_segments)
real_sqrt_counts = _op.sqrt(_op.cast_like(real_counts, data))
# Calculate regular segment sum
segment_sum = _op.segment_sum(data, inputs[2], num_segments=num_segments)
return row_wise_divide(segment_sum, real_sqrt_counts)
return _impl
def _sparse_segment_mean():
def _impl(inputs, attr, params, mod):
assert len(inputs) == 3, "There should be 3 input tensors"
data = _op.take(inputs[0], inputs[1], axis=0)
real_counts = count_all_indices(inputs[2], attr["T"].name)
# Calculate regular segment sum
segment_sum = _op.segment_sum(data, inputs[2])
return row_wise_divide(segment_sum, real_counts)
return _impl
def _sparse_segment_mean_with_num_segments():
def _impl(inputs, attr, params, mod):
assert len(inputs) == 4, "There should be 4 input tensors"
data = _op.take(inputs[0], inputs[1], axis=0)
num_segments = int(inputs[3].data.numpy().item())
real_counts = count_all_indices(inputs[2], attr["T"].name, num_segments=num_segments)
# Calculate regular segment sum
segment_sum = _op.segment_sum(data, inputs[2], num_segments=num_segments)
return row_wise_divide(segment_sum, real_counts)
return _impl
def _sparse_tensor_dense_add():
# Sparse utility from scipy
from scipy.sparse import csr_matrix
def _impl(inputs, attr, params, mod):
assert (
len(inputs) == 4
), "There should be 4 input tensors [sparse_indices, sparse_values, sparse_shape, dense]."
indices_tensor = _infer_value(inputs[0], params, mod).numpy()
values_tensor = _infer_value(inputs[1], params, mod).numpy()
dense_shape_tensor = _infer_value(inputs[2], params, mod).numpy()
data = inputs[3]
rows = [x[0] for x in indices_tensor]
cols = [x[1] for x in indices_tensor]
# Create scipy sparse Tensor(CSR)
weight_sp = csr_matrix(
(values_tensor, (rows, cols)), shape=tuple(dense_shape_tensor.tolist())
)
weight_data = _expr.const(weight_sp.data, weight_sp.data.dtype)
weight_indptrs = _expr.const(weight_sp.indptr, weight_sp.indptr.dtype)
weight_indices = _expr.const(weight_sp.indices, weight_sp.indices.dtype)
ret = _op.nn.sparse_add(data, [weight_data, weight_indices, weight_indptrs])
return ret
return _impl
def _identity():
def _impl(inputs, attr, params, mod):
return inputs[0]
return _impl
def _identityn():
def _impl(inputs, attr, params, mod):
return inputs
return _impl
def _concatV2():
def _impl(inputs, attr, params, mod):
pop_node = inputs.pop(len(inputs) - 1)
try:
axis = int(_get_num_param(params, pop_node))
except (IndexError, KeyError, AttributeError):
try:
axis = int(_infer_value(pop_node, params, mod).numpy())
except Exception:
axis = int(pop_node)
return AttrCvt(op_name="concatenate", ignores=["T", "N", "Tidx"], extras={"axis": axis})(
[inputs], attr
)
return _impl
def _concat():
def _impl(inputs, attr, params, mod):
pop_node = inputs.pop(0)
axis = int(_get_num_param(params, pop_node))
return AttrCvt(op_name="concatenate", ignores=["N"], extras={"axis": axis})([inputs], attr)
return _impl
def _pack():
def _impl(inputs, attr, params, mod):
axis = int(attr["axis"])
inputs_reshaped = [_op.expand_dims(i, axis=axis, num_newaxis=1) for i in inputs]
return _op.concatenate(inputs_reshaped, axis)
return _impl
def _tensor_array():
def _impl(inputs, attr, params, prelude):
dtype_str = attr.get("dtype").name
assert not attr["dynamic_size"], "Dynamic size tensor array is " "not supported in TVM yet."
if "shape" in attr:
shape = attr["shape"]
static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, shape)
static_tensor_array_ops.register()
tensor_array_constructor = static_tensor_array_ops.get_global_var("tensor_array")
tensor_array = tensor_array_constructor(inputs[0])
else:
tensor_array_constructor = prelude.get_global_var("tensor_array", dtype_str)
tensor_array = tensor_array_constructor(inputs[0])
return tensor_array
return _impl
def _tensor_array_scatter():
def _impl(inputs, attr, params, prelude):
dtype_str = attr.get("T").name
input_ta = inputs[0]
input_shape = get_tensor_array_shape(input_ta, dtype_str, prelude)
values_shape = _infer_shape(inputs[2], prelude.mod)
input_t_shape = values_shape[1:]
indices_shape = _infer_shape(inputs[1], prelude.mod)
if input_shape is None:
values_rank = len(values_shape)
unstack_name = "tensor_array_unstack_tensor{}".format(values_rank)
unstack_function = prelude.get_global_var(unstack_name, dtype_str)
values = unstack_function(inputs[2])
tensor_array_scatter_func = prelude.get_global_var("tensor_array_scatter", dtype_str)
else:
input_t_shape = _get_more_static_shape(input_t_shape, input_shape)
values_shape = (values_shape[0],) + input_t_shape
static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, input_t_shape)
static_tensor_array_ops.register()
# Register static indices shape
if isinstance(indices_shape[0], int):
static_tensor_array_ops.define_tensor_array_scatter(indices_shape, True)
tensor_array_scatter_func = prelude.get_global_var_static(
"tensor_array_scatter", dtype_str, input_t_shape
)
static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, values_shape)
static_tensor_array_ops.register()
unstack_function = prelude.get_global_var_static(
"tensor_array_unstack", dtype_str, values_shape
)
values = unstack_function(inputs[2])
ret = tensor_array_scatter_func(input_ta, inputs[1], values)
return ret
return _impl
def _tensor_array_gather():
def _impl(inputs, attr, params, prelude):
dtype_str = attr.get("dtype").name
input_shape = get_tensor_array_shape(inputs[2], dtype_str, prelude)
indices_shape = _infer_shape(inputs[1], prelude.mod)
if input_shape is None:
gather_func = prelude.get_var("tensor_array_gather", dtype_str)
out = gather_func(inputs[2], inputs[1])
else:
static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, input_shape)
static_tensor_array_ops.register()
if not isinstance(indices_shape[0], int):
gather_function = prelude.get_global_var_static(
"tensor_array_gather", dtype_str, input_shape
)
out_tensor_t = gather_function(inputs[2], inputs[1])
out_shape = (indices_shape[0],) + input_shape
static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, out_shape)
static_tensor_array_ops.register()
# Output shape is (indices_shape[0],) + input_shape
get_data_func = prelude.get_global_var_static(
"tensor_get_data", dtype_str, out_shape
)
out = get_data_func(out_tensor_t)
else:
# For fixed length indices, directly generate static shape output
read_func = prelude.get_global_var_static(
"tensor_array_read", dtype_str, input_shape
)
get_data_func = prelude.get_global_var_static(
"tensor_get_data", dtype_str, input_shape
)
tensor_list = []
for i in range(indices_shape[0]):
index = _op.take(inputs[1], tvm.relay.const(i))
out_tensor = get_data_func(read_func(inputs[2], index))
tensor_list.append(_op.expand_dims(out_tensor, axis=0))
if indices_shape[0] > 1:
out = _op.concatenate(tensor_list, axis=0)
else:
out = tensor_list[0]
return out
return _impl
def _tensor_array_size():
def _impl(inputs, attr, params, prelude):
return prelude.length(inputs[0])
return _impl
def _tensor_array_write():
def _impl(inputs, attr, params, prelude):
dtype_str = attr.get("T").name
input_ta = inputs[3]
input_ta_shape = get_tensor_array_shape(input_ta, dtype_str, prelude)
input_t_shape = _infer_shape(inputs[2], prelude.mod)
input_rank = len(input_t_shape)
if input_ta_shape is None:
tensor_name = "tensor{}".format(input_rank)
tensor_func = prelude.get_tensor_ctor(tensor_name, dtype_str)
v = tensor_func(inputs[2])
write_func = prelude.get_global_var("tensor_array_write", dtype_str)
else:
input_ta_rank = len(input_ta_shape)
assert input_ta_rank == input_rank, "Shape rank mismatch: {} vs {}".format(
input_ta_rank, input_rank
)
static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, input_ta_shape)
static_tensor_array_ops.register()
tensor_func = static_tensor_array_ops.get_ctor("tensor_constructor")
v = tensor_func(inputs[2])
# Write tensor with more static shape
actual_shape = _get_more_static_shape(input_t_shape, input_ta_shape)
if actual_shape != input_t_shape:
new_shape = []
num_any_dim = 0
for dim in actual_shape:
if not isinstance(dim, int):
num_any_dim += 1
new_shape.append(dim if isinstance(dim, int) else -1)
if num_any_dim <= 1:
v = tensor_func(_op.reshape(inputs[2], new_shape))
write_func = prelude.get_global_var_static(
"tensor_array_write", dtype_str, input_ta_shape
)
return write_func(input_ta, _op.take(inputs[1], tvm.relay.const(0)), v)
return _impl
def _tensor_array_read():
def _impl(inputs, attr, params, prelude):
dtype_str = attr["dtype"].name
input_shape = get_tensor_array_shape(inputs[2], dtype_str, prelude)
if input_shape is None:
read_func = prelude.get_global_var("tensor_array_read", dtype_str)
out = read_func(inputs[2], _op.take(inputs[1], tvm.relay.const(0)))
else:
static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, input_shape)
static_tensor_array_ops.register()
read_func = static_tensor_array_ops.get_global_var("tensor_array_read")
out_tensor = read_func(inputs[2], _op.take(inputs[1], tvm.relay.const(0)))
get_data_func = static_tensor_array_ops.get_global_var("tensor_get_data")
out = get_data_func(out_tensor)
return out
return _impl
def _tensor_array_split():
def _impl(inputs, attr, params, prelude):
dtype_str = attr.get("T").name
input_ta = inputs[0]
input_ta_shape = get_tensor_array_shape(input_ta, dtype_str, prelude)
lengths = _op.cast(inputs[2], "int32")
lengths_shape = _infer_shape(lengths, prelude.mod)
value_shape = _infer_shape(inputs[1], prelude.mod)
input_rank = len(value_shape)
if input_ta_shape is None:
tensor_name = "tensor{}".format(input_rank)
tensor_ctor = prelude.get_tensor_ctor(tensor_name, dtype_str)
v = tensor_ctor(inputs[1])
split_func = prelude.get_global_var("tensor_array_split", dtype_str)
else:
input_ta_rank = len(input_ta_shape)
assert input_ta_rank == input_rank, "Shape rank mismatch: {} vs {}".format(
input_ta_rank, input_rank
)
static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, input_ta_shape)
static_tensor_array_ops.register()
# Check static value/indices shape
if isinstance(value_shape[0], int) or isinstance(lengths_shape[0], int):
static_tensor_array_ops.define_tensor_array_split(value_shape, lengths_shape, True)
static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, value_shape)
static_tensor_array_ops.register()
tensor_ctor = static_tensor_array_ops.get_ctor("tensor_constructor")
v = tensor_ctor(inputs[1])
split_func = prelude.get_global_var_static(
"tensor_array_split", dtype_str, input_ta_shape
)
return split_func(input_ta, v, lengths)
return _impl
def _tensor_array_concat():
def _impl(inputs, attr, params, prelude):
dtype_str = attr["dtype"].name
input_shape = get_tensor_array_shape(inputs[1], dtype_str, prelude)
if input_shape is None:
concat_func = prelude.get_global_var("tensor_array_concat", dtype_str)
out = concat_func(inputs[1])
else:
static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, input_shape)
static_tensor_array_ops.register()
concat_func = prelude.get_global_var_static(
"tensor_array_concat", dtype_str, input_shape
)
out_tensor = concat_func(inputs[1])
out_shape = (Any(),) + input_shape[1:]
static_tensor_array_ops = StaticTensorArrayOps(prelude, dtype_str, out_shape)
static_tensor_array_ops.register()
get_data_func = prelude.get_global_var_static("tensor_get_data", dtype_str, out_shape)
out = get_data_func(out_tensor)
return out
return _impl
def _tile():
def _impl(inputs, attr, params, mod):
reps_input = inputs.pop()
if isinstance(reps_input, _expr.Call):
np_reps = _infer_value(reps_input, params, mod).numpy()
reps = [np_reps.flatten()[i] for i in range(np_reps.flatten().shape[0])]
else:
reps = _get_list_param(params, reps_input, mod)
new_input = [inputs.pop(0)]
return AttrCvt(op_name="tile", extras={"reps": tuple(reps)}, ignores=["Tmultiples"])(
new_input, attr
)
return _impl
def _slice():
def _impl(inputs, attr, params, mod):
try:
begin = _get_list_param(params, inputs[1], mod)
except Exception:
# Handle symbolic begin
begin = inputs[1]
try:
size = _get_list_param(params, inputs[2], mod)
except Exception:
# Handle symbolic size
size = inputs[2]
# Align begin and strides for dynamic shape.
data_dim = len(_infer_shape(inputs[0], mod))
strides = [1] * data_dim
if not isinstance(begin, (_expr.Call, _expr.Var)):
for _ in range(len(begin), data_dim):
begin.append(0)
elif not isinstance(size, (_expr.Call, _expr.Var)):
for _ in range(len(size), data_dim):
size.append(-1)
return _op.strided_slice(
inputs[0], begin=begin, end=size, strides=strides, slice_mode="size"
)
return _impl
def _reshape():
def _impl(inputs, attr, params, mod):
pop_node = inputs.pop(1)
try:
shape_arg = _get_tuple_param(params, pop_node)
except AttributeError:
# The Shape operator is already pruned, hence try to
# infer the shape by precomputing it if possible.
try:
params_new = _infer_value(pop_node, params, mod)
shape_arg = tuple(params_new.numpy().astype("int32").flatten())
except Exception:
# Deal with symbolic shape case.
if isinstance(pop_node, _expr.Call) and "shape_of" in str(pop_node.op):
# shape_of is the direct ancestor.
return _op.reshape_like(inputs[0], pop_node.args[0])
shape_arg = pop_node
return AttrCvt(op_name="reshape", extras={"newshape": shape_arg}, ignores=["Tshape"])(
inputs, attr
)
return _impl
def _depth_to_space():
def _impl(inputs, attr, params, mod):
block_size = int(attr["block_size"])
layout = attr["data_format"].decode("utf-8")
return _op.nn.depth_to_space(inputs[0], block_size, layout)
return _impl
def _space_to_depth():
def _impl(inputs, attr, params, mod):
block_size = int(attr["block_size"])
layout = attr["data_format"].decode("utf-8")
return _op.nn.space_to_depth(inputs[0], block_size, layout)
return _impl
def _sparse_to_dense():
def _impl(inputs, attr, params, mod):
sparse_indices = inputs[0]
output_shape = inputs[1]
sparse_values = inputs[2]
default_value = inputs[3]
return _op.sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value)
return _impl
def _bias_add():
def _impl(inputs, attr, params, mod):
# Must expand for proper broadcasting in NCHW.
if "data_format" in attr and attr["data_format"].decode("utf-8") == "NCHW":
bias = _op.reshape(inputs[1], newshape=(1, -1, 1, 1))
else:
bias = inputs[1]
return _op.add(inputs[0], bias)
return _impl
def _broadcast_args():
def _impl(inputs, attr, params, mod):
if isinstance(inputs[0], _expr.Var):
s0 = params[inputs[0].name_hint]
else:
s0 = _infer_value(inputs[0], params, mod)
if isinstance(inputs[1], _expr.Var):
s1 = params[inputs[1].name_hint]
else:
s1 = _infer_value(inputs[1], params, mod)
s0 = list(s0.numpy().reshape([-1]))
s1 = list(s1.numpy().reshape([-1]))
s0_size, s1_size = len(s0), len(s1)
out = deque([])
for i in range(1, min(s0_size, s1_size) + 1):
if s0[s0_size - i] == s1[s1_size - i]:
out.appendleft(s0[s0_size - i])
elif s0[s0_size - i] == 1:
out.appendleft(s1[s1_size - i])
else:
assert s1[s1_size - i] == 1, "Incompatible broadcast type %s and %s" % (
s0[s0_size - i],
s1[s1_size - i],
)
out.appendleft(s0[s0_size - i])
if s0_size < s1_size:
for i in range(s0_size + 1, s1_size + 1):
out.appendleft(s1[s1_size - i])
if s1_size < s0_size:
for i in range(s1_size + 1, s0_size + 1):
out.appendleft(s0[s0_size - i])
return _expr.const(list(out), attr["T"].name)
return _impl
def _broadcast_to():
def _impl(inputs, attr, params, mod):
if isinstance(inputs[1], _expr.Var):
shape = params[inputs[1].name_hint]
else:
shape = _infer_value(inputs[1], params, mod)
shape = list(shape.numpy().reshape([-1]))
return _op.broadcast_to(inputs[0], shape)
return _impl
def _squeeze():
def _impl(inputs, attr, params, mod):
if len(attr["squeeze_dims"]) == 0:
attr["squeeze_dims"] = None
return AttrCvt(
op_name="squeeze", transforms={"squeeze_dims": "axis"}, ignores=["T", "_cloned"]
)(inputs, attr)
return _impl
def _fused_batch_norm():
def _impl(inputs, attr, params, mod):
# Tensorflow: (data, gamma, beta, moving_mean, moving_variance)
# Relay: (data, gamma, beta, moving_mean, moving_variance)
assert len(inputs) == 5
axis = 3
need_cast = False
if "data_format" in attr:
attr["data_format"] = attr["data_format"].decode("utf-8")
if attr["data_format"] == "NCHW":
axis = 1
if "U" in attr and attr["U"].name != attr["T"].name:
need_cast = True
inputs[0] = _op.cast(inputs[0], dtype=attr["U"].name)
# Check if mean and variance are empty
# If so, replace them with Mean and Variance Ops
# For run-time calculation
moving_mean_shape = [int(n) for n in inputs[3].type_annotation.shape]
moving_variance_shape = [int(n) for n in inputs[4].type_annotation.shape]
if moving_mean_shape[0] == 0 and moving_variance_shape[0] == 0:
inputs[3] = _op.mean(inputs[0], axis=axis, keepdims=False, exclude=True)
inputs[4] = _op.variance(inputs[0], axis=axis, keepdims=False, exclude=True)
out = AttrCvt(
op_name="batch_norm",
transforms={"scale_after_normalization": "scale", "variance_epsilon": "epsilon"},
extras={"axis": axis},
ignores=["data_format", "U", "exponential_avg_factor"],
disables=["momentum"],
)(inputs, attr)
if need_cast:
out = _expr.TupleGetItem(out.astuple(), 0)
out = _op.cast(out, dtype=attr["T"].name)
return out
return _impl
def _batch_norm():
def _impl(inputs, attr, params, mod):
# Rearrange inputs from
# (data, moving_mean, moving_variance, beta, gamma)
# to
# (data, gamma, beta, moving_mean, moving_var)
new_inputs = [inputs[0], inputs[4], inputs[3], inputs[1], inputs[2]]
axis = 3
if "data_format" in attr:
attr["data_format"] = attr["data_format"].decode("utf-8")
if attr["data_format"] == "NCHW":
axis = 1
return AttrCvt(
op_name="batch_norm",
transforms={"scale_after_normalization": "scale", "variance_epsilon": "epsilon"},
extras={"axis": axis},
ignores=["data_format", "exponential_avg_factor"],
disables=["momentum"],
)(new_inputs, attr)
return _impl
def _relu6():
def _impl(inputs, attr, params, mod):
return _op.clip(inputs[0], a_min=0, a_max=6)
return _impl
def _shape():
def _impl(inputs, attr, params, mod):
is_symbolic_shape = False
input_shape = _infer_shape(inputs[0], mod)
for axis in input_shape:
if not isinstance(axis, (int, tvm.tir.IntImm)):
is_symbolic_shape = True
break
if is_symbolic_shape:
ret = _op.shape_of(inputs[0], dtype=attr["out_type"].name)
else:
ret = np.array(input_shape, dtype=attr["out_type"].name)
return ret
return _impl
def _fill():
def _impl(inputs, attr, params, mod):
try:
output_shape = _infer_value(inputs[0], params, mod).numpy().tolist()
except Exception:
output_shape = inputs[0]
return _op.full(inputs[1], output_shape, attr["T"].name)
return _impl
def _lrn():
def _impl(inputs, attr, params, mod):
attr_new = {}
depth_radius = attr.get("depth_radius", 5)
size = (depth_radius * 2) + 1
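# TF depth_radius counts neighbours on one side, so the full window size is
# 2 * depth_radius + 1. Relay's lrn normalises alpha by size, hence TF's
# per-element alpha is multiplied by size below to compensate.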
attr_new["axis"] = 3 # Fix axis, NHWC format
attr_new["size"] = size
attr_new["bias"] = attr.get("bias", 1)
attr_new["alpha"] = attr.get("alpha", 1) * size
attr_new["beta"] = attr.get("beta", 0.5)
return AttrCvt(op_name="lrn")(inputs, attr_new)
return _impl
def _sum():
def _impl(inputs, attr, params, mod):
axis = _get_tuple_param(params, inputs[1])
return AttrCvt(
op_name="sum",
extras={"axis": axis},
transforms={"keep_dims": "keepdims"},
ignores=["name", "Tidx"],
)([inputs[0]], attr)
return _impl
def _reduce(op):
def _impl(inputs, attr, params, mod):
axis = _get_list_param(params, inputs[1], mod)
axis = tuple(axis)
if not axis:
axis = None
return AttrCvt(
op_name=op,
extras={"axis": axis},
transforms={"keep_dims": "keepdims"},
ignores=["name", "Tidx"],
)([inputs[0]], attr)
return _impl
def _euclidean_norm():
def _impl(inputs, attr, params, mod):
axis = tuple(_get_list_param(params, inputs[1], mod))
keep_dims = bool(attr.get("keep_dims", False))
return _op.sqrt(
_op.cast(_op.reduce.sum(_op.multiply(inputs[0], inputs[0]), axis, keep_dims), "float32")
)
return _impl
def _square():
def _impl(inputs, attr, params, mod):
return _op.multiply(inputs[0], inputs[0])
return _impl
def _gather():
"GatherV2, Gather"
def _impl(inputs, attr, params, mod):
if len(inputs) > 2:
axis = _get_num_param(params, inputs.pop(2))
else:
axis = 0
batch_dims = 0
if int(attr.get("batch_dims", 0)) != 0:
batch_dims = int(attr.get("batch_dims", 0))
new_input = inputs[0:2]
op_ = AttrCvt(
op_name="take",
extras={
"axis": tvm.tir.const(axis, "int32"),
"batch_dims": tvm.tir.const(batch_dims, "int32"),
},
ignores=["Tindices", "Tparams", "validate_indices", "Taxis", "_class"],
)(new_input, attr)
return op_
return _impl
def _gather_nd():
"""GatherNd"""
def _impl(inputs, attr, params, mod):
indices_dims = len(_infer_shape(inputs[1], mod))
indices = _op.transpose(inputs[1], axes=[-1] + list(range(indices_dims - 1)))
return AttrCvt(op_name="gather_nd", ignores=["Tindices", "Tparams", "Taxis", "_class"])(
[inputs[0], indices], attr
)
return _impl
def _stridedSlice():
def _impl(inputs, attr, params, mod):
"""Strided Slice.
Operator description: https://www.tensorflow.org/api_docs/python/tf/strided_slice
Tensorflow mask validation: https://github.com/tensorflow/tensorflow/blob/master/
tensorflow/core/util/strided_slice_op.cc#L147-L368
"""
begin = _get_list_param(params, inputs[1], mod)
end = _get_list_param(params, inputs[2], mod)
stride = _get_list_param(params, inputs[3], mod)
begin_mask = int(attr.get("begin_mask", 0))
end_mask = int(attr.get("end_mask", 0))
ellipsis_mask = int(attr.get("ellipsis_mask", 0))
new_axis_mask = int(attr.get("new_axis_mask", 0))
shrink_axis_mask = int(attr.get("shrink_axis_mask", 0))
in_type = _infer_type(inputs[0], mod)
data_shape = get_const_tuple(in_type.checked_type.shape)
data_dim = len(data_shape)
stride_dim = len(stride)
if data_dim == 0 and isinstance(inputs[0], _expr.Constant):
new_data = inputs[0].data.numpy().reshape(1)
return _expr.const(new_data, inputs[0].data.dtype)
# This is a special routine to handle strided_slice after shape_of.
# We need this since in some cases we want to do strided_slice on
# a partial symbolic shape, such as (1, ?), and get a static shape
# (1,). Directly slice on shape_of will result in fully dynamic shape.
# TODO(kevinthesun): Can we generalize this process with partial eval?
if isinstance(inputs[0], _expr.Call) and inputs[0].op == _op.get("shape_of"):
bg = begin[0]
ed = end[0]
st = stride[0]
if ed <= 0 < st:
ed += data_shape[0]
in_shape = _infer_shape(inputs[0].args[0], mod)
dtype = in_type.checked_type.dtype
out_data = []
idx = bg
while idx < ed:
if isinstance(in_shape[idx], int):
out_data.append(in_shape[idx])
else:
break
idx += st
# Only return when in_shape is fully static in the range from begin to end.
if idx >= ed:
ret = _expr.const(out_data, dtype)
if shrink_axis_mask:
ret = _op.squeeze(ret)
return ret
def _transform_mask(stride_dim, ellipsis_mask):
"""Handle mask inputs to create new begin, end, stride and output shape"""
m_begin = [0] * data_dim
m_end = [0] * data_dim
m_stride = [0] * data_dim
fshape_indices = []
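# fshape_indices records, for every output dim, which input dim it maps to;
# -1 marks a new axis (new_axis_mask) and -2 marks a squeezed axis (shrink_axis_mask).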
# Count the new axes that appear after ellipsis_mask; they are taken into account while applying ellipsis_mask.
ellipsis_seen = False
new_axes_after_ellipsis = 0
for i in range(stride_dim):
mask = 1 << i
if ellipsis_seen and (mask & new_axis_mask) != 0:
new_axes_after_ellipsis += 1
if (mask & ellipsis_mask) != 0:
ellipsis_seen = True
if not ellipsis_seen:
# Used later for extending the stride attributes in the below loop.
ellipsis_mask |= 1 << stride_dim
stride_dim += 1
final_index = 0
for index in range(stride_dim):
mask = 1 << index
if mask & ellipsis_mask:
# Identify the end index for applying ellipsis_mask
to_index = min(
((data_dim - (stride_dim - index)) + 1 + new_axes_after_ellipsis), data_dim
)
for i in range(final_index, to_index):
m_begin[final_index] = 0
m_end[final_index] = data_shape[final_index]
m_stride[final_index] = 1
fshape_indices.append(final_index)
final_index += 1
elif mask & new_axis_mask:
fshape_indices.append(-1)
elif not mask & new_axis_mask:
if final_index == len(m_begin):
break
if mask & begin_mask:
m_begin[final_index] = -1 if stride[index] < 0 else 0
elif begin[index]:
m_begin[final_index] = begin[index]
if mask & end_mask:
m_end[final_index] = (
-(data_shape[final_index] + 1)
if stride[index] < 0
else data_shape[final_index]
)
elif end[index]:
m_end[final_index] = end[index]
m_stride[final_index] = stride[index]
if mask & shrink_axis_mask:
# TensorFlow treats an axis with shrink_axis_mask as a dimension of size 1
m_begin[final_index] = (
data_shape[final_index] + begin[index]
if begin[index] < 0
else begin[index]
)
m_end[final_index] = m_begin[final_index] + 1
m_stride[final_index] = 1
fshape_indices.append(-2)
else:
fshape_indices.append(final_index)
final_index += 1
return m_begin, m_end, m_stride, fshape_indices
fshape_indices = None
if begin_mask or end_mask or ellipsis_mask or new_axis_mask or shrink_axis_mask:
begin, end, stride, fshape_indices = _transform_mask(stride_dim, ellipsis_mask)
out = _op.strided_slice(inputs[0], begin=begin, end=end, strides=stride)
out_shape = _infer_shape(out, mod=mod)
if not fshape_indices:
fshape_indices = range(len(out_shape))
# Create final output shape.
final_output = []
for gather_index in fshape_indices:
if gather_index == -1:
final_output.append(1)
elif gather_index == -2:
pass
else:
final_output.append(out_shape[gather_index])
if not final_output:
if not shrink_axis_mask:
ret = out
else:
final_shape = []
for dim in out_shape:
if dim != 1:
final_shape.append(dim)
if len(final_shape) == 0:
ret = _op.squeeze(out)
else:
# We need reshape to handle dynamic shape.
ret = _op.reshape(out, newshape=tuple(final_shape))
else:
ret = _op.reshape(out, newshape=tuple(final_output))
return ret
return _impl
def _pad(name):
def _impl(inputs, attr, params, mod):
try:
padlist = _get_param(params, inputs[1])
except (IndexError, KeyError, AttributeError):
try:
padlist = _infer_value(inputs[1], params, mod).numpy().tolist()
except Exception:
padlist = inputs[1]
if isinstance(padlist, _expr.Expr):
paddings = padlist
else:
paddings = tuple(tuple(l) for l in padlist)
attr["pad_width"] = paddings
attr["pad_value"] = 0
new_inputs = [inputs[0]]
if name == "PadV2":
try:
attr["pad_value"] = _get_num_param(params, inputs[2])
except (IndexError, KeyError, AttributeError):
attr["pad_value"] = inputs[2]
return AttrCvt(
op_name="pad",
ignores=["Tpaddings"],
)(new_inputs, attr)
return _impl
def _mirror_pad():
def _impl(inputs, attr, params, mod):
padlist = _get_param(params, inputs[1])
paddings = tuple(tuple(l) for l in padlist)
attr["pad_width"] = paddings
mode = attr["mode"].decode("utf-8")
attr["mode"] = mode
new_inputs = [inputs[0]]
return AttrCvt(
op_name="mirror_pad",
ignores=["Tpaddings"],
)(new_inputs, attr)
return _impl
def _transpose():
def _impl(inputs, attr, params, mod):
# If perm is not specified, axes is left empty;
# otherwise its value is obtained from params
axes = _get_list_param(params, inputs[1], mod)
return _op.transpose(inputs[0], axes=axes)
return _impl
def _where():
def _impl(inputs, attr, params, mod):
if len(inputs) == 1:
return AttrCvt(op_name="argwhere")(inputs, attr)
return AttrCvt(op_name="where")(inputs, attr)
return _impl
def _clip_by_value():
def _impl(inputs, attr, params, mod):
a_min = _get_num_param(params, inputs[1])
a_max = _get_num_param(params, inputs[2])
return _op.clip(inputs[0], a_min=a_min, a_max=a_max)
return _impl
def _reverse_v2():
def _impl(inputs, attr, params, mod):
axis = _get_num_param(params, inputs[1])
return AttrCvt(op_name="reverse", ignores=["Tidx"], extras={"axis": int(axis)})(
[inputs[0]], attr
)
return _impl
def _rank():
def _impl(inputs, attr, params, mod):
input_shape = _infer_shape(inputs[0], mod)
name = attr["_node_name"]
params[name] = tvm.nd.array(np.array([len(input_shape)]).astype("int32"))
return [_expr.var(name, shape=params[name].shape, dtype="int32")]
return _impl
def _range():
def _impl(inputs, attr, params, mod):
try:
start = _get_param(params, inputs[0])[0]
except (IndexError, KeyError, AttributeError):
try:
start = _infer_value(inputs[0], params, mod).numpy().tolist()
start = start if not isinstance(start, list) else start[0]
except Exception:
# Symbolic start
start = inputs[0]
try:
limit = (
_get_param(params, inputs[1])[0]
if hasattr(inputs[1], "name_hint") or isinstance(inputs[1], _expr.Constant)
else params.pop("Rank").numpy()[0]
)
except (IndexError, KeyError, AttributeError):
try:
limit = _infer_value(inputs[1], params, mod).numpy().tolist()
limit = limit if not isinstance(limit, list) else limit[0]
except Exception:
limit = inputs[1]
try:
delta = _get_param(params, inputs[2])[0]
except (IndexError, KeyError, AttributeError):
try:
delta = _infer_value(inputs[2], params, mod).numpy().tolist()
delta = delta if not isinstance(delta, list) else delta[0]
except Exception:
# Symbolic delta
delta = inputs[2]
# if all attributes are constant, evaluate the range function and return relay.const
dtype = attr["Tidx"].name if "Tidx" in attr else str(start.dtype)
if all(
[
isinstance(start, (np.int32, np.int64, int, np.float32, np.float64, float)),
isinstance(limit, (np.int32, np.int64, int, np.float32, np.float64, float)),
isinstance(delta, (np.int32, np.int64, int, np.float32, np.float64, float)),
]
):
return tvm.relay.const(list(range(int(start), int(limit), int(delta))), dtype=dtype)
if isinstance(start, (np.int32, np.int64, int, np.float32, np.float64, float)):
start = _expr.const(start, dtype=dtype)
if isinstance(limit, (np.int32, np.int64, int, np.float32, np.float64, float)):
limit = _expr.const(limit, dtype=dtype)
if isinstance(delta, (np.int32, np.int64, int, np.float32, np.float64, float)):
delta = _expr.const(delta, dtype=dtype)
return AttrCvt(
op_name="arange",
ignores=["Tidx", "_class"],
extras={"start": start, "stop": limit, "step": delta, "dtype": dtype},
)([], attr)
return _impl
def _einsum():
def _impl(inputs, attr, params, mod):
einsum_attr = dict(attr)
einsum_attr["equation"] = einsum_attr["equation"].decode("utf-8")
return AttrCvt(op_name="einsum", ignores=["N"])([inputs], einsum_attr)
return _impl
def _elu():
def _impl(inputs, attr, params, mod):
dtype = attr["T"].name
alpha = tvm.relay.const(-1.0, dtype)
return alpha * _op.nn.relu(tvm.relay.const(1, dtype) - _op.exp(inputs[0])) + _op.nn.relu(
inputs[0]
)
return _impl
def _selu():
def _impl(inputs, attr, params, mod):
dtype = attr["T"].name
alpha = tvm.relay.const(-1.6732632423543772848170429916717, dtype)
gamma = tvm.relay.const(1.0507009873554804934193349852946, dtype)
return gamma * (
alpha * _op.nn.relu(tvm.relay.const(1, dtype) - _op.exp(inputs[0]))
+ _op.nn.relu(inputs[0])
)
return _impl
def _mean():
def _impl(inputs, attr, params, mod):
axis = _get_tuple_param(params, inputs[1])
return AttrCvt(
op_name="mean",
ignores=["Tdim", "Tidx"],
transforms={"keep_dims": "keepdims"},
extras={"axis": axis},
)([inputs[0]], attr)
return _impl
def _broadcast(name):
def _impl(inputs, attr, params, mod):
return AttrCvt(op_name=name, ignores=["name", "incompatible_shape_error", "Tidx"])(
inputs, attr
)
return _impl
def _split(has_size_vector):
# TF documentation https://www.tensorflow.org/api_docs/python/tf/split
def _impl(inputs, attr, params, mod):
try:
# order and number of inputs are different:
# if has_size_vector:
# https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/split-v
# else:
# https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/split
# in addition, `axis` and `num_or_size_splits` can be tensors in TensorFlow,
# we can only support constants
if has_size_vector:
input_node_index = 0
input_axis_index = 2
size_splits = _get_param(params, inputs[1])
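# Convert split sizes into split points, e.g. size_splits [2, 3, 5] gives
# section beginnings [2, 5] (cumulative sums, dropping the last).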
section_beginnings = np.cumsum(size_splits)[:-1]
indices_or_sections = tuple(section_beginnings)
else:
input_node_index = 1
input_axis_index = 0
indices_or_sections = attr["num_split"]
input_node = inputs[input_node_index]
axis_input_value = _get_num_param(params, inputs[input_axis_index])
except (IndexError, KeyError, AttributeError):
raise TypeError(
"Unsupported argument for split: `axis` and `num_or_size_splits` "
"should be constants"
)
return _op.split(
input_node, indices_or_sections=indices_or_sections, axis=int(axis_input_value)
)
return _impl
def _unpack():
def _impl(inputs, attr, params, mod):
input_node = inputs[0]
axis = attr["axis"]
input_shape = _infer_shape(input_node, mod)
axis_length = input_shape[axis]
if axis_length < 0:
raise TypeError("Unstack with unknown axis length")
splitted = _op.split(input_node, indices_or_sections=axis_length, axis=axis)
axis = [axis]
return _expr.TupleWrapper(
_expr.Tuple([_op.squeeze(split_item, axis=axis) for split_item in splitted]),
len(splitted),
)
return _impl
def _softmax():
def _impl(inputs, attr, params, mod):
return AttrCvt(op_name="softmax", transforms={"axis": ("axis", 1)})([inputs[0]], attr)
return _impl
def _softsign():
# op description: https://www.tensorflow.org/api_docs/python/tf/math/softsign
def _impl(inputs, attr, params, mod):
abs_out = get_relay_op("abs")(inputs[0])
add_out = abs_out + tvm.relay.const(1, attr["T"].name)
return inputs[0] / add_out
return _impl
def _softplus():
# op description: https://www.tensorflow.org/api_docs/python/tf/math/softplus
def _impl(inputs, attr, params, mod):
exp_out = AttrCvt("exp")(inputs, attr)
rh = tvm.relay.const(1, attr["T"].name)
add_out = get_relay_op("add")(exp_out, rh)
return get_relay_op("log")(add_out)
return _impl
def _topk():
def _impl(inputs, attr, params, mod):
k_input = inputs.pop(1)
try:
k = int(_get_num_param(params, k_input))
except (IndexError, KeyError, AttributeError):
try:
k = int(_infer_value(k_input, params, mod).numpy().tolist())
except Exception:
k = k_input
if isinstance(k, int):
if k < 1:
raise tvm.error.OpAttributeInvalid(
"Attribute k must be positive in operator TopKV2"
)
k = _expr.const(k)
if attr["sorted"] is False:
raise tvm.error.OpAttributeUnImplemented(
"Attribute sorted=False is not supported in operator TopKV2"
)
return AttrCvt(
op_name="topk",
ignores=["sorted"],
extras={"k": k, "is_ascend": False, "dtype": "int32"},
)([inputs[0]], attr)
return _impl
def _floordiv():
def _impl(inputs, attr, params, mod):
assert len(inputs) == 2
return AttrCvt("floor_divide")(inputs, attr)
return _impl
def _floormod():
def _impl(inputs, attr, params, mod):
assert len(inputs) == 2
return AttrCvt("floor_mod")(inputs, attr)
return _impl
def _logical(name):
def _impl(inputs, attr, params, mod):
return AttrCvt(op_name=name)(inputs, attr)
return _impl
def _space_to_batch_nd():
def _impl(inputs, attr, params, mod):
block_shape = _get_list_param(params, inputs[1], mod)
paddings = _get_list_param(params, inputs[2], mod)
paddings = np.squeeze(paddings)
if len(paddings.shape) == 1:
paddings = np.expand_dims(paddings, axis=0)
paddings = paddings.tolist()
attr["block_shape"] = block_shape
attr["paddings"] = paddings
out = AttrCvt("space_to_batch_nd", ignores=["Tblock_shape", "Tpaddings"])([inputs[0]], attr)
return out
return _impl
def _batch_to_space_nd():
def _impl(inputs, attr, params, mod):
block_shape = _get_list_param(params, inputs[1], mod)
crops = _get_list_param(params, inputs[2], mod)
crops = np.squeeze(crops)
if len(crops.shape) == 1:
crops = np.expand_dims(crops, axis=0)
crops = crops.tolist()
attr["block_shape"] = block_shape
attr["crops"] = crops
out = AttrCvt("batch_to_space_nd", ignores=["Tblock_shape", "Tcrops"])([inputs[0]], attr)
return out
return _impl
def _atan2():
def _impl(inputs, attr, params, mod):
divide = _elemwise("divide")(inputs, attr, params, mod)
return get_relay_op("atan")(divide)
return _impl
def _prod():
def _impl(inputs, attr, params, mod):
axis = _get_num_param(params, inputs[1])
keepdims = attr["keep_dims"]
return _op.prod(inputs[0], int(axis), keepdims=keepdims)
return _impl
def _log1p():
# op description: https://www.tensorflow.org/api_docs/python/tf/math/log1p
def _impl(inputs, attr, params, mod):
one = tvm.relay.const(1, attr["T"].name)
add_out = get_relay_op("add")(inputs[0], one)
return get_relay_op("log")(add_out)
return _impl
def _one_hot():
def _impl(inputs, attr, params, mod):
depth = int(_get_num_param(params, inputs[1]))
dtype = attr["T"].name
on_value = _get_num_param(params, inputs[2])
off_value = _get_num_param(params, inputs[3])
new_inputs = [
inputs[0],
tvm.relay.const(on_value, dtype),
tvm.relay.const(off_value, dtype),
]
return AttrCvt("one_hot", ignores=["TI"], extras={"depth": depth, "dtype": dtype})(
new_inputs, attr
)
return _impl
def _squared_difference():
def _impl(inputs, attr, params, mod):
difference = _op.subtract(inputs[0], inputs[1])
return _op.multiply(difference, difference)
return _impl
def _size():
def _impl(inputs, attr, params, mod):
new_attr = attr
new_attr["out_type"] = attr["out_type"].name
return AttrCvt("ndarray_size", transforms={"out_type": "dtype"})(inputs, new_attr)
return _impl
def _add_n():
def _impl(inputs, attr, params, mod):
if not isinstance(inputs, tuple):
inputs = list(inputs)
assert len(inputs) > 0, "add_n takes >=1 inputs, but 0 given."
_res = inputs[0]
for each in inputs[1:]:
_res = _op.add(_res, each)
return _res
return _impl
def _LSTMBlockCell():
def _impl(inputs, attr, params, mod):
"""LSTM Block cell.
Calculations and return values are described in:
https://github.com/tensorflow/tensorflow/blob/
r1.8/tensorflow/contrib/rnn/python/ops/lstm_ops.py#L41-L114
Parameters
----------
inputs : relay.Expr
Input data
in_state_c: list of relay.Expr
Cell state input values for all the layers
in_state_h: list of relay.Expr
Hidden state input values for all the layers
attrs : dict
Dict of operator attributes
params : dict
Dict of pretrained weights and biases
Returns
-------
relay.Expr.TupleWrapper
[i, cs, f, o, ci, co, h]
"""
in_data = inputs[0]
in_state_c = inputs[1]
in_state_h = inputs[2]
in_weight = inputs[3]
in_bias = inputs[7]
forget_bias = attr.pop("forget_bias")
input_shape = _infer_shape(inputs[0], mod)
weight_shape = _infer_shape(inputs[3], mod)
batch_size, input_size = input_shape[0], input_shape[1]
num_hidden_layers = weight_shape[1]
in_data = _op.reshape(in_data, newshape=(batch_size, input_size))
ixh = _op.concatenate([in_data, in_state_h], axis=1)
in_weight = _op.transpose(in_weight, axes=None)
gates = _op.nn.dense(ixh, in_weight, units=num_hidden_layers)
gates_bias = _op.add(gates, in_bias)
gate_list = _op.split(gates_bias, indices_or_sections=4, axis=1)
in_gate = _op.sigmoid(gate_list[0])
in_transform = _op.tanh(gate_list[1])
forget_gate = _op.add(gate_list[2], tvm.relay.const(forget_bias, attr["T"].name))
forget_gate = _op.sigmoid(forget_gate)
out_gate = _op.sigmoid(gate_list[3])
next_c = _op.add(_op.multiply(forget_gate, in_state_c), _op.multiply(in_gate, in_transform))
co = _op.tanh(next_c)
next_h = out_gate * co
return tvm.relay.TupleWrapper(
tvm.relay.Tuple([in_gate, next_c, forget_gate, out_gate, in_transform, co, next_h]), 7
)
return _impl
def _unique(return_counts=True):
def _impl(inputs, attr, params, mod):
assert len(inputs) == 1
data = inputs[0]
if return_counts:
[unique, _, inverse_indices, num_uniq, counts] = _op.unique(
data, is_sorted=False, return_counts=True
)
unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode="size")
counts_sliced = _op.strided_slice(counts, begin=[0], end=num_uniq, slice_mode="size")
return _expr.TupleWrapper(
_expr.Tuple([unique_sliced, inverse_indices, counts_sliced]),
3,
)
[unique, _, inverse_indices, num_uniq] = _op.unique(
data, is_sorted=False, return_counts=False
)
unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode="size")
return _expr.TupleWrapper(
_expr.Tuple([unique_sliced, inverse_indices]),
2,
)
return _impl
def _bincount():
def _impl(inputs, attr, params, mod):
input = inputs[0] # arr: int32 Tensor
size = inputs[1] # size: non-negative int scalar Tensor
# weights: int32, int64, float32, or float64 Tensor with the same shape as arr
# or a length-0 Tensor, in which case it acts as all weights equal to 1.
weights = inputs[2]
# Returns: Output: 1D Tensor with length equal to size
# The counts or summed weights for each value in the range [0, size).
input_shape = _infer_shape(input, mod)
if len(input_shape) > 1:
input = _op.reshape(input, [-1])
is_weights_zero_tensor = True
if weights:
weights_shape = _infer_shape(weights, mod)
is_weights_zero_tensor = weights_shape == (0,)
if len(weights_shape) > 1:
weights = _op.reshape(weights, [-1])
# Output should have the same dtype as weights.
if is_weights_zero_tensor:
# if weights are length-0 Tensor - output dtype is float32
out_dtype = "float32"
updates = _op.cast(_op.ones_like(input), out_dtype)
else:
out_dtype = _infer_type(weights, mod).checked_type.dtype
updates = weights
counts_shape = _op.reshape(size, [1])
counts = _op.zeros(counts_shape, out_dtype)
out = _op.scatter_add(counts, input, updates, axis=0)
return out
return _impl
def _dense_bincount():
def _impl(inputs, attr, params, mod):
input = inputs[0] # input: int32, int64. 1D or 2D int Tensor
size = inputs[1] # size: non-negative int scalar Tensor
# weights: int32, int64, float32, or float64 Tensor with the same shape as input
# or a length-0 Tensor, in which case it acts as all weights equal to 1.
weights = inputs[2]
# Returns: Output: 1D Tensor with length equal to size
# or 2D Tensor with [batch_size, size].
# The counts or summed weights for each value in the range [0, size).
input_dtype = _infer_type(input, mod).checked_type.dtype
input_shape = _infer_shape(input, mod)
is_2d_input = len(input_shape) == 2
if input_dtype == "int64":
warnings.warn(
"Casting an int64 input to int32, since we do not have int64 atomic add"
"needed for bincount yet."
)
input = _op.cast(input, "int32")
is_weights_zero_tensor = True
if weights:
weights_shape = _infer_shape(weights, mod)
is_weights_zero_tensor = weights_shape == (0,)
# Output should have the same dtype as weights.
if is_weights_zero_tensor:
# if weights are length-0 Tensor - output dtype is float32
out_dtype = "float32"
updates = _op.cast(_op.ones_like(input), out_dtype)
else:
out_dtype = _infer_type(weights, mod).checked_type.dtype
updates = weights
if is_2d_input:
batch_arr = _op.take(_op.shape_of(input), _expr.const([0]))
size_arr = _op.reshape(size, [1])
counts_shape = _op.concatenate([batch_arr, size_arr], axis=0)
counts = _op.zeros(counts_shape, out_dtype)
out = _op.scatter_add(counts, input, updates, axis=1)
else:
counts_shape = _op.reshape(size, [1])
counts = _op.zeros(counts_shape, out_dtype)
out = _op.scatter_add(counts, input, updates, axis=0)
if attr["binary_output"]:
out = _op.cast(_op.cast(out, "bool"), out_dtype)
return out
return _impl
# _convert_map defines a map from TF op name to a converter functor (callable):
# - for 1-to-1 mappings, use Renamer if nothing but the name differs
# - use AttrCvt if attributes need to be converted
# - for 1-to-N (composed) mappings, use custom callable functions
# - N-to-1 mappings are currently not supported
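# As an illustrative (hypothetical) sketch of the registration pattern, a simple unary op
# would be added to the dictionary below as
#     "MyUnaryOp": AttrCvt("my_relay_op"),
# whereas an op that lowers to several Relay ops would get a closure-returning helper such
# as _selu() above and be registered as
#     "MyComposedOp": _my_composed_op(),
# Neither "MyUnaryOp" nor "MyComposedOp" is a real TensorFlow op; they are placeholders.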
_convert_map = {
"Abs": AttrCvt("abs"),
"Acos": AttrCvt("acos"),
"Acosh": AttrCvt("acosh"),
"Add": _elemwise("add"),
"AddN": _add_n(),
"AddV2": _elemwise("add"),
"All": _reduce("all"),
"Any": _reduce("any"),
"ArgMax": _argx(_op.argmax, "argmax"),
"ArgMin": _argx(_op.argmin, "argmin"),
"Asin": AttrCvt("asin"),
"Asinh": AttrCvt("asinh"),
"Assert": _assert(),
"Atan": AttrCvt("atan"),
"Atanh": AttrCvt("atanh"),
"Atan2": _atan2(),
"AvgPool": _pooling("avg_pool"),
"AvgPool3D": _pool3d("avg_pool3d"),
"BatchMatMul": _batch_matmul(),
"BatchMatMulV2": _batch_matmul(),
"BatchNormWithGlobalNormalization": _batch_norm(),
"BatchToSpaceND": _batch_to_space_nd(),
"BiasAdd": _bias_add(),
"Bincount": _bincount(),
"BroadcastTo": _broadcast_to(),
"BroadcastArgs": _broadcast_args(),
"Cast": _cast(),
"Ceil": AttrCvt("ceil"),
"CheckNumerics": _check_numerics(),
"ClipByValue": _clip_by_value(),
"Concat": _concat(),
"ConcatV2": _concatV2(),
"Conv2D": _conv("conv"),
"Conv2DBackpropInput": _conv("conv_transpose"),
"Conv3D": _conv3d("conv"),
"Conv3DBackpropInputV2": _conv3d("conv_transpose"),
"Cos": AttrCvt("cos"),
"Cosh": AttrCvt("cosh"),
"CropAndResize": _crop_and_resize(),
"DecodeJpeg": _decode_image(),
"DenseBincount": _dense_bincount(),
"DepthToSpace": _depth_to_space(),
"DepthwiseConv2dNative": _conv("depthwise"),
"Dilation2D": _dilation2d(),
"Einsum": _einsum(),
"Elu": _elu(),
"Equal": _broadcast("equal"),
"Erf": AttrCvt("erf"),
"EuclideanNorm": _euclidean_norm(),
"Exp": AttrCvt("exp"),
"ExpandDims": _expand_dims(),
"Expm1": _expm1(),
"Fill": _fill(),
"Floor": AttrCvt("floor"),
"FloorDiv": _floordiv(),
"FloorMod": _floormod(),
"FusedBatchNorm": _fused_batch_norm(),
"FusedBatchNormV2": _fused_batch_norm(),
"FusedBatchNormV3": _fused_batch_norm(),
"Gather": _gather(),
"GatherNd": _gather_nd(),
"GatherV2": _gather(),
"Greater": _broadcast("greater"),
"GreaterEqual": _broadcast("greater_equal"),
"Identity": _identity(),
"IdentityN": _identityn(),
"InvertPermutation": AttrCvt("invert_permutation"),
"IsFinite": AttrCvt("isfinite"),
"IsInf": AttrCvt("isinf"),
"IsNan": AttrCvt("isnan"),
"LeakyRelu": AttrCvt("leaky_relu"),
"LeftShift": AttrCvt("left_shift"),
"Less": _broadcast("less"),
"LessEqual": _broadcast("less_equal"),
"Log": AttrCvt("log"),
"Log1p": _log1p(),
"LogicalAnd": _logical("logical_and"),
"LogicalNot": _logical("logical_not"),
"LogicalOr": _logical("logical_or"),
"LogSoftmax": AttrCvt("log_softmax"),
"LRN": _lrn(),
"LSTMBlockCell": _LSTMBlockCell(),
"MatMul": _matmul(),
"Max": _reduce("max"),
"Maximum": _elemwise("maximum"),
"MaxPool": _pooling("max_pool"),
"MaxPool3D": _pool3d("max_pool3d"),
"Mean": _mean(),
"Min": _reduce("min"),
"Minimum": _elemwise("minimum"),
"MirrorPad": _mirror_pad(),
"Mod": _elemwise("mod"),
"Mul": _elemwise("multiply"),
"Neg": AttrCvt("negative"),
"NonMaxSuppressionV2": _nms(),
"NonMaxSuppressionV3": _nms(),
"NonMaxSuppressionV4": _nms(),
"NonMaxSuppressionV5": _nms(True),
"CombinedNonMaxSuppression": _combined_nms(),
"NoOp": _no_op(),
"NotEqual": _broadcast("not_equal"),
"OneHot": _one_hot(),
"Pack": _pack(),
"Pad": _pad("Pad"),
"PadV2": _pad("PadV2"),
"Pow": _elemwise("power"),
"Prod": _prod(),
"Range": _range(),
"Rank": _rank(),
"RealDiv": _elemwise("divide"),
"Relu": AttrCvt("relu"),
"Relu6": _relu6(),
"Reshape": _reshape(),
"ResizeBicubic": _resize("cubic"),
"ResizeBilinear": _resize("linear"),
"ResizeNearestNeighbor": _resize("nearest_neighbor"),
"ReverseV2": _reverse_v2(),
"RightShift": AttrCvt("right_shift"),
"Rint": AttrCvt("round"),
"Round": AttrCvt("round"),
"Rsqrt": _rsqrt(),
"Select": _where(),
"SelectV2": _where(),
"Selu": _selu(),
"Shape": _shape(),
"Sigmoid": AttrCvt("sigmoid"),
"Sign": AttrCvt("sign"),
"Sin": AttrCvt("sin"),
"Sinh": AttrCvt("sinh"),
"Size": _size(),
"Slice": _slice(),
"Softmax": _softmax(),
"Softplus": _softplus(),
"Softsign": _softsign(),
"SpaceToBatchND": _space_to_batch_nd(),
"SpaceToDepth": _space_to_depth(),
"SparseToDense": _sparse_to_dense(),
"SparseTensorDenseMatMul": _sparse_tensor_dense_matmul(),
"SparseFillEmptyRows": _sparse_fill_empty_rows(),
"SparseReshape": _sparse_reshape(),
"SegmentSum": _math_segment_sum(),
"SparseSegmentSum": _sparse_segment_sum(),
"SparseSegmentSumWithNumSegments": _sparse_segment_sum_with_num_segments(),
"SparseSegmentSqrtN": _sparse_segment_sum_sqrtn(),
"SparseSegmentSqrtNWithNumSegments": _sparse_segment_sum_sqrtn_with_num_segments(),
"SparseSegmentMean": _sparse_segment_mean(),
"SparseSegmentMeanWithNumSegments": _sparse_segment_mean_with_num_segments(),
"SparseTensorDenseAdd": _sparse_tensor_dense_add(),
"Split": _split(False),
"SplitV": _split(True),
"Sqrt": AttrCvt("sqrt"),
"Square": _square(),
"SquaredDifference": _squared_difference(),
"Squeeze": _squeeze(),
"StopGradient": _identity(),
"StridedSlice": _stridedSlice(),
"Sub": _elemwise("subtract"),
"Sum": _sum(),
"Tan": AttrCvt("tan"),
"Tanh": AttrCvt("tanh"),
"TensorArrayConcatV3": _tensor_array_concat(),
"TensorArrayGatherV3": _tensor_array_gather(),
"TensorArrayReadV3": _tensor_array_read(),
"TensorArrayScatterV3": _tensor_array_scatter(),
"TensorArraySizeV3": _tensor_array_size(),
"TensorArraySplitV3": _tensor_array_split(),
"TensorArrayV3": _tensor_array(),
"TensorArrayWriteV3": _tensor_array_write(),
"Tile": _tile(),
"TopKV2": _topk(),
"Transpose": _transpose(),
"TruncateMod": _elemwise("mod"),
"Unique": _unique(False),
"UniqueWithCounts": _unique(True),
"Unpack": _unpack(),
"UnravelIndex": _unravel_index(),
"Where": _where(),
"ZerosLike": AttrCvt("zeros_like"),
}
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/tensorflow_parser.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TF: Tensorflow parser"""
# pylint: disable=import-outside-toplevel, assignment-from-no-return
import os
from tvm.contrib import utils
class TFParser(object):
"""
A Wrapper to handle tensorflow models parsing, TensorFlow is needed
Parameters
----------
model_dir : tensorflow frozen pb file or a directory that contains saved
model or checkpoints.
outputs : List of output tensor names (Optional)
Optional output node names. These nodes are protected from removal
when training nodes are stripped from a saved model.
Examples
--------
.. code-block:: python
parser = TFParser(model_dir)
graphdef = parser.parse()
"""
def __init__(self, model_dir, outputs=None):
from tensorflow.core.framework import graph_pb2
self._tmp_dir = utils.tempdir()
self._model_dir = model_dir
self._graph = graph_pb2.GraphDef()
self._outputs = outputs or []
def _set_graph(self, graph):
"""Set Graph"""
self._graph = graph
def _get_graph(self):
"""Get Graph"""
return self._graph
def _load_pb_file(self):
"""Load single pb file"""
graph = self._get_graph()
with open(self._model_dir, "rb") as f:
graph.ParseFromString(f.read())
return graph
def _get_tag_set(self):
"""Return the tag set of saved model, multiple metagraphs are not supported"""
try:
from tensorflow.contrib.saved_model.python.saved_model.reader import (
get_saved_model_tag_sets,
)
except ImportError:
try:
from tensorflow.python.tools.saved_model_utils import get_saved_model_tag_sets
except ImportError:
raise ImportError(
"InputConfiguration: Unable to import get_saved_model_tag_sets which is "
"required to get tag set from saved model."
)
tag_sets = get_saved_model_tag_sets(self._model_dir)
return tag_sets[0]
def _get_output_names(self):
"""Return the concatenated output names"""
try:
import tensorflow.compat.v1 as tf
except ImportError:
raise ImportError(
"InputConfiguration: Unable to import tensorflow which is "
"required to restore from saved model."
)
tags = self._get_tag_set()
output_names = set()
with tf.Session() as sess:
meta_graph_def = tf.saved_model.loader.load(sess, tags, self._model_dir)
for sig_def in meta_graph_def.signature_def.values():
for output_tensor in sig_def.outputs.values():
output_names.add(output_tensor.name.replace(":0", ""))
tf.reset_default_graph()
return ",".join(output_names)
def _load_saved_model(self):
"""Load the tensorflow saved model."""
try:
from tensorflow.python.tools import freeze_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import graph_util
from tensorflow.core.framework import graph_pb2
except ImportError:
raise ImportError(
"InputConfiguration: Unable to import tensorflow which is "
"required to restore from saved model."
)
saved_model_dir = self._model_dir
output_graph_filename = self._tmp_dir.relpath("tf_frozen_model.pb")
input_saved_model_dir = saved_model_dir
output_node_names = self._get_output_names()
input_binary = False
input_saver_def_path = False
restore_op_name = None
filename_tensor_name = None
clear_devices = True
input_meta_graph = False
checkpoint_path = None
input_graph_filename = None
saved_model_tags = ",".join(self._get_tag_set())
freeze_graph.freeze_graph(
input_graph_filename,
input_saver_def_path,
input_binary,
checkpoint_path,
output_node_names,
restore_op_name,
filename_tensor_name,
output_graph_filename,
clear_devices,
"",
"",
"",
input_meta_graph,
input_saved_model_dir,
saved_model_tags,
)
with ops.Graph().as_default(): # pylint: disable=not-context-manager
output_graph_def = graph_pb2.GraphDef()
with open(output_graph_filename, "rb") as f:
output_graph_def.ParseFromString(f.read())
output_graph_def = graph_util.remove_training_nodes(
output_graph_def, protected_nodes=self._outputs
)
return output_graph_def
def _load_ckpt(self):
"""TODO: Load checkpoint model."""
raise RuntimeError(
"InputConfiguration: Loading tf checkpoint model is " "not supported yet."
)
def parse(self):
"""
Parse tensorflow models: checkpoints, saved models, and single frozen pb file.
Returns
-------
GraphDef of the passed model
"""
graph = None
if os.path.isdir(self._model_dir):
ckpt = os.path.join(self._model_dir, "checkpoint")
if not os.path.isfile(ckpt):
if not os.path.isdir(os.path.join(self._model_dir, "variables")):
raise RuntimeError("InputConfiguration: Invalid model path.")
graph = self._load_saved_model()
else:
graph = self._load_ckpt()
elif os.path.isfile(self._model_dir):
# Only .pb or .pbtxt is a valid suffix name.
if self._model_dir.endswith(".pb") or self._model_dir.endswith(".pbtxt"):
cur_dir = os.path.dirname(self._model_dir)
else:
raise RuntimeError("InputConfiguration: Invalid model format.")
# It is a saved model if a `variables` directory is present in the
# same directory as the pb or pbtxt file.
if os.path.isdir(os.path.join(cur_dir, "variables")):
self._model_dir = cur_dir
graph = self._load_saved_model()
else:
graph = self._load_pb_file()
else:
raise RuntimeError("InputConfiguration: Unrecognized model " "file or path.")
self._set_graph(graph)
return graph
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/tflite.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, too-many-lines, import-outside-toplevel
"""Tensorflow lite frontend."""
import itertools
import math
import numpy as np
import tvm
from tvm import relay
from tvm.ir import IRModule
from tvm.runtime.name_transforms import sanitize_name
from ... import nd as _nd
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from .. import qnn as _qnn
from .common import ExprTable
from .common import infer_shape as _infer_shape
from .common import lstm_cell, to_int_list, shape_of, try_infer_value
from .tflite_flexbuffer import FlexBufferDecoder
__all__ = ["from_tflite"]
class TensorWrapper(object):
"""Tensor wrapper for TFLite Tensor"""
def __init__(self, tensor_idx, tensor, buffer, qnn_params=None):
self.tensor_idx = tensor_idx
self.tensor = tensor
self.buffer = buffer
self.qnn_params = qnn_params
class OperatorConverter(object):
"""Operator Converted for converting TFLite ops to Relay ops"""
def __init__(self, model, subgraph, exp_tab):
try:
from tflite.ActivationFunctionType import ActivationFunctionType
from tflite.BuiltinOperator import BuiltinOperator
from tflite.BuiltinOptions import BuiltinOptions
except ImportError:
raise ImportError("The tflite package must be installed")
self.model = model
self.subgraph = subgraph
self.exp_tab = exp_tab
self.builtin_op_code = build_str_map(BuiltinOperator())
self.activation_fn_type = build_str_map(ActivationFunctionType())
self.builtin_options = build_str_map(BuiltinOptions())
self.prefetched_nodes = {}
self.allow_custom_ops = False
# Add more operators
self.convert_map = {
"ABS": self.convert_abs,
"ADD": self.convert_add,
"ADD_N": self.convert_add_n,
"ARG_MAX": self.convert_arg_max,
"ARG_MIN": self.convert_arg_min,
"AVERAGE_POOL_2D": self.convert_average_pool2d,
"BATCH_TO_SPACE_ND": self.convert_batch_to_space_nd,
"CAST": self.convert_cast,
"CEIL": self.convert_ceil,
"CONCATENATION": self.convert_concatenation,
"CONV_2D": self.convert_conv2d,
"COS": self.convert_cos,
"DENSIFY": self.convert_densify,
"DEPTH_TO_SPACE": self.convert_depth_to_space,
"DEPTHWISE_CONV_2D": self.convert_depthwise_conv2d,
"DEQUANTIZE": self.convert_dequantize,
"DETECTION_POSTPROCESS": self.convert_detection_postprocess,
"DIV": self.convert_div,
"ELU": self.convert_elu,
"EQUAL": self.convert_equal,
"EXP": self.convert_exp,
"EXPAND_DIMS": self.convert_expand_dims,
"FAKE_QUANT": self.convert_fake_quant,
"FILL": self.convert_fill,
"FLOOR_DIV": self.convert_floor_div,
"FLOOR_MOD": self.convert_floor_mod,
"FLOOR": self.convert_floor,
"FULLY_CONNECTED": self.convert_fully_connected,
"GATHER": self.convert_gather,
"GATHER_ND": self.convert_gather_nd,
"GREATER_EQUAL": self.convert_greater_equal,
"GREATER": self.convert_greater,
"HARD_SWISH": self.convert_hard_swish,
"L2_NORMALIZATION": self.convert_l2_normalization,
"L2_POOL_2D": self.convert_l2_pool2d,
"LEAKY_RELU": self.convert_leaky_relu,
"LESS_EQUAL": self.convert_less_equal,
"LESS": self.convert_less,
"LOCAL_RESPONSE_NORMALIZATION": self.convert_lrn,
"LOG": self.convert_log,
"LOG_SOFTMAX": self.convert_log_softmax,
"LOGICAL_AND": self.convert_logical_and,
"LOGICAL_NOT": self.convert_logical_not,
"LOGICAL_OR": self.convert_logical_or,
"LOGISTIC": self.convert_logistic,
"MATRIX_DIAG": self.convert_matrix_diag,
"MATRIX_SET_DIAG": self.convert_matrix_set_diag,
"MAX_POOL_2D": self.convert_max_pool2d,
"MAXIMUM": self.convert_maximum,
"MEAN": self.convert_reduce_mean,
"MINIMUM": self.convert_minimum,
"MIRROR_PAD": self.convert_mirror_pad,
"MUL": self.convert_mul,
"NEG": self.convert_neg,
"NOT_EQUAL": self.convert_not_equal,
"ONE_HOT": self.convert_one_hot,
"PACK": self.convert_pack,
"PAD": self.convert_pad,
"PADV2": self.convert_pad,
"POW": self.convert_pow,
"PRELU": self.convert_prelu,
"RANGE": self.convert_range,
"QUANTIZE": self.convert_quantize,
"REDUCE_ANY": self.convert_reduce_any,
"REDUCE_MAX": self.convert_reduce_max,
"REDUCE_MIN": self.convert_reduce_min,
"REDUCE_PROD": self.convert_reduce_prod,
"RELU": self.convert_relu,
"RELU6": self.convert_relu6,
"RELU_N1_TO_1": self.convert_relu_n1_to_1,
"RESHAPE": self.convert_reshape,
"RESIZE_BILINEAR": self.convert_resize_bilinear,
"RESIZE_NEAREST_NEIGHBOR": self.convert_resize_nearest_neighbor,
"ROUND": self.convert_round,
"RSQRT": self.convert_rsqrt,
"REVERSE_SEQUENCE": self.convert_reverse_sequence,
"REVERSE_V2": self.convert_reverse_v2,
"SELECT": self.convert_select,
"SHAPE": self.convert_shape,
"SIN": self.convert_sin,
"SLICE": self.convert_slice,
"SOFTMAX": self.convert_softmax,
"SPACE_TO_BATCH_ND": self.convert_space_to_batch_nd,
"SPACE_TO_DEPTH": self.convert_space_to_depth,
"SPARSE_TO_DENSE": self.convert_sparse_to_dense,
"SPLIT": self.convert_split,
"SPLIT_V": self.convert_split_v,
"SQRT": self.convert_sqrt,
"SQUARE": self.convert_square,
"SQUARED_DIFFERENCE": self.convert_squared_difference,
"SQUEEZE": self.convert_squeeze,
"STRIDED_SLICE": self.convert_strided_slice,
"SUB": self.convert_sub,
"SUM": self.convert_reduce_sum,
"TAN": self.convert_tan,
"TANH": self.convert_tanh,
"TILE": self.convert_tile,
"TOPK_V2": self.convert_topk_v2,
"TRANSPOSE_CONV": self.convert_transpose_conv,
"TRANSPOSE": self.convert_transpose,
"UNPACK": self.convert_unpack,
"UNIDIRECTIONAL_SEQUENCE_LSTM": self.convert_unidirectional_sequence_lstm,
"WHERE": self.convert_select,
"ZEROS_LIKE": self.convert_zeros_like,
"NON_MAX_SUPPRESSION_V5": self.convert_nms_v5,
}
def check_unsupported_ops(self):
"""Check unsupported TFLite ops in our converter."""
unsupported_ops_set = set()
dynamic_range_ops_set = set()
for op_idx in range(self.subgraph.OperatorsLength()):
op = self.subgraph.Operators(op_idx)
op_code_str = self.get_op_code_str(op)
if op_code_str not in self.convert_map:
unsupported_ops_set.add(op_code_str)
continue
# Trying to exclude "dynamic range quantization" optimized ops as not supported in TVM
qnn_in_cnt = len(
[_.qnn_params for _ in self.get_input_tensors(op)[0:1] if _.qnn_params is not None]
)
qnn_weight_cnt = len(
[_.qnn_params for _ in self.get_input_tensors(op)[1:] if _.qnn_params is not None]
)
qnn_out_cnt = len(
[_.qnn_params for _ in self.get_output_tensors(op) if _.qnn_params is not None]
)
if qnn_in_cnt == 0 and qnn_out_cnt == 0 and qnn_weight_cnt > 0:
dynamic_range_ops_set.add(op_code_str)
raise_msg = ""
if unsupported_ops_set:
msg = "The following operators are not supported in frontend " "TFLite: {}\n"
ops = str(list(unsupported_ops_set)).strip("[,]")
raise_msg += msg.format(ops)
if dynamic_range_ops_set:
msg = (
"The following operators are likely to have dynamic range quantization: {}. "
"If you are running an optimized graph, please turn off dynamic range quantization "
"or use full integer quantization"
)
raise_msg += msg.format(str(list(dynamic_range_ops_set)).strip("[,]"))
if len(raise_msg) > 0:
raise tvm.error.OpNotImplemented(raise_msg)
def unbind(self, data, axis=1):
"""
This is a modified version of the one in common.py.
The ONNX version takes a relay.Expr.Call, while this TFLite
version takes a TensorWrapper. It also splits along axis 1 by
default, rather than axis 0 as the ONNX version does.
Parameters
----------
data : tvm.relay.frontend.tflite.TensorWrapper
Input tensor
axis : int
Axis along which tensor is split.
Returns
-------
result : List[relay.Expr]
The sequence of computed tensors
"""
shape = to_int_list(self.get_tensor_shape(data))
if axis >= len(shape):
msg = "Please check input dim, it shouldn't be greater than or equal to rank."
raise AttributeError(msg)
selections = shape[axis]
shape.pop(axis)
timestep = 0 # Reshape to make time step as the first dim
shape.insert(timestep, selections)
res_split = _op.split(
_op.reshape(self.get_expr(data.tensor_idx), tuple(shape)), selections, timestep
)
ret = []
for i in range(selections):
ret.append(_op.squeeze(res_split[i], axis=[timestep]))
return _expr.TupleWrapper(_expr.Tuple(ret), selections)
def convert_op_to_relay(self):
"""Convert TFLite ops to relay ops"""
for op_idx in range(self.subgraph.OperatorsLength()):
op = self.subgraph.Operators(op_idx)
op_code_str = self.get_op_code_str(op)
output_tensors = self.get_output_tensors(op)
try:
from tflite.Operator import Operator
except ImportError:
raise ImportError("The tflite package must be installed")
assert isinstance(op, Operator)
ret = self.convert_map[op_code_str](op)
# In case the Op can be prefetched, the output can be optimized out
if ret is None:
continue
if len(output_tensors) == 1:
tensor_idx = output_tensors[0].tensor_idx
self.exp_tab.set_expr(get_tensor_name(self.subgraph, tensor_idx), ret)
else:
for idx, output_tensor in enumerate(output_tensors):
self.exp_tab.set_expr(
get_tensor_name(self.subgraph, output_tensor.tensor_idx), ret[idx]
)
def get_op_code_str(self, op):
"""Get TFLite ops string representation"""
try:
from tflite.BuiltinOperator import BuiltinOperator
except ImportError:
raise ImportError("The tflite package must be installed")
op_code_list_idx = op.OpcodeIndex()
op_c = self.model.OperatorCodes(op_code_list_idx)
# In TFlite 2.4.x there was a change where the type of the field that contained
# the builtin code changed from int8 to int32 in the flat buffer representation.
# However, to retain support for old flat buffers that were created, they retained
# the original 8 bit field, but named it "deprecated_builtin_code" in TFLite 2.4.
# This means that the API function BuiltinCode() which originally returned the value
# of the 8 bit field would now look for the value in the new int32 field in the
# schema and DeprecatedBuiltinCode() will look at the old 8 bit field.
# In TFLite 2.4, if the opcode value is less than 127, it can be in either field
# (however, if it is only in the "builtin_code" field, the model is not backward
# compatible), so similarly to TFLite 2.4 reader, we'll pick the higher value of the
# two fields.
# Remember, however, that DeprecatedBuiltinCode() came into existence only after
# TensorFlow Lite 2.4.x, hence the access is wrapped in a try-except block.
# Phew !
try:
opc = max(op_c.DeprecatedBuiltinCode(), op_c.BuiltinCode())
except AttributeError:
# In versions before 2.4 the int8 field that holds the builtin code is accessed
# by BuiltinCode() and DeprecatedBuiltinCode() doesn't exist
opc = op_c.BuiltinCode()
op_code_id = opc
try:
op_code_str = self.builtin_op_code[op_code_id]
except KeyError:
raise NotImplementedError(
"TFLite operator with code "
+ str(op_code_id)
+ " is not supported by this version of the fbs schema."
)
if op_code_id == BuiltinOperator.CUSTOM:
# Custom operator
custom_op_code_str = self.model.OperatorCodes(op_code_list_idx).CustomCode()
if self.allow_custom_ops:
return "CUSTOM"
if custom_op_code_str == b"TFLite_Detection_PostProcess":
return "DETECTION_POSTPROCESS"
raise NotImplementedError("Custom operators are currently not supported")
return op_code_str
def get_input_tensors(self, op):
operator_inputs = op.InputsAsNumpy()
return self.get_tensors(operator_inputs)
def get_output_tensors(self, op):
operator_outputs = op.OutputsAsNumpy()
return self.get_tensors(operator_outputs)
def get_tensors(self, tensors_idx_list):
"""Get tensor wrapper list from given TFLite tensor index list"""
return_list = list()
for tensor_idx in tensors_idx_list:
if tensor_idx < 0:
return_list.append(TensorWrapper(tensor_idx, 0, 0))
continue
tensor = self.subgraph.Tensors(tensor_idx)
buffer_idx = tensor.Buffer()
buffer = self.model.Buffers(buffer_idx)
# Check if the tensors are quantized. Parse if yes.
qnn_params = None
tflite_qnn_params = tensor.Quantization()
if tflite_qnn_params is not None:
# TFLite supports both per-tensor and per-axis (aka channel) quantization. For
# per-tensor quantization, scale and zero points are scalar values. For per-axis
# quantization, scale and zero points for the weights are tensors (activations are
# per-tensor quantized). However, the TFLite quantization spec puts restrictions on
# zero points for per-axis quantization. Specifically, the zero point is a tensor
# but all values are 0. More information can be found here -
# https://www.tensorflow.org/lite/performance/quantization_spec
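# Illustrative example (hypothetical tensor, not from a specific model): a per-axis
# quantized conv2d weight with 8 output channels carries a scale vector of 8 floats and a
# zero-point vector of 8 zeros, whereas a per-tensor quantized activation carries a single
# scalar scale and a single scalar zero point.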
tflite_scale = tflite_qnn_params.ScaleAsNumpy()
tflite_zero_point = tflite_qnn_params.ZeroPointAsNumpy()
is_qnn_params_valid = True
# Handle Per-axis and per-tensor cases
if isinstance(tflite_scale, np.ndarray):
assert isinstance(tflite_zero_point, np.ndarray)
# Tensor - Per-axis quantization
if tflite_scale.size != 1 and tflite_zero_point.size != 1:
scale = tflite_scale
# Ensure that all zero points are zeros
zero_point = tflite_zero_point
if not np.all(zero_point == 0):
raise tvm.error.OpAttributeInvalid(
"TFLite per-axis quantization restricts all zero points to be"
+ " 0, but a non-zero value is observed"
)
zero_point = int(zero_point[0])
# Scalar - Per-tensor quantization
elif tflite_scale.size == 1 and tflite_zero_point.size == 1:
scale = float(tflite_scale[0])
zero_point = int(tflite_zero_point[0])
else:
raise NotImplementedError(
"Quantized type {} (scale) and {} (zero point) not supported".format(
type(tflite_scale), type(tflite_zero_point)
)
)
elif tflite_scale == 0 and tflite_zero_point == 0:
# Handle corner case for ops like quantized reshape whose second operand (shape)
# has zero scale and zero zero point. This is not used.
is_qnn_params_valid = False
else:
raise NotImplementedError(
"Quantized type {} not supported".format(type(tflite_scale))
)
# Check that the scale and zero points are valid.
if is_qnn_params_valid:
qnn_params = dict()
qnn_params["scale"] = relay.const(scale, "float32")
qnn_params["zero_point"] = relay.const(zero_point, "int32")
return_list.append(TensorWrapper(tensor_idx, tensor, buffer, qnn_params))
return return_list
def get_tensor_type_as_numpy(self, tensor_wrapper):
"""Returns np.dtype out of TensorType"""
assert isinstance(tensor_wrapper, TensorWrapper)
try:
from tflite.TensorType import TensorType
return {
TensorType.UINT8: np.uint8,
TensorType.INT8: np.int8,
TensorType.INT16: np.int16,
TensorType.FLOAT16: np.float16,
TensorType.FLOAT32: np.float32,
TensorType.INT32: np.int32,
TensorType.INT64: np.int64,
TensorType.BOOL: np.bool_,
}[tensor_wrapper.tensor.Type()]
except ImportError:
raise ImportError("The tflite package must be installed")
except KeyError:
raise NotImplementedError(
"Tensor type '{}' currently not supported".format(tensor_wrapper.tensor.Type())
)
# pylint: disable=no-else-return
def get_tensor_value(self, tensor_wrapper, is_sparse=False):
"""Get tensor buffer value from given tensor wrapper"""
assert isinstance(tensor_wrapper, TensorWrapper)
dtype = self.get_tensor_type_as_numpy(tensor_wrapper)
data = tensor_wrapper.buffer.DataAsNumpy()
if tensor_wrapper.tensor.ShapeLength() != 0:
shape = to_int_list(self.get_tensor_shape(tensor_wrapper))
else:
shape = []
if is_sparse:
return np.frombuffer(data, dtype=dtype)
else:
return np.frombuffer(data, dtype=dtype).reshape(shape)
def get_tensor_type_str(self, tensor_type):
"""Get tensor type string representation when given TFLite tensor type"""
try:
from tflite.TensorType import TensorType
except ImportError:
raise ImportError("The tflite package must be installed")
if tensor_type == TensorType.INT8:
return "int8"
if tensor_type == TensorType.INT16:
return "int16"
if tensor_type == TensorType.UINT8:
return "uint8"
if tensor_type == TensorType.FLOAT16:
return "float16"
if tensor_type == TensorType.FLOAT32:
return "float32"
if tensor_type == TensorType.INT32:
return "int32"
if tensor_type == TensorType.INT64:
return "int64"
if tensor_type == TensorType.BOOL:
return "bool"
raise NotImplementedError(
"Tensor type {} is currently not supported".format(str(tensor_type))
)
def has_same_qnn_params(self, lhs_tensor, rhs_tensor):
lhs_scale = lhs_tensor.qnn_params["scale"]
rhs_scale = rhs_tensor.qnn_params["scale"]
lhs_zero_point = lhs_tensor.qnn_params["zero_point"]
rhs_zero_point = rhs_tensor.qnn_params["zero_point"]
# 0.1 + 0.2 != 0.3
return np.allclose(
lhs_scale.data.numpy(), rhs_scale.data.numpy(), rtol=1e-5, atol=1e-5
) and np.allclose(
lhs_zero_point.data.numpy(), rhs_zero_point.data.numpy(), rtol=1e-5, atol=1e-5
)
def is_quantized(self, op):
"""Check if an input tensor is quantized."""
input_tensors = self.get_input_tensors(op)
first_tensor = input_tensors[0]
return first_tensor.qnn_params is not None
def quantize(self, expr, tensor_to_quantize):
"""Helper function to quantize a tensor with Relay"""
tensor_type = tensor_to_quantize.tensor.Type()
tensor_type_str = self.get_tensor_type_str(tensor_type)
quantized = _qnn.op.quantize(
data=expr,
output_scale=tensor_to_quantize.qnn_params["scale"],
output_zero_point=tensor_to_quantize.qnn_params["zero_point"],
out_dtype=tensor_type_str,
)
return quantized
def dequantize(self, expr, tensor):
"""Helper function to dequantize a tensor with Relay"""
dequantized = _qnn.op.dequantize(
data=expr,
input_scale=tensor.qnn_params["scale"],
input_zero_point=tensor.qnn_params["zero_point"],
)
return dequantized
def convert_qnn_fused_activation_function(
self, expr, fused_activation_fn, scale, zero_point, dtype
):
"""Convert TFLite fused activation function. The expr is an input quantized tensor with
scale and zero point"""
try:
from tflite.ActivationFunctionType import ActivationFunctionType
except ImportError:
raise ImportError("The tflite package must be installed")
# Quantize a float value to a quantized integer value
quantize = lambda x: float(int(round(x / scale)) + zero_point)
# Get min/max of the output dtype. This will be used to ensure that clip a_min/a_max are not
# beyond the dtype range.
qmin = float(tvm.tir.op.min_value(dtype).value)
qmax = float(tvm.tir.op.max_value(dtype).value)
# The input expr is a quantized tensor with its scale and zero point. We calculate the
# suitable clip off points based on these scale and zero point.
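# Worked example (assumed values, not taken from any model): with dtype="int8",
# scale=6/255 and zero_point=-128, quantize(0.0) == -128 and quantize(6.0) == 127,
# so a fused RELU6 lowers to clip(expr, a_min=-128, a_max=127).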
if fused_activation_fn == ActivationFunctionType.NONE:
return expr
if fused_activation_fn == ActivationFunctionType.RELU6:
return _op.clip(expr, a_min=max(qmin, quantize(0)), a_max=min(qmax, quantize(6.0)))
if fused_activation_fn == ActivationFunctionType.RELU_N1_TO_1:
return _op.clip(expr, a_min=max(qmin, quantize(-1.0)), a_max=min(qmax, quantize(1.0)))
if fused_activation_fn == ActivationFunctionType.RELU:
return _op.clip(expr, a_min=max(qmin, quantize(0.0)), a_max=qmax)
fused_activation_fn_str = self.activation_fn_type[fused_activation_fn]
raise tvm.error.OpNotImplemented(
"Quantized activation {} is not supported yet.".format(fused_activation_fn_str)
)
def convert_conv2d(self, op):
"""Convert TFLite conv2d"""
return self.convert_conv(op, "conv2d")
def convert_depthwise_conv2d(self, op):
"""Convert TFLite depthwise conv2d"""
return self.convert_conv(op, "depthwise")
def convert_average_pool2d(self, op):
"""Convert TFLite average pool2d"""
return self.convert_pool2d(op, "average")
def convert_max_pool2d(self, op):
"""Convert TFLite max pool2d"""
return self.convert_pool2d(op, "max")
def convert_l2_pool2d(self, op):
"""Convert TFLite l2 pool2d"""
return self.convert_pool2d(op, "l2")
def convert_reshape(self, op):
"""Convert TFLite reshape"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.ReshapeOptions import ReshapeOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) in (1, 2), "input tensors should not be empty"
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "There should be only 1 output tensor"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
if len(input_tensors) == 2:
shape_tensor = input_tensors[1]
if self.has_expr(shape_tensor.tensor_idx):
target_expr = self.get_expr(shape_tensor.tensor_idx)
target_value, success = try_infer_value(
target_expr,
parameters={k: _nd.array(np.array(v)) for k, v in self.exp_tab.params.items()},
)
if success:
# convert to flattened list
from itertools import chain
try:
target_shape = list(chain(*target_value))
except TypeError:
target_shape = list(chain(target_value))
else:
target_shape = target_expr
else:
target_shape = self.get_tensor_value(shape_tensor)
# convert to flattened list
from itertools import chain
try:
target_shape = list(chain(*target_shape))
except TypeError:
target_shape = list(chain(target_shape))
else:
assert op.BuiltinOptionsType() == BuiltinOptions.ReshapeOptions
op_options = op.BuiltinOptions()
reshape_options = ReshapeOptions()
reshape_options.Init(op_options.Bytes, op_options.Pos)
target_shape = to_int_list(reshape_options.NewShapeAsNumpy())
in_expr = self.get_expr(input_tensor_idx)
# If the tensors are quantized, ensure that input/output qnn params are same.
input_tensor_type_str = self.get_tensor_type_str(input_tensor.tensor.Type())
if input_tensor.qnn_params and input_tensor_type_str == "int8":
# TFLite 2.x quantization spec requires qnn params to be same and dtype to be int8.
# For TFLite 1.x, dtype can be uint8 and qnn params can be different
output_tensor = output_tensors[0]
assert self.has_same_qnn_params(
input_tensor, output_tensor
), "TFLite reshape requires input and output scale and zero points to be equal"
out = _op.reshape(in_expr, newshape=target_shape)
if input_tensor.qnn_params and input_tensor_type_str == "uint8":
output_tensor = output_tensors[0]
if not self.has_same_qnn_params(input_tensor, output_tensor):
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
out = _qnn.op.requantize(
out,
input_scale=input_tensor.qnn_params["scale"],
input_zero_point=input_tensor.qnn_params["zero_point"],
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
out_dtype=output_tensor_type_str,
)
return out
def _convert_resize(self, method, op):
"""Generic method to Convert TFLite RESIZE operators"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.ResizeBilinearOptions import ResizeBilinearOptions
# ResizeNearestNeighborOptions was added in tflite v1.13
tflite_ver = 1120
if "ResizeNearestNeighborOptions" in dir(BuiltinOptions):
from tflite.ResizeNearestNeighborOptions import ResizeNearestNeighborOptions
tflite_ver = 1130
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
# images, 4-D Tensor with shape NHWC.
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
# size - 1-D int32 Tensor of 2 elements: new_height, new_width
target_size = tuple(self.get_tensor_value(input_tensors[1]))
# Options - align_corners (bool)
resize_options = None
align_corners = False
bilinear_method = method == "linear"
if bilinear_method:
assert op.BuiltinOptionsType() == BuiltinOptions.ResizeBilinearOptions
resize_options = ResizeBilinearOptions()
elif tflite_ver >= 1130:
assert op.BuiltinOptionsType() == BuiltinOptions.ResizeNearestNeighborOptions
resize_options = ResizeNearestNeighborOptions()
if resize_options is not None:
op_options = op.BuiltinOptions()
resize_options.Init(op_options.Bytes, op_options.Pos)
align_corners = resize_options.AlignCorners()
half_pixel_centers = resize_options.HalfPixelCenters()
# Use layout NHWC
coord_trans = "align_corners" if align_corners else "asymmetric"
coord_trans = "half_pixel" if half_pixel_centers else coord_trans
rounding_method = ""
if method == "nearest_neighbor":
if not align_corners and half_pixel_centers:
rounding_method = "round_prefer_ceil"
if bilinear_method and input_tensor.qnn_params:
in_expr = self.dequantize(in_expr, input_tensor)
out = _op.image.resize2d(
in_expr, target_size, None, "NHWC", method, coord_trans, rounding_method
)
if bilinear_method and output_tensor.qnn_params:
out = self.quantize(out, output_tensor)
return out
def convert_resize_bilinear(self, op):
"""Convert TFLite RESIZE_BILINEAR"""
return self._convert_resize("linear", op)
def convert_resize_nearest_neighbor(self, op):
"""Convert TFLite RESIZE_NEAREST_NEIGHBOR"""
return self._convert_resize("nearest_neighbor", op)
def convert_l2_normalization(self, op):
"""Convert TFLite L2_NORMALIZATION"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.L2NormOptions import L2NormOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
assert op.BuiltinOptionsType() == BuiltinOptions.L2NormOptions
op_options = op.BuiltinOptions()
l2_norm_options = L2NormOptions()
l2_norm_options.Init(op_options.Bytes, op_options.Pos)
fused_activation_fn = l2_norm_options.FusedActivationFunction()
# TFLite supports normalization only over the last dim
input_tensor_rank = len(input_tensor.tensor.ShapeAsNumpy())
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFLite quantized L2_NORMALIZATION operator is not supported yet."
)
# TFL uses only the default epsilon value
out = _op.nn.l2_normalize(in_expr, eps=1e-12, axis=[input_tensor_rank - 1])
# if we have fused activation fn
if output_tensor.qnn_params:
raise tvm.error.OpNotImplemented(
"TFLite quantized L2_NORMALIZATION operator is not supported yet."
)
out = self.convert_fused_activation_function(out, fused_activation_fn)
return out
def convert_lrn(self, op):
"""Convert TFLite LOCAL_RESPONSE_NORMALIZATION"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.LocalResponseNormalizationOptions import LocalResponseNormalizationOptions
except ImportError:
raise ImportError("The tflite package must be installed")
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized LRN operator is not supported yet.")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
assert op.BuiltinOptionsType() == BuiltinOptions.LocalResponseNormalizationOptions
op_options = op.BuiltinOptions()
lrn_options = LocalResponseNormalizationOptions()
lrn_options.Init(op_options.Bytes, op_options.Pos)
radius = lrn_options.Radius()
bias = lrn_options.Bias()
alpha = lrn_options.Alpha()
beta = lrn_options.Beta()
size = (radius * 2) + 1
alpha = alpha * size
axis = 3 # NHWC format
out = _op.nn.lrn(in_expr, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)
return out
def convert_logistic(self, op):
"""Convert TFLite LOGISTIC"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
if input_tensor.qnn_params:
in_expr = self.dequantize(in_expr, input_tensor)
out = _op.sigmoid(in_expr)
if output_tensor.qnn_params:
out = self.quantize(out, output_tensor)
return out
def convert_softmax(self, op):
"""Convert TFLite softmax"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
params = {"axis": -1} # -1 is channel
in_expr = self.get_expr(input_tensor_idx)
# TODO - Naive softmax int8 implementation leads to bad accuracy. Currently, we can
# dequantize to FP32 and perform softmax on FP32. We can investigate an integer only softmax
# implementation in future.
if input_tensor.qnn_params:
in_expr = self.dequantize(in_expr, input_tensor)
out = _op.nn.softmax(in_expr, **params)
# Go back to integer datatype if the original operator was quantized.
if output_tensor.qnn_params:
out = self.quantize(out, output_tensor)
return out
def convert_tanh(self, op):
"""Convert TFLite TANH"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
if input_tensor.qnn_params:
in_expr = self.dequantize(in_expr, input_tensor)
out = _op.tanh(in_expr)
if output_tensor.qnn_params:
out = self.quantize(out, output_tensor)
return out
def convert_range(self, op):
"""Convert TFLite Range"""
try:
from tflite.TensorType import TensorType
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 3, "input tensors length should be 3"
start, limit, delta = input_tensors[0], input_tensors[1], input_tensors[2]
expressions = [self.get_tensor_expr(t) for t in [start, limit, delta]]
# out type inference
if delta.tensor.Type() == TensorType.FLOAT32:
out_type = self.get_tensor_type_str(delta.tensor.Type())
else:
out_type = self.get_tensor_type_str(start.tensor.Type())
out = _op.arange(expressions[0], expressions[1], expressions[2], out_type)
return out
def convert_shape(self, op):
"""Convert TFLite Shape"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.ShapeOptions import ShapeOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
assert op.BuiltinOptionsType() == BuiltinOptions.ShapeOptions
op_options = op.BuiltinOptions()
shape_options = ShapeOptions()
shape_options.Init(op_options.Bytes, op_options.Pos)
out_type = self.get_tensor_type_str(shape_options.OutType())
out = shape_of(self.get_tensor_expr(input_tensors[0]), dtype=out_type)
return out
def convert_relu(self, op):
"""Convert TFLite ReLU"""
try:
from tflite.ActivationFunctionType import ActivationFunctionType
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
if input_tensor.qnn_params:
# Quantize a float value to a quantized integer value
scale_val = get_scalar_from_constant(input_tensor.qnn_params["scale"])
zero_point_val = get_scalar_from_constant(input_tensor.qnn_params["zero_point"])
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
out = self.convert_qnn_fused_activation_function(
expr=in_expr,
fused_activation_fn=ActivationFunctionType.RELU,
scale=scale_val,
zero_point=zero_point_val,
dtype=output_tensor_type_str,
)
else:
out = _op.nn.relu(in_expr)
if output_tensor.qnn_params:
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
out = _qnn.op.requantize(
out,
input_scale=input_tensor.qnn_params["scale"],
input_zero_point=input_tensor.qnn_params["zero_point"],
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
out_dtype=output_tensor_type_str,
)
return out
def convert_hard_swish(self, op):
"""Convert TFLite Hard swish"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
def _relu6(data):
return _op.tensor.clip(data, 0.0, 6.0)
def _hard_swish(data):
return data * _relu6(data + relay.const(3.0)) / relay.const(6.0)
# Dequantize if the input is quantized.
if input_tensor.qnn_params:
in_expr = self.dequantize(in_expr, input_tensor)
# Perform hardswish
out = _hard_swish(in_expr)
# Go back to integer datatype if the original operator was quantized.
if output_tensor.qnn_params:
out = self.quantize(out, output_tensor)
return out
def convert_relu6(self, op):
"""Convert TFLite ReLU6"""
try:
from tflite.ActivationFunctionType import ActivationFunctionType
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
if input_tensor.qnn_params:
# Quantize a float value to a quantized integer value
scale_val = get_scalar_from_constant(input_tensor.qnn_params["scale"])
zero_point_val = get_scalar_from_constant(input_tensor.qnn_params["zero_point"])
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
out = self.convert_qnn_fused_activation_function(
expr=in_expr,
fused_activation_fn=ActivationFunctionType.RELU6,
scale=scale_val,
zero_point=zero_point_val,
dtype=output_tensor_type_str,
)
else:
out = _op.clip(in_expr, a_min=0, a_max=6)
if output_tensor.qnn_params:
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
out = _qnn.op.requantize(
out,
input_scale=input_tensor.qnn_params["scale"],
input_zero_point=input_tensor.qnn_params["zero_point"],
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
out_dtype=output_tensor_type_str,
)
return out
def convert_leaky_relu(self, op):
"""Convert TFLite LEAKY_RELU"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.LeakyReluOptions import LeakyReluOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
assert op.BuiltinOptionsType() == BuiltinOptions.LeakyReluOptions
op_options = op.BuiltinOptions()
leaky_relu_options = LeakyReluOptions()
leaky_relu_options.Init(op_options.Bytes, op_options.Pos)
alpha_tensor = leaky_relu_options.Alpha()
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
if input_tensor.qnn_params:
in_expr = self.dequantize(in_expr, input_tensor)
out = _op.nn.leaky_relu(in_expr, alpha_tensor)
if output_tensor.qnn_params:
out = self.quantize(out, output_tensor)
return out
def convert_relu_n1_to_1(self, op):
"""Convert TFLite RELU_N1_TO_1"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
if input_tensor.qnn_params:
            # Quantize a float value to a quantized integer value
scale_val = get_scalar_from_constant(input_tensor.qnn_params["scale"])
zero_point_val = get_scalar_from_constant(input_tensor.qnn_params["zero_point"])
quantize = lambda x: float(int(round(x / scale_val)) + zero_point_val)
# Get min/max of the input dtype. This will be used to ensure that
# clip a_min/a_max are not beyond the dtype range.
input_tensor_type_str = self.get_tensor_type_str(input_tensor.tensor.Type())
qmin = float(tvm.tir.op.min_value(input_tensor_type_str).value)
qmax = float(tvm.tir.op.max_value(input_tensor_type_str).value)
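            # e.g. (hypothetical values) for a uint8 input with scale=0.0078125 and
            # zero_point=128, quantize(-1.0) = 0 and quantize(1.0) = 256, so the clip
            # range below becomes [0, 255].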
out = _op.clip(in_expr, a_min=max(qmin, quantize(-1.0)), a_max=min(qmax, quantize(1.0)))
else:
out = _op.clip(in_expr, a_min=-1, a_max=1)
if output_tensor.qnn_params:
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
out = _qnn.op.requantize(
out,
input_scale=input_tensor.qnn_params["scale"],
input_zero_point=input_tensor.qnn_params["zero_point"],
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
out_dtype=output_tensor_type_str,
)
return out
def convert_log_softmax(self, op):
"""Convert TFLite LOG_SOFTMAX"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
if input_tensor.qnn_params:
in_expr = self.dequantize(in_expr, input_tensor)
out = _op.nn.log_softmax(in_expr)
if output_tensor.qnn_params:
out = self.quantize(out, output_tensor)
return out
def convert_concatenation(self, op):
"""Convert TFLite concatenation"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.ConcatenationOptions import ConcatenationOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
        assert len(input_tensors) >= 1, "there should be at least one input tensor"
in_exprs = [self.get_tensor_expr(_) for _ in input_tensors]
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
assert op.BuiltinOptionsType() == BuiltinOptions.ConcatenationOptions
op_options = op.BuiltinOptions()
concatenation_options = ConcatenationOptions()
concatenation_options.Init(op_options.Bytes, op_options.Pos)
concatenation_axis = concatenation_options.Axis()
fused_activation_fn = concatenation_options.FusedActivationFunction()
if not input_tensors[0].qnn_params:
out = _op.concatenate(in_exprs, axis=concatenation_axis)
else:
input_scales = [input_tensor.qnn_params["scale"] for input_tensor in input_tensors]
input_zero_points = [
input_tensor.qnn_params["zero_point"] for input_tensor in input_tensors
]
out = _qnn.op.concatenate(
in_exprs,
input_scales=input_scales,
input_zero_points=input_zero_points,
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
axis=concatenation_axis,
)
# Handle fused activations
if output_tensor.qnn_params:
scale_val = get_scalar_from_constant(output_tensor.qnn_params["scale"])
zero_point_val = get_scalar_from_constant(output_tensor.qnn_params["zero_point"])
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
out = self.convert_qnn_fused_activation_function(
expr=out,
fused_activation_fn=fused_activation_fn,
scale=scale_val,
zero_point=zero_point_val,
dtype=output_tensor_type_str,
)
else:
out = self.convert_fused_activation_function(out, fused_activation_fn)
return out
def _convert_unary_elemwise(self, relay_op, op):
"""Generic method to convert TFLite unary elemwise functions"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
if input_tensor.qnn_params:
in_expr = self.dequantize(in_expr, input_tensor)
out = relay_op(in_expr)
if output_tensor.qnn_params:
out = self.quantize(out, output_tensor)
return out
def convert_abs(self, op):
"""Convert TFLite ABS"""
return self._convert_unary_elemwise(_op.abs, op)
def convert_ceil(self, op):
"""Convert TFLite CEIL"""
return self._convert_unary_elemwise(_op.ceil, op)
def convert_floor(self, op):
"""Convert TFLite FLOOR"""
return self._convert_unary_elemwise(_op.floor, op)
def convert_round(self, op):
"""Convert TFLite ROUND"""
return self._convert_unary_elemwise(_op.round, op)
def convert_exp(self, op):
"""Convert TFLite EXP"""
return self._convert_unary_elemwise(_op.exp, op)
def convert_log(self, op):
"""Convert TFLite LOG"""
return self._convert_unary_elemwise(_op.log, op)
def convert_sin(self, op):
"""Convert TFLite SIN"""
return self._convert_unary_elemwise(_op.sin, op)
def convert_tan(self, op):
"""Convert TFLite TAN"""
return self._convert_unary_elemwise(_op.tan, op)
def convert_cos(self, op):
"""Convert TFLite COS"""
return self._convert_unary_elemwise(_op.cos, op)
def convert_sqrt(self, op):
"""Convert TFLite SQRT"""
return self._convert_unary_elemwise(_op.sqrt, op)
def convert_rsqrt(self, op):
"""Convert TFLite RSQRT"""
return self._convert_unary_elemwise(_op.rsqrt, op)
def convert_neg(self, op):
"""Convert TFLite NEG"""
return self._convert_unary_elemwise(_op.negative, op)
def convert_elu(self, op):
"""Convert TFLite ELU"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized ELU operator is not supported yet.")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
exp_type = self.get_tensor_type_str(input_tensor.tensor.Type())
out = relay.const(-1.0, exp_type) * _op.nn.relu(
relay.const(1.0, exp_type) - _op.exp(in_expr)
) + _op.nn.relu(in_expr)
return out
def convert_square(self, op):
"""Convert TFLite SQUARE"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized SQUARE operator is not supported yet."
)
exp_type = self.get_tensor_type_str(output_tensor.tensor.Type())
out = _op.power(in_expr, relay.const(2, exp_type))
return out
def _convert_elemwise(
self,
relay_op,
op,
ignore_qnn_params=False,
comparison_op=False,
):
"""Generic method to Convert TFLite elemwise"""
try:
from tflite.AddOptions import AddOptions
from tflite.BuiltinOptions import BuiltinOptions
from tflite.DivOptions import DivOptions
from tflite.MulOptions import MulOptions
from tflite.SubOptions import SubOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
lhs_tensor = input_tensors[0]
rhs_tensor = input_tensors[1]
lhs_expr = self.get_tensor_expr(lhs_tensor)
rhs_expr = self.get_tensor_expr(rhs_tensor)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
        # The TFLite format demands equal scale and zero_point tuple parameters for some
        # operations, which allows us to use the non-quantized operation instead of the
        # quantized one when ignore_qnn_params=True.
if ignore_qnn_params and not comparison_op:
assert (
lhs_tensor.qnn_params
and self.has_same_qnn_params(lhs_tensor, output_tensor)
and self.has_same_qnn_params(rhs_tensor, output_tensor)
), "All tensors should be quantized with the same (scale,zero-point) tuple parameters"
# If quantized, extracts qnn params and call QNN add operator.
if not ignore_qnn_params and lhs_tensor.qnn_params:
assert rhs_tensor.qnn_params, "Both tensors should be quantized."
assert output_tensor.qnn_params, "Output tensor should be quantized."
out = relay_op(
lhs=lhs_expr,
rhs=rhs_expr,
lhs_scale=lhs_tensor.qnn_params["scale"],
lhs_zero_point=lhs_tensor.qnn_params["zero_point"],
rhs_scale=rhs_tensor.qnn_params["scale"],
rhs_zero_point=rhs_tensor.qnn_params["zero_point"],
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
)
else:
out = relay_op(lhs_expr, rhs_expr)
# Options (fused_activation_function)
options = None
if op.BuiltinOptionsType() == BuiltinOptions.AddOptions:
options = AddOptions()
elif op.BuiltinOptionsType() == BuiltinOptions.SubOptions:
options = SubOptions()
elif op.BuiltinOptionsType() == BuiltinOptions.MulOptions:
options = MulOptions()
elif op.BuiltinOptionsType() == BuiltinOptions.DivOptions:
options = DivOptions()
if options is not None:
op_options = op.BuiltinOptions()
options.Init(op_options.Bytes, op_options.Pos)
fused_activation_fn = options.FusedActivationFunction()
# Handle fused activations
if not ignore_qnn_params and output_tensor.qnn_params:
scale_val = get_scalar_from_constant(output_tensor.qnn_params["scale"])
zero_point_val = get_scalar_from_constant(output_tensor.qnn_params["zero_point"])
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
out = self.convert_qnn_fused_activation_function(
expr=out,
fused_activation_fn=fused_activation_fn,
scale=scale_val,
zero_point=zero_point_val,
dtype=output_tensor_type_str,
)
else:
out = self.convert_fused_activation_function(out, fused_activation_fn)
return out
def convert_add(self, op):
"""Convert TFLite ADD"""
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
return self._convert_elemwise(_qnn.op.add, op)
return self._convert_elemwise(_op.add, op)
def convert_add_n(self, op):
"""Convert TFLite ADD_N"""
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
input_tensors = self.get_input_tensors(op)
assert not input_tensors[0].qnn_params, "TFLite does not support quantized ADD_N."
lhs_expr = self.get_tensor_expr(input_tensors[0])
for rhs_tensor in input_tensors[1:]:
assert not rhs_tensor.qnn_params, "TFLite does not support quantized ADD_N"
rhs_expr = self.get_tensor_expr(rhs_tensor)
lhs_expr = _op.add(lhs_expr, rhs_expr)
return lhs_expr
def convert_sub(self, op):
"""Convert TFLite SUB"""
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
return self._convert_elemwise(_qnn.op.subtract, op)
return self._convert_elemwise(_op.subtract, op)
def convert_mul(self, op):
"""Convert TFLite MUL"""
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
return self._convert_elemwise(_qnn.op.mul, op)
return self._convert_elemwise(_op.multiply, op)
def convert_div(self, op):
"""Convert TFLite DIV"""
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized DIV operator is not supported yet.")
return self._convert_elemwise(_op.divide, op)
def convert_pow(self, op):
"""Convert TFLite POW"""
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized POW operator is not supported yet.")
return self._convert_elemwise(_op.power, op)
def convert_maximum(self, op):
"""Convert TFLite MAXIMUM"""
return self._convert_elemwise(_op.maximum, op, self.is_quantized(op))
def convert_minimum(self, op):
"""Convert TFLite MINIMUM"""
return self._convert_elemwise(_op.minimum, op, self.is_quantized(op))
def convert_greater(self, op):
"""Convert TFLite GREATER"""
return self._convert_elemwise(_op.greater, op, self.is_quantized(op), comparison_op=True)
def convert_squared_difference(self, op):
"""Convert TFLite SQUARED DIFFERENCE"""
# Check if the input tensor is quantized, call QNN op
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized squared difference operator is not supported yet."
)
difference = self._convert_elemwise(_op.subtract, op)
# _convert_elemwise has guaranteed only have one output tensor
exp_type = self.get_tensor_type_str(self.get_output_tensors(op)[0].tensor.Type())
out = _op.power(difference, relay.const(2, exp_type))
return out
def convert_greater_equal(self, op):
"""Convert TFLite GREATER_EQUAL"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized GREATER_EQUAL operator is not supported yet."
)
return self._convert_elemwise(_op.greater_equal, op)
def convert_less(self, op):
"""Convert TFLite LESS"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented("TFlite quantized LESS operator is not supported yet.")
return self._convert_elemwise(_op.less, op)
def convert_less_equal(self, op):
"""Convert TFLite LESS_EQUAL"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized LESS_EQUAL operator is not supported yet."
)
return self._convert_elemwise(_op.less_equal, op)
def convert_equal(self, op):
"""Convert TFLite EQUAL"""
return self._convert_elemwise(_op.equal, op, self.is_quantized(op), comparison_op=True)
def convert_not_equal(self, op):
"""Convert TFLite NOT_EQUAL"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized NOT_EQUAL operator is not supported yet."
)
return self._convert_elemwise(_op.not_equal, op)
def _convert_logical_binary(self, relay_op, op):
"""Generic method to convert logical binary ops"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
lhs_tensor = input_tensors[0]
lhs_expr = self.get_tensor_expr(lhs_tensor)
rhs_tensor = input_tensors[1]
rhs_expr = self.get_tensor_expr(rhs_tensor)
out = relay_op(lhs_expr, rhs_expr)
return out
def convert_logical_and(self, op):
"""Convert tflite LOGICAL_AND"""
return self._convert_logical_binary(_op.logical_and, op)
def convert_logical_or(self, op):
"""Convert tflite LOGICAL_OR"""
return self._convert_logical_binary(_op.logical_or, op)
def convert_logical_not(self, op):
"""Convert tflite LOGICAL_NOT"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
data = self.get_expr(input_tensors[0].tensor_idx)
out = _op.logical_not(data)
return out
def convert_gather(self, op):
"""Method to Convert TFLite GATHER operator"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.GatherOptions import GatherOptions
from tflite.TensorType import TensorType
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
data = self.get_tensor_expr(input_tensors[0])
indices = input_tensors[1]
indices_type = indices.tensor.Type()
assert indices_type in (TensorType.INT32, TensorType.INT64)
assert op.BuiltinOptionsType() == BuiltinOptions.GatherOptions
op_options = op.BuiltinOptions()
gather_options = GatherOptions()
gather_options.Init(op_options.Bytes, op_options.Pos)
axis = gather_options.Axis()
        # Check that the indices are within bounds.
data_shape = to_int_list(self.get_tensor_shape(input_tensors[0]))
data_dim = len(data_shape)
axis = data_dim + axis if axis < 0 else axis
assert axis >= 0, "Axis out of bounds"
assert axis < data_dim, "Axis out of bounds"
if self.has_expr(indices.tensor_idx):
indices_expr = self.get_expr(indices.tensor_idx)
else:
indices_val = self.get_tensor_value(indices)
indices_expr = self.exp_tab.new_const(
indices_val, dtype=self.get_tensor_type_str(indices_type)
)
indices_shape = list(indices_val.shape)
indices_len = len(indices_shape)
out_shape = data_shape[:axis] + indices_shape[:] + data_shape[axis + 1 :]
loopover = [range(s) for s in out_shape]
for idx in list(itertools.product(*loopover)):
real_indices = (
list(idx[:axis])
+ [indices_val[idx[axis : axis + indices_len]]]
+ list(idx[axis + indices_len :])
)
if np.any(np.subtract(data_shape, real_indices) < 0):
raise ValueError("TFLite out of bound indices are not supported.")
# Use mode 'fast' since indices are already checked within bounds.
out = _op.take(data, indices_expr, axis=axis, mode="fast")
return out
def convert_gather_nd(self, op):
"""Method to Convert TFLite GATHER_ND operator"""
try:
from tflite.TensorType import TensorType
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
for t in input_tensors:
assert not t.qnn_params, "Quantized input is not expected."
data = self.get_tensor_expr(input_tensors[0])
indices = self.get_tensor_expr(input_tensors[1])
indices_type = input_tensors[1].tensor.Type()
assert indices_type in (TensorType.INT32, TensorType.INT64)
indices_dims = len(_infer_shape(indices))
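        # TFLite/TensorFlow store the index vector along the last axis of `indices`,
        # while relay.gather_nd expects it along the first axis, hence the transpose.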
indices_t = _op.transpose(indices, axes=[-1] + list(range(indices_dims - 1)))
out = _op.gather_nd(data, indices_t)
return out
def convert_strided_slice(self, op):
"""Method to Convert TFLite STRIDED_SLICE operator.
        NOTE: Even though TensorFlow supports begin_mask, end_mask, ellipsis_mask, new_axis_mask
        and shrink_axis_mask, TFLite doesn't support these and expects these values to be zero.
        But in the future they may open up the mask implementation, so the implementation is
        kept the same as TensorFlow's.
This op extracts a slice of size (end - begin) / stride from the given input tensor.
Starting at the location specified by begin the slice continues by adding stride to the
index until all dimensions are not less than end. Note that a stride can be negative,
which causes a reverse slice.
For slice input[val0, val1, ..., valn], begin/end/strides will be vectors of length n.
In each mask field(begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask)
the ith bit will correspond to the ith val.
If the ith bit of begin_mask is set, begin[i] is ignored and the fullest possible range
in that dimension is used instead.
If the ith bit of ellipsis_mask is set, as many unspecified dimensions as needed will be
inserted between other dimensions. Only one non-zero bit is allowed in ellipsis_mask.
If the ith bit of new_axis_mask is set, then begin, end, and stride are ignored and a
new length 1 dimension is added at this point in the output tensor.
If the ith bit of shrink_axis_mask is set, it implies that the ith specification shrinks
the dimensionality by 1, taking on the value at index begin[i]. end[i] and strides[i]
are ignored in this case.
begin and end are zero-indexed. strides entries must be non-zero.
        The TVM Relay implementation of strided_slice doesn't support masks, so the mask values
        are processed in this function and begin/end/strides are updated accordingly. If any mask
        is present, since TVM doesn't support mask computation directly, the output needs a
        final reshape.
"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.StridedSliceOptions import StridedSliceOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 4, "input tensors length should be 4"
data_expr = self.get_expr(input_tensors[0].tensor_idx)
begin = list(self.get_tensor_value(input_tensors[1]))
end = list(self.get_tensor_value(input_tensors[2]))
stride = list(self.get_tensor_value(input_tensors[3]))
assert op.BuiltinOptionsType() == BuiltinOptions.StridedSliceOptions
op_options = op.BuiltinOptions()
options = StridedSliceOptions()
options.Init(op_options.Bytes, op_options.Pos)
begin_mask = options.BeginMask()
end_mask = options.EndMask()
ellipsis_mask = options.EllipsisMask()
new_axis_mask = options.NewAxisMask()
shrink_axis_mask = options.ShrinkAxisMask()
data_shape = to_int_list(self.get_tensor_shape(input_tensors[0]))
data_dim = len(data_shape)
stride_dim = len(stride)
def _transform_mask(stride_dim, ellipsis_mask):
"""Handle mask inputs to create new begin, end, stride and output shape"""
m_begin = [0] * data_dim
m_end = [0] * data_dim
m_stride = [0] * data_dim
fshape_indices = []
            # Count new axes after ellipsis_mask; they are taken into account while applying ellipsis_mask.
ellipsis_seen = False
new_axes_after_ellipsis = 0
for i in range(stride_dim):
mask = 1 << i
if ellipsis_seen and (mask & new_axis_mask) != 0:
new_axes_after_ellipsis += 1
if (mask & ellipsis_mask) != 0:
ellipsis_seen = True
if not ellipsis_seen:
# Used later for extending the stride attributes in the below loop.
ellipsis_mask |= 1 << stride_dim
stride_dim += 1
final_index = 0
for index in range(stride_dim):
mask = 1 << index
if mask & ellipsis_mask:
# Identify the end index for applying ellipsis_mask
to_index = min(
((data_dim - (stride_dim - index)) + 1 + new_axes_after_ellipsis), data_dim
)
for i in range(final_index, to_index):
m_begin[final_index] = 0
m_end[final_index] = data_shape[final_index]
m_stride[final_index] = 1
fshape_indices.append(final_index)
final_index += 1
elif mask & new_axis_mask:
fshape_indices.append(-1)
elif not mask & new_axis_mask:
if final_index == len(m_begin):
break
if mask & begin_mask:
m_begin[final_index] = data_shape[final_index] if stride[index] < 0 else 0
elif begin[index]:
m_begin[final_index] = begin[index]
if mask & end_mask:
m_end[final_index] = 0 if stride[index] < 0 else data_shape[final_index]
elif end[index]:
m_end[final_index] = end[index]
m_stride[final_index] = stride[index]
if mask & shrink_axis_mask:
                        # TensorFlow makes the axis marked by shrink_axis_mask a dimension of size 1
m_begin[final_index] = (
data_shape[final_index] + begin[index]
if begin[index] < 0
else begin[index]
)
m_end[final_index] = m_begin[final_index] + 1
m_stride[final_index] = 1
fshape_indices.append(-2)
else:
fshape_indices.append(final_index)
final_index += 1
return m_begin, m_end, m_stride, fshape_indices
fshape_indices = None
if begin_mask or end_mask or ellipsis_mask or new_axis_mask or shrink_axis_mask:
begin, end, stride, fshape_indices = _transform_mask(stride_dim, ellipsis_mask)
out = _op.strided_slice(data_expr, begin=begin, end=end, strides=stride)
out_shape = _infer_shape(out)
if not fshape_indices:
fshape_indices = range(len(out_shape))
# Create final output shape.
final_output = []
final_len = len(fshape_indices)
for gather_index in fshape_indices:
if gather_index == -1:
final_output.append(1)
final_len += 1
elif gather_index == -2:
final_len -= 1
else:
final_output.append(out_shape[gather_index])
if final_len == 0:
return _op.squeeze(out, axis=tuple(range(len(fshape_indices))))
if not final_output:
return out
return _op.reshape(out, newshape=tuple(final_output))
def convert_zeros_like(self, op):
"""Convert TFLite ZEROS LIKE"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
out = _op.zeros_like(in_expr)
return out
def convert_fill(self, op):
"""Convert TFLite FILL"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
if self.has_expr(input_tensors[0].tensor_idx):
raise tvm.error.OpNotImplemented(
"For dims parameter of Fill operator," " only constant values are supported."
)
in_dims = list(self.get_tensor_value(input_tensors[0]))
in_value_expr = self.get_expr(input_tensors[1].tensor_idx)
out = _op.full(in_value_expr, in_dims)
return out
def _convert_reduce(self, relay_op, op):
"""Generic method to Convert TFLite REDUCE operators"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.ReducerOptions import ReducerOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
# input_tensor
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
# axis
axis_value = self.get_tensor_value(input_tensors[1])
axis = tuple(axis_value) if len(axis_value.shape) > 0 else tuple((axis_value.item(),))
# Options - keep_dims (bool)
        # In case Options are not present, set keep_dims to False (default)
if op.BuiltinOptionsType():
assert op.BuiltinOptionsType() == BuiltinOptions.ReducerOptions
reduce_options = ReducerOptions()
op_options = op.BuiltinOptions()
reduce_options.Init(op_options.Bytes, op_options.Pos)
keep_dims = reduce_options.KeepDims()
else:
keep_dims = False
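        # For quantized inputs, widen to int32 before reducing so that the accumulation
        # (e.g. for SUM/MEAN) does not overflow the narrow integer dtype; the result is
        # requantized to the output qnn params below.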
if input_tensor.qnn_params:
in_expr = _op.cast(in_expr, "int32")
out = relay_op(in_expr, axis, keep_dims)
# Finally if the reduce is quantized. Add a requantize at the end.
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
if output_tensor.qnn_params:
out = _qnn.op.requantize(
out,
input_scale=input_tensor.qnn_params["scale"],
input_zero_point=input_tensor.qnn_params["zero_point"],
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
out_dtype=output_tensor_type_str,
)
return out
def convert_reduce_min(self, op):
return self._convert_reduce(_op.reduce.min, op)
def convert_reduce_max(self, op):
return self._convert_reduce(_op.reduce.max, op)
def convert_reduce_mean(self, op):
return self._convert_reduce(_op.reduce.mean, op)
def convert_reduce_prod(self, op):
return self._convert_reduce(_op.reduce.prod, op)
def convert_reduce_sum(self, op):
return self._convert_reduce(_op.reduce.sum, op)
def convert_reduce_any(self, op):
return self._convert_reduce(_op.reduce.any, op)
def _convert_arg_min_max(self, relay_op, op):
"""Generic method converting TFLite arg_min_max"""
try:
from tflite.ArgMaxOptions import ArgMaxOptions
from tflite.ArgMinOptions import ArgMinOptions
from tflite.BuiltinOptions import BuiltinOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "two input tensor arguments expected"
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "one output tensor expected"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
axis_tensor = input_tensors[1]
        # In TensorFlow, the `axis` argument is a Tensor, not an attribute. We
        # only support the case where it comes from a scalar constant.
axis_value = self.get_tensor_value(axis_tensor)
assert axis_value.size == 1
axis_value = axis_value.item()
if op.BuiltinOptionsType() == BuiltinOptions.ArgMinOptions:
arg_min_max_options = ArgMinOptions()
elif op.BuiltinOptionsType() == BuiltinOptions.ArgMaxOptions:
arg_min_max_options = ArgMaxOptions()
op_options = op.BuiltinOptions()
arg_min_max_options.Init(op_options.Bytes, op_options.Pos)
# set keepdims to True since tflite 1.13 removes all dims of size 1
# WARNING: all other versions of tflite > 1.13 need keepdims=False
out = relay_op(in_expr, axis=axis_value, keepdims=False, exclude=False)
return out
def convert_arg_min(self, op):
"""Convert TFLite ARG_MIN"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized ARG_MIN operator is not supported yet."
)
return self._convert_arg_min_max(_op.argmin, op)
def convert_arg_max(self, op):
"""Convert TFLite ARG_MAX"""
return self._convert_arg_min_max(_op.argmax, op)
def convert_fully_connected(self, op):
"""Convert TFLite fully connected"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.FullyConnectedOptions import FullyConnectedOptions
from tflite.TensorType import TensorType
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) in (2, 3), "input tensors length should be two or three"
input_tensor = input_tensors[0]
weight_tensor = input_tensors[1]
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
output_tensor_type = output_tensor.tensor.Type()
output_tensor_type_str = self.get_tensor_type_str(output_tensor_type)
weight_tensor_shape = to_int_list(self.get_tensor_shape(weight_tensor))
        # Weight should have only 2 dimensions (TFLite convention)
assert len(weight_tensor_shape) == 2, "Weight should be only 2-dim"
        # TFLite Fully_Connected:
        #   Input shape:  [i_batch_size, ..., n_inputs]
        #   Filter shape: [n_units, n_inputs]
        #
        # We transform the Fully_Connected inputs to Dense op inputs as below:
        #   Dense expected input shape:  [batch_size, n_inputs]
        #   Dense expected weight shape: [n_units, n_inputs]
        #   Dense output shape:          [batch_size, n_units]
target_shape = tuple((-1, weight_tensor_shape[1]))
in_expr = self.get_tensor_expr(input_tensor)
in_expr = _op.reshape(in_expr, target_shape)
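        # e.g. (hypothetical shapes) an input of shape (2, 5, 10) with a weight of shape
        # (20, 10) is reshaped to (10, 10); dense then produces an output of shape (10, 20).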
# TODO: Change the output shape calculation based on keep_dim option
assert op.BuiltinOptionsType() == BuiltinOptions.FullyConnectedOptions
op_options = op.BuiltinOptions()
fully_connected_options = FullyConnectedOptions()
fully_connected_options.Init(op_options.Bytes, op_options.Pos)
fused_activation_fn = fully_connected_options.FusedActivationFunction()
keep_num_dims = fully_connected_options.KeepNumDims()
# weight tensor type should be INT8/UINT8 (quantization) or FLOAT32
weight_tensor_type = weight_tensor.tensor.Type()
assert weight_tensor_type in (TensorType.INT8, TensorType.UINT8, TensorType.FLOAT32)
weight_tensor_type_str = self.get_tensor_type_str(weight_tensor_type)
if self.has_expr(weight_tensor.tensor_idx):
weight_expr = self.get_expr(weight_tensor.tensor_idx)
else:
weight_value = self.get_tensor_value(weight_tensor)
weight_expr = self.exp_tab.new_const(weight_value, dtype=weight_tensor_type_str)
weight_shape = _infer_shape(weight_expr)
if input_tensor.qnn_params:
out = _qnn.op.dense(
in_expr,
weight_expr,
input_zero_point=input_tensor.qnn_params["zero_point"],
kernel_zero_point=weight_tensor.qnn_params["zero_point"],
input_scale=input_tensor.qnn_params["scale"],
kernel_scale=weight_tensor.qnn_params["scale"],
units=weight_shape[0],
out_dtype="int32",
)
else:
out = _op.nn.dense(in_expr, weight_expr, units=weight_shape[0])
# if we have bias
if len(input_tensors) == 3:
bias_tensor = input_tensors[2]
if bias_tensor.tensor_idx != -1:
bias_tensor_type = bias_tensor.tensor.Type()
# bias tensor type should be INT32 (quantization) or FLOAT32
assert bias_tensor_type in (TensorType.INT32, TensorType.FLOAT32)
bias_tensor_type_str = self.get_tensor_type_str(bias_tensor_type)
if self.has_expr(bias_tensor.tensor_idx):
bias_expr = self.get_expr(bias_tensor.tensor_idx)
else:
bias_expr = self.exp_tab.new_const(
self.get_tensor_value(bias_tensor), dtype=bias_tensor_type_str
)
out = _op.nn.bias_add(out, bias_expr)
# Finally if the dense is quantized. Add a requantize at the end.
if output_tensor.qnn_params:
data_scale = input_tensor.qnn_params["scale"]
weight_scale = weight_tensor.qnn_params["scale"]
data_scale_val = get_scalar_from_constant(data_scale)
weight_scale_val = get_scalar_from_constant(weight_scale)
new_input_scale_val = data_scale_val * weight_scale_val
new_input_scale = relay.const(new_input_scale_val, "float32")
new_input_zero_point = relay.const(0, "int32")
# Requantize
out = _qnn.op.requantize(
out,
input_scale=new_input_scale,
input_zero_point=new_input_zero_point,
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
out_dtype=output_tensor_type_str,
)
# Call activation function
output_scale_val = get_scalar_from_constant(output_tensor.qnn_params["scale"])
output_zero_point_val = get_scalar_from_constant(output_tensor.qnn_params["zero_point"])
out = self.convert_qnn_fused_activation_function(
expr=out,
fused_activation_fn=fused_activation_fn,
scale=output_scale_val,
zero_point=output_zero_point_val,
dtype=output_tensor_type_str,
)
else:
out = self.convert_fused_activation_function(out, fused_activation_fn)
# Change the output shape calculation based on keep_dim option
if keep_num_dims:
input_shape = _infer_shape(self.get_tensor_expr(input_tensor))
output_shape = input_shape[:-1] + tuple([weight_tensor_shape[0]])
out = _op.reshape(out, output_shape)
return out
def convert_squeeze(self, op):
"""Convert TFLite squeeze"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.SqueezeOptions import SqueezeOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
output_tensors = self.get_output_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
assert len(output_tensors) == 1, "output tensors length should be 1"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
assert op.BuiltinOptionsType() == BuiltinOptions.SqueezeOptions
op_options = op.BuiltinOptions()
squeeze_options = SqueezeOptions()
squeeze_options.Init(op_options.Bytes, op_options.Pos)
squeeze_axis = squeeze_options.SqueezeDimsAsNumpy()
in_expr = self.get_expr(input_tensor_idx)
out = _op.squeeze(in_expr, axis=tuple(squeeze_axis))
return out
def convert_fused_activation_function(self, in_expr, fused_activation_fn):
"""Convert TFLite fused activation function"""
try:
from tflite.ActivationFunctionType import ActivationFunctionType
except ImportError:
raise ImportError("The tflite package must be installed")
if fused_activation_fn == ActivationFunctionType.NONE:
return in_expr
if fused_activation_fn == ActivationFunctionType.RELU6:
return _op.clip(in_expr, a_min=0, a_max=6)
if fused_activation_fn == ActivationFunctionType.RELU:
return _op.nn.relu(in_expr)
if fused_activation_fn == ActivationFunctionType.RELU_N1_TO_1:
return _op.clip(in_expr, a_min=-1, a_max=1)
if fused_activation_fn == ActivationFunctionType.TANH:
return _op.tanh(in_expr)
fused_activation_fn_str = self.activation_fn_type[fused_activation_fn]
raise tvm.error.OpNotImplemented(
"Fused activation {} is not supported yet.".format(fused_activation_fn_str)
)
def convert_conv(self, op, conv_type):
"""convolution implementation."""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.Conv2DOptions import Conv2DOptions
from tflite.DepthwiseConv2DOptions import DepthwiseConv2DOptions
from tflite.Padding import Padding
from tflite.TensorType import TensorType
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) >= 2, "input tensors length should be >= 2"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
weight_tensor = input_tensors[1]
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
output_tensor_type = output_tensor.tensor.Type()
output_tensor_type_str = self.get_tensor_type_str(output_tensor_type)
is_depthwise_conv = False
if conv_type == "conv2d":
assert op.BuiltinOptionsType() == BuiltinOptions.Conv2DOptions
op_options = op.BuiltinOptions()
conv_options = Conv2DOptions()
conv_options.Init(op_options.Bytes, op_options.Pos)
elif conv_type == "depthwise":
is_depthwise_conv = True
assert op.BuiltinOptionsType() == BuiltinOptions.DepthwiseConv2DOptions
op_options = op.BuiltinOptions()
conv_options = DepthwiseConv2DOptions()
conv_options.Init(op_options.Bytes, op_options.Pos)
depth_multiplier = conv_options.DepthMultiplier()
else:
raise tvm.error.OpNotImplemented(
"Operator {} is not supported for frontend TFLite.".format(conv_type)
)
stride_h = conv_options.StrideH()
stride_w = conv_options.StrideW()
dilation_h = conv_options.DilationHFactor()
dilation_w = conv_options.DilationWFactor()
padding = conv_options.Padding()
fused_activation_fn = conv_options.FusedActivationFunction()
_, input_h, input_w, input_c = to_int_list(self.get_tensor_shape(input_tensor))
if is_depthwise_conv:
# TFLite depthwise convolution kernel layout is:
# 1 KH KW C(input_c * depth_multiplier)
_, kernel_h, kernel_w, in_channels = to_int_list(self.get_tensor_shape(weight_tensor))
assert in_channels == input_c * depth_multiplier
else:
output_channels, kernel_h, kernel_w, _ = to_int_list(
self.get_tensor_shape(weight_tensor)
)
dilated_kernel_h = dilation_h * (kernel_h - 1) + 1
dilated_kernel_w = dilation_w * (kernel_w - 1) + 1
params = {
"kernel_size": [kernel_h, kernel_w],
"strides": [stride_h, stride_w],
"dilation": [dilation_h, dilation_w],
"padding": [0, 0],
"data_layout": "NHWC",
}
if is_depthwise_conv:
params["channels"] = int(in_channels)
params["groups"] = int(input_c)
# If number of input channels is 1, treat as normal
# convolution.
params["kernel_layout"] = "HWIO" if input_c == 1 else "HWOI"
else:
params["channels"] = int(output_channels)
params["kernel_layout"] = "HWIO"
# weight tensor type should be INT8/UINT8 (quantization) or FLOAT32
weight_tensor_type = weight_tensor.tensor.Type()
assert weight_tensor_type in (TensorType.INT8, TensorType.UINT8, TensorType.FLOAT32)
weight_tensor_type_str = self.get_tensor_type_str(weight_tensor_type)
in_expr = self.get_expr(input_tensor_idx)
        # TFLite converts float32 models to float16 models by introducing
        # a Dequantize op for every op that contains float32 values
        # (weights, biases, constants, etc.).
        # So the conv op may have its weight and bias as tensors instead of constant values.
if self.has_expr(weight_tensor.tensor_idx):
weight_expr = self.get_expr(weight_tensor.tensor_idx)
if is_depthwise_conv:
weight_expr = _op.reshape(
weight_expr, (kernel_h, kernel_w, input_c, depth_multiplier)
)
else:
weight_expr = _op.transpose(weight_expr, axes=(1, 2, 3, 0))
else:
if self.is_prefetched(weight_tensor.tensor_idx):
weight_value = self.get_prefetched_node(weight_tensor.tensor_idx)
else:
weight_value = self.get_tensor_value(weight_tensor)
# TFLite kernel layout:
# convolution:
# OC KH KW IC, we require KH KW IC OC (HWIO)
# depthwise convolution:
# 1 KH KW C(input_c * depth_multiplier), we require
# KH KW IC M (depth_multiplier) (HWOI)
if is_depthwise_conv:
weight_value = weight_value.reshape(kernel_h, kernel_w, input_c, depth_multiplier)
else:
weight_value = weight_value.transpose((1, 2, 3, 0))
weight_expr = self.exp_tab.new_const(weight_value, dtype=weight_tensor_type_str)
if padding == Padding.VALID:
pass
elif padding == Padding.SAME:
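            # SAME padding pads so the output spatial size is ceil(input / stride);
            # e.g. (hypothetical values, assuming the standard TensorFlow SAME rule)
            # input_h=224, dilated_kernel_h=3, stride_h=2 gives (pad_top, pad_bottom) = (0, 1).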
pad_top, pad_bottom = get_pad_value(input_h, dilated_kernel_h, stride_h)
pad_left, pad_right = get_pad_value(input_w, dilated_kernel_w, stride_w)
do_pad = not (pad_top == 0 and pad_bottom == 0 and pad_left == 0 and pad_right == 0)
if do_pad:
params["padding"] = [pad_top, pad_left, pad_bottom, pad_right]
else:
raise tvm.error.OpAttributeUnImplemented(
"Padding format {} is not supported for operator Conv.".format(padding)
)
if input_tensor.qnn_params:
qnn_conv2d_params = dict(params)
qnn_conv2d_params["input_zero_point"] = input_tensor.qnn_params["zero_point"]
qnn_conv2d_params["kernel_zero_point"] = weight_tensor.qnn_params["zero_point"]
qnn_conv2d_params["out_dtype"] = (
"int64" if output_tensor_type_str == "int16" else "int32"
)
qnn_conv2d_params["input_scale"] = input_tensor.qnn_params["scale"]
qnn_conv2d_params["kernel_scale"] = weight_tensor.qnn_params["scale"]
out = _qnn.op.conv2d(in_expr, weight_expr, **qnn_conv2d_params)
else:
out = _op.nn.conv2d(in_expr, weight_expr, **params)
# if we have bias
if len(input_tensors) == 3:
bias_tensor = input_tensors[2]
bias_tensor_type = bias_tensor.tensor.Type()
# bias tensor type should be INT32 (int8 qnn) or INT64 (int16 qnn) or FLOAT32
assert bias_tensor_type in (TensorType.INT32, TensorType.INT64, TensorType.FLOAT32)
bias_tensor_type_str = self.get_tensor_type_str(bias_tensor_type)
if self.has_expr(bias_tensor.tensor_idx):
bias_expr = self.get_expr(bias_tensor.tensor_idx)
else:
bias_expr = self.exp_tab.new_const(
self.get_tensor_value(bias_tensor), dtype=bias_tensor_type_str
)
channel_axis = 3
out = _op.nn.bias_add(out, bias_expr, axis=channel_axis)
# Handle fused activation.
if output_tensor.qnn_params:
# Calculate the intermediate scale and zero point of the int32 output.
data_scale = input_tensor.qnn_params["scale"]
data_scale_val = get_scalar_from_constant(data_scale)
weight_scale = weight_tensor.qnn_params["scale"]
# If weight scale is scalar, it is per-tensor quantization
if isinstance(weight_scale, float):
weight_scale_val = get_scalar_from_constant(weight_scale)
else:
weight_scale_val = get_tensor_from_constant(weight_scale)
new_input_scale_val = data_scale_val * weight_scale_val
new_input_scale = relay.const(new_input_scale_val, "float32")
new_input_zero_point = relay.const(0, "int32")
# Finally requantize
out = _qnn.op.requantize(
out,
input_scale=new_input_scale,
input_zero_point=new_input_zero_point,
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
out_dtype=output_tensor_type_str,
axis=3,
)
# Call activation function
output_scale_val = get_scalar_from_constant(output_tensor.qnn_params["scale"])
output_zero_point_val = get_scalar_from_constant(output_tensor.qnn_params["zero_point"])
out = self.convert_qnn_fused_activation_function(
expr=out,
fused_activation_fn=fused_activation_fn,
scale=output_scale_val,
zero_point=output_zero_point_val,
dtype=output_tensor_type_str,
)
else:
out = self.convert_fused_activation_function(out, fused_activation_fn)
return out
def convert_split(self, op):
"""split implementation."""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.SplitOptions import SplitOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be == 2"
axis_tensor = input_tensors[0]
split_axis = self.get_tensor_value(axis_tensor)
input_tensor = input_tensors[1]
input_tensor_idx = input_tensor.tensor_idx
assert op.BuiltinOptionsType() == BuiltinOptions.SplitOptions
op_options = op.BuiltinOptions()
split_options = SplitOptions()
split_options.Init(op_options.Bytes, op_options.Pos)
num_splits = split_options.NumSplits()
in_expr = self.get_expr(input_tensor_idx)
out = _op.split(in_expr, num_splits, axis=int(split_axis))
# Relay does not like a TupleWrapper of 1 element, further this
# only shows up with tf1.13 if we use a split with num_splits==1.
# In tf 1.14 this doesn't appear as it is automatically a reshape
# operation.
if isinstance(out, _expr.TupleWrapper):
if out.size == 1:
out = out[0]
return out
def convert_split_v(self, op):
"""SPLIT_V implementation."""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 3, "input tensors length should be 3"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
in_expr = self.get_expr(input_tensor_idx)
if self.has_expr(input_tensors[1].tensor_idx):
raise tvm.error.OpNotImplemented(
"For size_splits parameter of SPLIT_V operator, "
"only constant values are supported."
)
size_splits = list(self.get_tensor_value(input_tensors[1]))
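        # Relay split expects the indices at which to split rather than section sizes,
        # so convert the TFLite sizes into cumulative indices, e.g. [2, 3, 5] -> (2, 5).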
size_splits = tuple(np.cumsum(size_splits)[:-1])
axis_tensor = input_tensors[2]
split_axis = self.get_tensor_value(axis_tensor)
out = _op.split(in_expr, size_splits, axis=int(split_axis))
# Relay does not like a TupleWrapper of 1 element, further this
# only shows up with tf1.13 if we use a split with num_splits==1.
# In tf 1.14 this doesn't appear as it is automatically a reshape
# operation.
if isinstance(out, _expr.TupleWrapper) and out.size == 1:
out = out[0]
return out
def convert_slice(self, op):
"""Convert TFLite SLICE"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 3, "input tensors length should be == 3"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
begin = list(self.get_tensor_value(input_tensors[1]))
size = list(self.get_tensor_value(input_tensors[2]))
# strided_slice(Relay) needs the slice's end indices, not the size
end = size
input_tensor_shape = to_int_list(self.get_tensor_shape(input_tensor))
input_tensor_rank = len(input_tensor_shape)
for i in range(input_tensor_rank):
if size[i] == -1:
end[i] = input_tensor_shape[i]
else:
end[i] += begin[i]
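        # e.g. (hypothetical values) for an input of shape [4, 6], begin=[1, 0] and
        # size=[2, -1] give end=[3, 6].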
out = _op.strided_slice(in_expr, begin, end)
return out
def convert_select(self, op):
"""Convert TFLite SELECT"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 3, "input tensors length should be == 3"
cond = self.get_tensor_expr(input_tensors[0])
x = self.get_tensor_expr(input_tensors[1])
y = self.get_tensor_expr(input_tensors[2])
out = _op.where(cond, x, y)
return out
def convert_transpose(self, op):
"""transpose implementation."""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
in_expr = self.get_expr(input_tensor_idx)
# axis
in_axis = tuple(self.get_tensor_value(input_tensors[1]))
if not in_axis:
out = _op.transpose(in_expr)
else:
out = _op.transpose(in_expr, in_axis)
return out
def convert_reverse_sequence(self, op):
"""Convert TFLite REVERSE_SEQUENCE"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.ReverseSequenceOptions import ReverseSequenceOptions
except ImportError:
raise ImportError("The tflite package must be installed")
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFLite does not support quantized REVERSE_SEQUENCE operator yet."
)
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
in_expr = self.get_tensor_expr(input_tensors[0])
length_expr = self.get_tensor_expr(input_tensors[1])
assert op.BuiltinOptionsType() == BuiltinOptions.ReverseSequenceOptions
op_options = op.BuiltinOptions()
options = ReverseSequenceOptions()
options.Init(op_options.Bytes, op_options.Pos)
batch_axis = options.BatchDim()
seq_axis = options.SeqDim()
return _op.reverse_sequence(in_expr, length_expr, seq_axis, batch_axis)
def convert_cast(self, op):
"""Convert TFLite CAST"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.CastOptions import CastOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
# MLIR-based converter outputs no BuiltinOptions for Cast operator. In this
# case the output type can be derived from the Cast operator output tensor.
# When TOCO converter is used there will be "normal" BuiltinOptions.CastOptions
# with output type.
if op.BuiltinOptions() is not None:
assert op.BuiltinOptionsType() == BuiltinOptions.CastOptions
op_options = op.BuiltinOptions()
cast_options = CastOptions()
cast_options.Init(op_options.Bytes, op_options.Pos)
cast_dtype = cast_options.OutDataType()
else:
cast_dtype = self.get_output_tensors(op)[0].tensor.Type()
out = _op.cast(in_expr, self.get_tensor_type_str(cast_dtype))
return out
def convert_tile(self, op):
"""tile implementation."""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
in_expr = self.get_expr(input_tensor_idx)
        # reps (tuple of int): the number of times to repeat the tensor data along each axis.
reps = tuple(self.get_tensor_value(input_tensors[1]))
out = _op.tile(in_expr, reps)
return out
def convert_topk_v2(self, op):
"""Convert TFLite TOPK_v2"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
in_expr = self.get_expr(input_tensor_idx)
k = self.get_tensor_value(input_tensors[1])
out = _op.topk(in_expr, int(k))
return out
def convert_pool2d(self, op, pool_type):
"""pool2d implementation."""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.Padding import Padding
from tflite.Pool2DOptions import Pool2DOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors should be 1"
output_tensor = output_tensors[0]
output_tensor_type = output_tensor.tensor.Type()
output_tensor_type_str = self.get_tensor_type_str(output_tensor_type)
assert op.BuiltinOptionsType() == BuiltinOptions.Pool2DOptions
op_options = op.BuiltinOptions()
pool2d_options = Pool2DOptions()
pool2d_options.Init(op_options.Bytes, op_options.Pos)
stride_h = pool2d_options.StrideH()
stride_w = pool2d_options.StrideW()
padding = pool2d_options.Padding()
filter_h = pool2d_options.FilterHeight()
filter_w = pool2d_options.FilterWidth()
fused_activation_fn = pool2d_options.FusedActivationFunction()
params = {
"pool_size": (filter_h, filter_w),
"strides": (stride_h, stride_w),
"padding": [0, 0],
"layout": "NHWC",
}
in_expr = self.get_expr(input_tensor_idx)
_, input_h, input_w, _ = to_int_list(self.get_tensor_shape(input_tensor))
if padding == Padding.VALID:
pass
elif padding == Padding.SAME:
pad_top, pad_bottom = get_pad_value(input_h, filter_h, stride_h)
pad_left, pad_right = get_pad_value(input_w, filter_w, stride_w)
params["padding"] = [pad_top, pad_left, pad_bottom, pad_right]
else:
raise tvm.error.OpAttributeUnImplemented(
"Padding format {} for operator Pool2D is not supported.".format(padding)
)
if pool_type == "average":
if input_tensor.qnn_params:
assert self.has_same_qnn_params(input_tensor, output_tensor), (
"TFLite avg_pool2dreshape requires input and output scale"
"and zero points to be equal"
)
out = _op.cast(in_expr, dtype="int32")
out = _op.nn.avg_pool2d(out, **params)
out = _op.cast(out, dtype=output_tensor_type_str)
else:
out = _op.nn.avg_pool2d(in_expr, **params)
elif pool_type == "max":
if input_tensor.qnn_params:
assert self.has_same_qnn_params(
input_tensor, output_tensor
), "qnn.op.max_pool2d requires input and output qnn params to be same"
out = _op.nn.max_pool2d(in_expr, **params)
elif pool_type == "l2":
# L2_POOL_2D is equivalent to square_root(avg_pool(square(in_data)))
# TFLite does not have support for quantised L2_POOL_2D op.
assert (
not input_tensor.qnn_params
), "As TFLite does not have support for quantized L2_POOL_2D, \
Quantized input is not expected."
exp_type = self.get_tensor_type_str(output_tensor.tensor.Type())
square_exp = _op.power(in_expr, relay.const(2, exp_type))
avg_pool_exp = _op.nn.avg_pool2d(square_exp, **params)
out = _op.sqrt(avg_pool_exp)
else:
raise tvm.error.OpNotImplemented(
"Operator {} is not supported for frontend TFLite.".format(pool_type + " pool")
)
# Handle fused activations
if output_tensor.qnn_params:
scale_val = get_scalar_from_constant(output_tensor.qnn_params["scale"])
zero_point_val = get_scalar_from_constant(output_tensor.qnn_params["zero_point"])
out = self.convert_qnn_fused_activation_function(
expr=out,
fused_activation_fn=fused_activation_fn,
scale=scale_val,
zero_point=zero_point_val,
dtype=output_tensor_type_str,
)
else:
out = self.convert_fused_activation_function(out, fused_activation_fn)
return out
def convert_pad(self, op):
"""Convert TFLite PAD/PADV2 \
TFLite treats PAD and PADV2 operators identically"""
input_tensors = self.get_input_tensors(op)
# TFLite PAD/PADV2 only supports CONSTANT mode
assert (
len(input_tensors) == 2 or len(input_tensors) == 3
), "input tensor's length should be 2 for PAD and 3 for PADV2"
if len(input_tensors) == 3:
assert (
input_tensors[0].tensor.Type() == input_tensors[2].tensor.Type()
), "constant_values tensor must be of same type as input tensor"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
# paddings
pad_list = self.get_tensor_value(input_tensors[1])
# convert list of lists to tuple of tuples
paddings = tuple(tuple(l) for l in pad_list)
# Set the pad value, by default 0, unless constant_values parameter is provided
pad_value = 0
if input_tensor.qnn_params:
# Check that input and output tensor have same qnn params.
output_tensors = self.get_output_tensors(op)
output_tensor = output_tensors[0]
assert self.has_same_qnn_params(
input_tensor, output_tensor
), "TFLite PADV2 requires input and output scale and zero points to be equal"
# The pad value for quantized pad is the input zero point by default.
pad_value = float(input_tensor.qnn_params["zero_point"].data.numpy())
if len(input_tensors) == 3:
pad_value = self.get_tensor_value(input_tensors[2])
if isinstance(pad_value, np.ndarray):
pad_value = pad_value.tolist()
if isinstance(pad_value, list):
assert len(pad_value) == 1, "Only one constant value is expected."
pad_value = pad_value[0]
if input_tensor.qnn_params:
# Check that input tensor and constant_values have same qnn params.
assert self.has_same_qnn_params(
input_tensor, input_tensors[2]
), "TFLite PADV2 requires input and constant_values tensors' \
scale and zero points to be equal"
out = _op.nn.pad(in_expr, pad_width=paddings, pad_value=pad_value)
return out
def convert_floor_div(self, op):
"""Convert TFLite FLOOR_DIV"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized FLOOR DIV operator is not supported yet."
)
return self._convert_elemwise(_op.floor_divide, op)
def convert_floor_mod(self, op):
"""Convert TFLite FLOOR_MOD"""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized FLOOR MOD operator is not supported yet."
)
return self._convert_elemwise(_op.floor_mod, op)
def convert_mirror_pad(self, op):
"""Convert TFLite MIRROR_PAD"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.MirrorPadOptions import MirrorPadOptions
except ImportError:
raise ImportError("The tflite package must be installed")
        # The quantized form of MIRROR_PAD is not yet implemented in TFLite.
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized MIRROR_PAD operator is not supported yet."
)
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
# tensor
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
# paddings
pad_list = self.get_tensor_value(input_tensors[1])
# convert list of lists to tuple of tuples
paddings = tuple(tuple(l.astype(np.int32)) for l in pad_list)
assert op.BuiltinOptionsType() == BuiltinOptions.MirrorPadOptions
op_options = op.BuiltinOptions()
mirror_pad_options = MirrorPadOptions()
mirror_pad_options.Init(op_options.Bytes, op_options.Pos)
mode_byte = mirror_pad_options.Mode()
mode = "REFLECT" if mode_byte == 0 else "SYMMETRIC"
out = _op.nn.mirror_pad(in_expr, paddings, mode)
return out
def convert_pack(self, op):
"""Convert TFLite pack"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.PackOptions import PackOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
if input_tensors[0].qnn_params:
output_tensor = output_tensors[0]
assert self.has_same_qnn_params(
input_tensors[0], output_tensor
), "TFLite pack requires input and output scale and zero points to be equal"
for input_tensor in input_tensors:
assert self.has_same_qnn_params(
input_tensors[0], input_tensor
), "TFLite pack requires all input tensors to have same scale and zero point"
assert op.BuiltinOptionsType() == BuiltinOptions.PackOptions
op_options = op.BuiltinOptions()
pack_options = PackOptions()
pack_options.Init(op_options.Bytes, op_options.Pos)
pack_axis = pack_options.Axis()
pack_values_count = pack_options.ValuesCount()
assert len(input_tensors) == pack_values_count, "Discordance in input values count"
in_exprs = [self.get_tensor_expr(_) for _ in input_tensors]
in_exprs_reshaped = [_op.expand_dims(_, axis=pack_axis, num_newaxis=1) for _ in in_exprs]
out = _op.concatenate(in_exprs_reshaped, pack_axis)
return out
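# Worked example for the PACK conversion above (hypothetical shapes): packing two (2, 3)
# tensors along axis=0 first expands each to (1, 2, 3), then concatenates them into a
# single tensor of shape (2, 2, 3).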
def convert_unpack(self, op):
"""Convert TFLite unpack"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.UnpackOptions import UnpackOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
assert op.BuiltinOptionsType() == BuiltinOptions.UnpackOptions
op_options = op.BuiltinOptions()
unpack_options = UnpackOptions()
unpack_options.Init(op_options.Bytes, op_options.Pos)
num_unpacks = unpack_options.Num()
unpack_axis = unpack_options.Axis()
# Relay doesn't support 'unpack' operator so we use 'split' & 'squeeze' instead.
# We have to do 'squeeze' along the split axis.
# Relay expects squeeze_axis to be List.
squeeze_axis = [unpack_axis]
# Relay doesn't like a TupleWrapper of 1 element, so we isolate the case of unpacking
# a tensor along an axis whose dimension is 1. For reference see convert_split().
# Such unpacking will result in the same tensor so we omit 'split' and only squeeze
# along the axis of dim == 1.
if num_unpacks == 1:
squeezed = _op.squeeze(in_expr, axis=squeeze_axis)
if isinstance(squeezed, _expr.TupleWrapper):
squeezed = squeezed[0]
else:
splitted = _op.split(in_expr, indices_or_sections=num_unpacks, axis=unpack_axis)
squeezed = _expr.TupleWrapper(
_expr.Tuple(
[_op.squeeze(split_item, axis=squeeze_axis) for split_item in splitted]
),
len(splitted),
)
return squeezed
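# Worked example for the UNPACK conversion above (hypothetical shapes): unpacking a (3, 4)
# tensor along axis=0 splits it into three (1, 4) tensors and squeezes axis 0, yielding
# three tensors of shape (4,).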
def convert_unidirectional_sequence_lstm(self, op):
"""Long Short Term Memory for TFLite implementation."""
if self.is_quantized(op):
raise tvm.error.OpNotImplemented(
"TFlite quantized UNIDIRECTIONALSEQUENCELSTM operator is not supported yet."
)
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 24, "input tensors length should be == 24"
# Extract input tensor from saved model
input_tensor = input_tensors[0]
# Extract tensors from input tensors from saved model
# Input weights
input_input_weights = input_tensors[1]
input_forget_weights = input_tensors[2]
input_cell_weights = input_tensors[3]
input_output_weights = input_tensors[4]
# Recurrent weights
recurrent_input_weights = input_tensors[5]
recurrent_forget_weights = input_tensors[6]
recurrent_cell_weights = input_tensors[7]
recurrent_output_weights = input_tensors[8]
# inputs 9, 10, 11, 16, 17, 20, 21, 22, 23 are not occupied
# their locations are -1 in the flatbuffer
# Bias weights
input_gate_bias = input_tensors[12]
forget_gate_bias = input_tensors[13]
cell_gate_bias = input_tensors[14]
output_gate_bias = input_tensors[15]
# State input
output_state_in = input_tensors[18]
cell_state_in = input_tensors[19]
# Extract output tensor from saved model
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
X_steps = self.unbind(input_tensor, axis=1)
weights_dict = {}
# hidden_state_weights is equivalent to output_state_in in tflite model
out_state_in_shape = tuple(self.get_tensor_shape(output_state_in))
out_state_in_dtype = self.get_tensor_type_str(output_state_in.tensor.Type())
out_state_in_expr = _op.zeros(out_state_in_shape, dtype=out_state_in_dtype)
weights_dict["hidden_state"] = _op.split(out_state_in_expr, 1)[0]
# cell_state_weights is equivalent to cell_state_in in the tflite model
cell_state_in_shape = tuple(self.get_tensor_shape(cell_state_in))
cell_state_in_dtype = self.get_tensor_type_str(cell_state_in.tensor.Type())
cell_state_in_expr = _op.zeros(cell_state_in_shape, dtype=cell_state_in_dtype)
weights_dict["cell_state"] = _op.split(cell_state_in_expr, 1)[0]
# Process weight matrix of input: w_inp
# Concatenate of [input_input_weight, input_forget_weights,
# input_cell_weights, input_output_weights]
input_input_weights_default_values = self.get_tensor_value(input_input_weights)
input_input_weights_op = _op.split(
_op.const(input_input_weights_default_values.tolist()), 1
)
input_output_weights_default_values = self.get_tensor_value(input_output_weights)
input_output_weights_op = _op.split(
_op.const(input_output_weights_default_values.tolist()), 1
)
input_forget_weights_default_values = self.get_tensor_value(input_forget_weights)
input_forget_weights_op = _op.split(
_op.const(input_forget_weights_default_values.tolist()), 1
)
input_cell_weights_default_values = self.get_tensor_value(input_cell_weights)
input_cell_weights_op = _op.split(_op.const(input_cell_weights_default_values.tolist()), 1)
weights_dict["w_inp"] = _op.concatenate(
[
_op.squeeze(input_input_weights_op[0]),
_op.squeeze(input_forget_weights_op[0]),
_op.squeeze(input_cell_weights_op[0]),
_op.squeeze(input_output_weights_op[0]),
],
axis=0,
)
# Process weight matrix of hidden state:
# w_hid to support lstm_cell function. Not used in tflite
recurrent_input_weights_values = self.get_tensor_value(recurrent_input_weights)
recurrent_input_weights_op = _op.split(
_op.const(recurrent_input_weights_values.tolist()), 1
)
recurrent_output_weights_values = self.get_tensor_value(recurrent_output_weights)
recurrent_output_weights_op = _op.split(
_op.const(recurrent_output_weights_values.tolist()), 1
)
recurrent_forget_weights_values = self.get_tensor_value(recurrent_forget_weights)
recurrent_forget_weights_op = _op.split(
_op.const(recurrent_forget_weights_values.tolist()), 1
)
recurrent_cell_weights_values = self.get_tensor_value(recurrent_cell_weights)
recurrent_cell_weights_op = _op.split(_op.const(recurrent_cell_weights_values.tolist()), 1)
weights_dict["w_hid"] = _op.concatenate(
[
recurrent_input_weights_op[0],
recurrent_forget_weights_op[0],
recurrent_cell_weights_op[0],
recurrent_output_weights_op[0],
],
axis=0,
)
# Process weight matrix of bias: b_inp
input_gate_bias_values = self.get_tensor_value(input_gate_bias)
input_gate_bias_op = _op.split(_op.const(input_gate_bias_values.tolist()), 1)
output_gate_bias_values = self.get_tensor_value(output_gate_bias)
output_gate_bias_op = _op.split(_op.const(output_gate_bias_values.tolist()), 1)
forget_gate_bias_values = self.get_tensor_value(forget_gate_bias)
forget_gate_bias_op = _op.split(_op.const(forget_gate_bias_values.tolist()), 1)
cell_gate_bias_values = self.get_tensor_value(cell_gate_bias)
cell_gate_bias_op = _op.split(_op.const(cell_gate_bias_values.tolist()), 1)
weights_dict["b_inp"] = _op.concatenate(
[
input_gate_bias_op[0],
forget_gate_bias_op[0],
cell_gate_bias_op[0],
output_gate_bias_op[0],
],
axis=0,
)
# Process weight matrix of hidden bias:
# b_hid (with the same shape as b_inp)
gate_bias_dtype = self.get_tensor_type_str(input_gate_bias.tensor.Type())
weights_dict["b_hid"] = _op.split(
_op.const(
np.zeros(_infer_shape(weights_dict["b_inp"]), dtype=gate_bias_dtype),
dtype=gate_bias_dtype,
),
1,
)[0]
outputs, _, _ = lstm_cell(input_seqs=X_steps, **weights_dict)
output = _op.stack(outputs, axis=1)
return output
def convert_batch_to_space_nd(self, op):
"""batch_to_space_nd implementation."""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 3, "input tensors length should be 3"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
in_expr = self.get_expr(input_tensor_idx)
block_shape = list(self.get_tensor_value(input_tensors[1]))
crops = self.get_tensor_value(input_tensors[2]).tolist()
out = _op.nn.batch_to_space_nd(in_expr, block_shape, crops)
return out
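# Worked example (hypothetical shapes): an input of shape (4, 1, 1, 1) with block_shape
# [2, 2] and crops [[0, 0], [0, 0]] produces an output of shape (1, 2, 2, 1); the batch
# dimension shrinks by prod(block_shape) while the spatial dimensions grow by block_shape.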
def convert_space_to_batch_nd(self, op):
"""space_to_batch_nd implementation."""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 3, "input tensors length should be 3"
input_tensor = input_tensors[0]
input_tensor_idx = input_tensor.tensor_idx
in_expr = self.get_expr(input_tensor_idx)
block_shape = list(self.get_tensor_value(input_tensors[1]))
paddings = self.get_tensor_value(input_tensors[2]).tolist()
out = _op.nn.space_to_batch_nd(in_expr, block_shape, paddings)
return out
def convert_depth_to_space(self, op):
"""Convert TFLite DEPTH_TO_SPACE"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.DepthToSpaceOptions import DepthToSpaceOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
assert op.BuiltinOptionsType() == BuiltinOptions.DepthToSpaceOptions
op_options = op.BuiltinOptions()
depth_to_space_options = DepthToSpaceOptions()
depth_to_space_options.Init(op_options.Bytes, op_options.Pos)
block_size = depth_to_space_options.BlockSize()
out = _op.nn.depth_to_space(in_expr, block_size, layout="NHWC")
return out
def convert_space_to_depth(self, op):
"""Convert TFLite SPACE_TO_DEPTH"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.SpaceToDepthOptions import SpaceToDepthOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
assert op.BuiltinOptionsType() == BuiltinOptions.SpaceToDepthOptions
op_options = op.BuiltinOptions()
space_to_depth_options = SpaceToDepthOptions()
space_to_depth_options.Init(op_options.Bytes, op_options.Pos)
block_size = space_to_depth_options.BlockSize()
out = _op.nn.space_to_depth(in_expr, block_size, layout="NHWC")
return out
def convert_sparse_to_dense(self, op):
"""Convert TFLite SPARSE_TO_DENSE"""
try:
from tflite.TensorType import TensorType
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 4, "input tensors length should be 4"
indices, values = input_tensors[0], input_tensors[2]
default_value = input_tensors[3]
output_shape = input_tensors[1]
for t in input_tensors:
assert not t.qnn_params, "Quantized input is not expected."
for t in [indices, output_shape]:
t_type = t.tensor.Type()
assert t_type in (TensorType.INT32, TensorType.INT64)
out = _op.sparse_to_dense(
self.get_tensor_expr(indices),
list(self.get_tensor_value(output_shape)),
self.get_tensor_expr(values),
self.get_tensor_expr(default_value),
)
return out
def convert_prelu(self, op):
"""Convert TFLite PReLU"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
input_tensor = input_tensors[0]
alpha_tensor = input_tensors[1]
if self.has_expr(alpha_tensor.tensor_idx):
alpha_expr = self.get_expr(alpha_tensor.tensor_idx)
else:
alpha_tensor_type = alpha_tensor.tensor.Type()
alpha_tensor_type_str = self.get_tensor_type_str(alpha_tensor_type)
alpha_expr = self.exp_tab.new_const(
self.get_tensor_value(alpha_tensor), dtype=alpha_tensor_type_str
)
in_expr = self.get_expr(input_tensor.tensor_idx)
data_shape = to_int_list(self.get_tensor_shape(input_tensor))
alpha_expr = _op.broadcast_to(alpha_expr, data_shape)
alpha_expr = _op.reshape(alpha_expr, [-1])
out = _op.nn.prelu(_op.reshape(in_expr, [-1]), alpha_expr, axis=0)
out = _op.reshape(out, data_shape)
return out
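# Worked example for the PReLU conversion above (hypothetical shapes): with an NHWC input of
# shape (1, 4, 4, 3) and a per-channel alpha of shape (3,), alpha is broadcast to
# (1, 4, 4, 3), both are flattened to (48,), nn.prelu is applied with axis=0, and the result
# is reshaped back to (1, 4, 4, 3).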
def convert_transpose_conv(self, op):
"""Convert TFLite TRANSPOSE_CONV"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.Padding import Padding
from tflite.TensorType import TensorType
from tflite.TransposeConvOptions import TransposeConvOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) >= 3, "input tensors length should be >= 3"
# Input (data) Tensor. NHWC layout
input_tensor = input_tensors[2]
_, _, _, input_c = to_int_list(self.get_tensor_shape(input_tensor))
# Weights tensor. TFLite uses OHWI layout
weights_tensor = input_tensors[1]
out_channels, kernel_h, kernel_w, in_channels = to_int_list(
self.get_tensor_shape(weights_tensor)
)
assert (
input_c == in_channels
), "Input channel in the filter should match to channel in the input"
# output_shape Tensor. NHWC layout
output_shape_tensor = input_tensors[0]
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
output_tensor_type = output_tensor.tensor.Type()
output_tensor_type_str = self.get_tensor_type_str(output_tensor_type)
assert op.BuiltinOptionsType() == BuiltinOptions.TransposeConvOptions
op_options = op.BuiltinOptions()
deconv_options = TransposeConvOptions()
deconv_options.Init(op_options.Bytes, op_options.Pos)
padding = deconv_options.Padding()
stride_h = deconv_options.StrideH()
stride_w = deconv_options.StrideW()
assert padding in (
Padding.VALID,
Padding.SAME,
), "Padding format {} is not supported for operator TRANSPOSE_CONV".format(padding)
# Data
in_expr = self.get_expr(input_tensor.tensor_idx)
# Weights
weights_tensor_type = weights_tensor.tensor.Type()
# weights tensor type should be INT8/UINT8 (quantization) or FLOAT32
assert weights_tensor_type in (TensorType.INT8, TensorType.UINT8, TensorType.FLOAT32)
weight_tensor_type_str = self.get_tensor_type_str(weights_tensor_type)
if self.has_expr(weights_tensor.tensor_idx):
weight_expr_iohw = self.get_expr(weights_tensor.tensor_idx)
weight_expr_iohw = _op.transpose(weight_expr_iohw, axes=(3, 0, 1, 2))
else:
weight_value_ohwi = self.get_tensor_value(weights_tensor)
# Relay kernel_layout should be OIHW
# Relay weights layout should be different from kernel_layout - it should be IOHW
weight_value_iohw = np.transpose(weight_value_ohwi, (3, 0, 1, 2))
weight_expr_iohw = self.exp_tab.new_const(
weight_value_iohw, dtype=weight_tensor_type_str
)
# Output shape value
output_shape_value = self.get_tensor_value(output_shape_tensor)
# Relay expects filter output channel to match to output tensor channel.
assert (
out_channels == output_shape_value[3]
), "Output channel in the filter should match to channel in the output_shape"
if padding == Padding.SAME:
output_h, output_w = output_shape_value[1], output_shape_value[2]
pad_top, pad_bottom = get_pad_value(output_h, kernel_h, stride_h)
pad_left, pad_right = get_pad_value(output_w, kernel_w, stride_w)
padding = (pad_top, pad_left, pad_bottom, pad_right)
else:
padding = (0, 0, 0, 0)
if input_tensor.qnn_params:
input_zero_point = input_tensor.qnn_params["zero_point"]
kernel_zero_point = weights_tensor.qnn_params["zero_point"]
input_scale = input_tensor.qnn_params["scale"]
kernel_scale = weights_tensor.qnn_params["scale"]
out = _qnn.op.conv2d_transpose(
in_expr,
weight_expr_iohw,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
strides=(stride_h, stride_w),
padding=padding,
channels=int(out_channels),
kernel_size=(int(kernel_h), int(kernel_w)),
data_layout="NHWC",
kernel_layout="IOHW",
out_dtype="int32",
)
else:
out = _op.nn.conv2d_transpose(
in_expr,
weight_expr_iohw,
strides=(stride_h, stride_w),
padding=padding,
channels=int(out_channels),
kernel_size=(int(kernel_h), int(kernel_w)),
data_layout="NHWC",
kernel_layout="IOHW",
out_dtype=output_tensor_type_str,
)
# Checking if there is a fused bias
if len(input_tensors) == 4:
bias_tensor = input_tensors[3]
bias_tensor_type = bias_tensor.tensor.Type()
# bias tensor type should be INT32 (quantization) or FLOAT32
assert bias_tensor_type in (TensorType.INT32, TensorType.FLOAT32)
bias_tensor_type_str = self.get_tensor_type_str(bias_tensor_type)
if self.has_expr(bias_tensor.tensor_idx):
bias_expr = self.get_expr(bias_tensor.tensor_idx)
else:
bias_expr = self.exp_tab.new_const(
self.get_tensor_value(bias_tensor), dtype=bias_tensor_type_str
)
channel_axis = 3
out = _op.nn.bias_add(out, bias_expr, axis=channel_axis)
if output_tensor.qnn_params:
# Calculate the intermediate scale and zero point of the int32 output.
data_scale = input_tensor.qnn_params["scale"]
data_scale_val = get_scalar_from_constant(data_scale)
weight_scale = weights_tensor.qnn_params["scale"]
# If weight scale is scalar, it is per-tensor quantization
if isinstance(weight_scale, float):
weight_scale_val = get_scalar_from_constant(weight_scale)
else:
weight_scale_val = get_tensor_from_constant(weight_scale)
new_input_scale_val = data_scale_val * weight_scale_val
new_input_scale = relay.const(new_input_scale_val, "float32")
new_input_zero_point = relay.const(0, "int32")
out = _qnn.op.requantize(
out,
input_scale=new_input_scale,
input_zero_point=new_input_zero_point,
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
out_dtype=output_tensor_type_str,
axis=3,
)
return out
def convert_quantize(self, op):
"""Convert TFLite Quantize"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
input_tensor_type_str = self.get_tensor_type_str(input_tensor.tensor.Type())
in_expr = self.get_tensor_expr(input_tensor)
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
output_tensor_type_str = self.get_tensor_type_str(output_tensor.tensor.Type())
# The output must be quantized
assert output_tensor.qnn_params
# TFLite Quantize op can also act as Requantize op
if input_tensor_type_str == "float32":
out = self.quantize(in_expr, output_tensor)
else:
out = _qnn.op.requantize(
in_expr,
input_scale=input_tensor.qnn_params["scale"],
input_zero_point=input_tensor.qnn_params["zero_point"],
output_scale=output_tensor.qnn_params["scale"],
output_zero_point=output_tensor.qnn_params["zero_point"],
out_dtype=output_tensor_type_str,
)
return out
def convert_dequantize(self, op):
"""Convert TFLite Dequantize"""
try:
from tflite.TensorType import TensorType
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
if input_tensor.tensor.Type() == TensorType.FLOAT16:
dtype = self.get_tensor_type_str(input_tensor.tensor.Type())
input_value = self.get_tensor_value(input_tensor)
in_expr = self.exp_tab.new_const(input_value, dtype=dtype)
out = relay.cast(in_expr, dtype="float32")
return out
in_expr = self.get_expr(input_tensor.tensor_idx)
# The input must be quantized
assert input_tensor.qnn_params
# Dequantize the input.
out = self.dequantize(in_expr, input_tensor)
return out
def convert_detection_postprocess(self, op):
"""Convert TFLite_Detection_PostProcess"""
flexbuffer = op.CustomOptionsAsNumpy().tobytes()
custom_options = FlexBufferDecoder(flexbuffer).decode()
if "use_regular_nms" in custom_options:
if custom_options["use_regular_nms"]:
raise tvm.error.OpAttributeUnImplemented(
"use_regular_nms=True is not yet supported for operator {}.".format(
"TFLite_Detection_PostProcess"
)
)
inputs = self.get_input_tensors(op)
assert len(inputs) == 3, "inputs length should be 3"
cls_pred = self.get_expr(inputs[1].tensor_idx)
loc_prob = self.get_expr(inputs[0].tensor_idx)
batch_size = inputs[1].tensor.Shape(0)
anchor_values = self.get_tensor_value(inputs[2])
anchor_boxes = len(anchor_values)
anchor_type = self.get_tensor_type_str(inputs[2].tensor.Type())
anchor_expr = self.exp_tab.new_const(anchor_values, dtype=anchor_type)
if inputs[0].qnn_params:
loc_prob = _qnn.op.dequantize(
data=loc_prob,
input_scale=inputs[0].qnn_params["scale"],
input_zero_point=inputs[0].qnn_params["zero_point"],
)
if inputs[1].qnn_params:
cls_pred = _qnn.op.dequantize(
data=cls_pred,
input_scale=inputs[1].qnn_params["scale"],
input_zero_point=inputs[1].qnn_params["zero_point"],
)
if inputs[2].qnn_params:
anchor_expr = _qnn.op.dequantize(
data=anchor_expr,
input_scale=inputs[2].qnn_params["scale"],
input_zero_point=inputs[2].qnn_params["zero_point"],
)
# reshape the cls_pred and loc_prob tensors so
# they can be consumed by multibox_transform_loc
cls_pred = _op.transpose(cls_pred, [0, 2, 1])
# loc_prob coords are in yxhw format
# need to convert to xywh
loc_coords = _op.split(loc_prob, 4, axis=2)
loc_prob = _op.concatenate(
[loc_coords[1], loc_coords[0], loc_coords[3], loc_coords[2]], axis=2
)
loc_prob = _op.reshape(loc_prob, [batch_size, anchor_boxes * 4])
# anchor coords are in yxhw format
# need to convert to ltrb
anchor_coords = _op.split(anchor_expr, 4, axis=1)
anchor_y = anchor_coords[0]
anchor_x = anchor_coords[1]
anchor_h = anchor_coords[2]
anchor_w = anchor_coords[3]
plus_half = _expr.const(0.5, dtype="float32")
minus_half = _expr.const(-0.5, dtype="float32")
anchor_l = _op.add(anchor_x, _op.multiply(anchor_w, minus_half))
anchor_r = _op.add(anchor_x, _op.multiply(anchor_w, plus_half))
anchor_t = _op.add(anchor_y, _op.multiply(anchor_h, minus_half))
anchor_b = _op.add(anchor_y, _op.multiply(anchor_h, plus_half))
anchor_expr = _op.concatenate([anchor_l, anchor_t, anchor_r, anchor_b], axis=1)
anchor_expr = _op.expand_dims(anchor_expr, 0)
# attributes for multibox_transform_loc
multibox_transform_loc_attrs = {}
multibox_transform_loc_attrs["clip"] = False
multibox_transform_loc_attrs["threshold"] = custom_options["nms_score_threshold"]
multibox_transform_loc_attrs["variances"] = (
1 / custom_options["x_scale"],
1 / custom_options["y_scale"],
1 / custom_options["w_scale"],
1 / custom_options["h_scale"],
)
# attributes for non_max_suppression
non_max_suppression_attrs = {}
non_max_suppression_attrs["return_indices"] = False
non_max_suppression_attrs["iou_threshold"] = custom_options["nms_iou_threshold"]
non_max_suppression_attrs["force_suppress"] = True
non_max_suppression_attrs["top_k"] = anchor_boxes
non_max_suppression_attrs["max_output_size"] = custom_options["max_detections"]
non_max_suppression_attrs["invalid_to_bottom"] = False
ret = _op.vision.multibox_transform_loc(
cls_pred, loc_prob, anchor_expr, **multibox_transform_loc_attrs
)
ret = _op.vision.non_max_suppression(ret[0], ret[1], ret[1], **non_max_suppression_attrs)
ret = _op.vision.get_valid_counts(ret, 0)
valid_count = ret[0]
# keep only the top 'max_detections' rows
ret = _op.strided_slice(
ret[1], [0, 0, 0], [batch_size, custom_options["max_detections"], 6]
)
# the output needs some reshaping to match tflite
ret = _op.split(ret, 6, axis=2)
cls_ids = _op.reshape(ret[0], [batch_size, -1])
scores = _op.reshape(ret[1], [batch_size, -1])
boxes = _op.concatenate([ret[3], ret[2], ret[5], ret[4]], axis=2)
ret = _expr.TupleWrapper(_expr.Tuple([boxes, cls_ids, scores, valid_count]), size=4)
return ret
def convert_nms_v5(self, op):
"""Convert TFLite NonMaxSuppressionV5"""
# https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/non-max-suppression-v5
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 6, "input tensor length should be 6"
boxes = self.get_expr(input_tensors[0].tensor_idx)
scores = self.get_expr(input_tensors[1].tensor_idx)
max_output_size = self.get_tensor_value(input_tensors[2])
iou_threshold = self.get_tensor_value(input_tensors[3])
score_threshold = self.get_tensor_value(input_tensors[4])
soft_nms_sigma = self.get_tensor_value(input_tensors[5])
if isinstance(max_output_size, np.ndarray):
assert max_output_size.size == 1, "only one value is expected."
max_output_size = int(max_output_size)
if isinstance(iou_threshold, np.ndarray):
assert iou_threshold.size == 1, "only one value is expected."
iou_threshold = float(iou_threshold)
if isinstance(score_threshold, np.ndarray):
assert score_threshold.size == 1, "only one value is expected."
score_threshold = float(score_threshold)
if isinstance(soft_nms_sigma, np.ndarray):
assert soft_nms_sigma.size == 1, "only one value is expected."
soft_nms_sigma = float(soft_nms_sigma)
if soft_nms_sigma != 0.0:
raise tvm.error.OpNotImplemented(
"It is soft_nms when soft_nms_sigma != 0, which is not supported!"
)
scores_expand = _op.expand_dims(scores, axis=-1, num_newaxis=1)
data = _op.concatenate([scores_expand, boxes], -1)
data = _op.expand_dims(data, axis=0, num_newaxis=1)
count, data, indices = _op.vision.get_valid_counts(
data, score_threshold=score_threshold, id_index=-1, score_index=0
)
nms_ret = _op.vision.non_max_suppression(
data=data,
valid_count=count,
indices=indices,
max_output_size=max_output_size,
iou_threshold=iou_threshold,
force_suppress=True,
top_k=-1,
coord_start=1,
score_index=0,
id_index=-1,
return_indices=True,
invalid_to_bottom=False,
)
selected_indices = _op.squeeze(nms_ret[0], axis=[0])
selected_indices = _op.strided_slice(selected_indices, [0], [max_output_size])
valide_num = _op.squeeze(nms_ret[1], axis=[1])
selected_scores = _op.take(scores, selected_indices, axis=0)
out = _expr.TupleWrapper(_expr.Tuple([selected_indices, selected_scores, valide_num]), 3)
return out
def convert_expand_dims(self, op):
"""Convert TFLite EXPAND_DIMS"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensors length should be 2"
if input_tensors[0].qnn_params:
# Check that input and output tensor have same qnn params.
output_tensors = self.get_output_tensors(op)
assert self.has_same_qnn_params(
input_tensors[0], output_tensors[0]
), "TFLite EXPAND_DIMS requires input and output tensors' \
scale and zero points to be equal"
input_expr = self.get_tensor_expr(input_tensors[0])
axis = self.get_tensor_value(input_tensors[1])
if isinstance(axis, np.ndarray):
assert axis.size == 1, "only one value is expected."
axis = int(axis)
ndims = len(input_tensors[0].tensor.ShapeAsNumpy())
assert -1 - ndims <= axis <= ndims, "axis out of range"
out = _op.expand_dims(input_expr, axis, 1)
return out
def convert_one_hot(self, op):
"""Convert TFLite ONE_HOT"""
try:
from tflite.BuiltinOptions import BuiltinOptions
from tflite.OneHotOptions import OneHotOptions
except ImportError:
raise ImportError("The tflite package must be installed")
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 4, "Input tensor's length should be 4"
# Ensuring input isn't quantized
assert all(not i.qnn_params for i in input_tensors), "Quantized input is not expected."
# TFlite ONE_HOT requires both on_value
# and off_value, making dtype redundant.
indices = input_tensors[0]
depth = input_tensors[1]
on_value = input_tensors[2]
off_value = input_tensors[3]
assert (
on_value.tensor.Type() == off_value.tensor.Type()
), "on_value and off_value should be the same type"
# Getting relay expr
indices_expr = self.get_expr(indices.tensor_idx)
on_value_expr = self.get_expr(on_value.tensor_idx)
off_value_expr = self.get_expr(off_value.tensor_idx)
# Getting depth value
depth = self.get_tensor_value(depth)
if isinstance(depth, np.ndarray):
depth = int(depth)
# Getting Axis from Option (Attributes)
assert op.BuiltinOptionsType() == BuiltinOptions.OneHotOptions
op_options = op.BuiltinOptions()
one_hot_options = OneHotOptions()
one_hot_options.Init(op_options.Bytes, op_options.Pos)
axis = one_hot_options.Axis()
# Setting dtype
dtype = self.get_tensor_type_str(on_value.tensor.Type())
out = _op.one_hot(indices_expr, on_value_expr, off_value_expr, depth, axis, dtype)
return out
def convert_reverse_v2(self, op):
"""Convert TFLite REVERSE_V2"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensor's length should be 2"
input_expr = self.get_expr(input_tensors[0].tensor_idx)
# Getting axis value
axis = self.get_tensor_value(input_tensors[1])
if isinstance(axis, np.ndarray):
assert len(axis) == 1, "TFLite does not support multi-axis yet"
axis = int(axis)
out = _op.reverse(input_expr, axis)
return out
def convert_matrix_set_diag(self, op):
"""Convert TFLite MATRIX_SET_DIAG"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 2, "input tensor's length should be 2"
assert (
input_tensors[0].tensor.Type() == input_tensors[1].tensor.Type()
), "input and diagonal should be the same type of tensors"
if input_tensors[0].qnn_params:
# Check that input and output tensor have same qnn params.
output_tensors = self.get_output_tensors(op)
assert self.has_same_qnn_params(
input_tensors[0], output_tensors[0]
), "TFLite MATRIX_SET_DIAG requires input and output tensors' \
scale and zero points to be equal"
# Check that input and diagonal tensor have same qnn params.
assert self.has_same_qnn_params(
input_tensors[0], input_tensors[1]
), "TFLite MATRIX_SET_DIAG requires input and diagonal tensors' \
scale and zero points to be equal"
input_expr = self.get_tensor_expr(input_tensors[0])
diagonal_expr = self.get_tensor_expr(input_tensors[1])
out = _op.matrix_set_diag(input_expr, diagonal_expr)
return out
def convert_matrix_diag(self, op):
"""Convert TFLite MATRIX_DIAG"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensor's length should be 1"
diagonal = input_tensors[0]
if diagonal.qnn_params:
# Check that diagonal and output tensor have same qnn params.
output_tensors = self.get_output_tensors(op)
assert self.has_same_qnn_params(
diagonal, output_tensors[0]
), "TFLite MATRIX_DIAG requires diagonal and output tensors' \
scale and zero points to be equal"
shape = to_int_list(self.get_tensor_shape(diagonal))
shape = np.append(shape, shape[-1])
dtype = self.get_tensor_type_str(diagonal.tensor.Type())
input_expr = _op.zeros(tuple(shape), dtype)
diagonal_expr = self.get_tensor_expr(diagonal)
out = _op.matrix_set_diag(input_expr, diagonal_expr)
return out
def convert_densify(self, op):
"""Convert TFLite DENSIFY"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
output_tensors = self.get_output_tensors(op)
assert len(output_tensors) == 1, "output tensors length should be 1"
output_tensor = output_tensors[0]
sparse_weight_tensor = input_tensors[0]
sparse_weight_tensor_type_str = self.get_tensor_type_str(sparse_weight_tensor.tensor.Type())
# NOTE: With current implementation in TFLite, Densify Op does not need to be present
# in runtime.
# TODO(ANSHUMAN87): once the stack corruption issue in the sparse_to_dense Op is resolved,
# use the sparse_indices output of the function below as the input to sparse_to_dense.
_, dense_weight = prepare_dense_matrix_from_sparse(
sparse_weight_tensor.tensor,
self.get_tensor_value(sparse_weight_tensor, is_sparse=True),
sparse_weight_tensor_type_str,
)
self.set_prefetched_node(output_tensor.tensor_idx, dense_weight)
def convert_fake_quant(self, op):
"""Convert TFLite FAKE_QUANT"""
input_tensors = self.get_input_tensors(op)
assert len(input_tensors) == 1, "input tensors length should be 1"
input_tensor = input_tensors[0]
in_expr = self.get_expr(input_tensor.tensor_idx)
from tflite.BuiltinOptions import BuiltinOptions
from tflite.FakeQuantOptions import FakeQuantOptions
assert op.BuiltinOptionsType() == BuiltinOptions.FakeQuantOptions
op_options = op.BuiltinOptions()
fake_quant_options = FakeQuantOptions()
fake_quant_options.Init(op_options.Bytes, op_options.Pos)
opt_min = fake_quant_options.Min()
opt_max = fake_quant_options.Max()
narrow_range = fake_quant_options.NarrowRange()
num_bits = fake_quant_options.NumBits()
assert 2 <= num_bits <= 16
quant_min = 1 if narrow_range else 0
quant_max = (1 << num_bits) - 1
scale = (opt_max - opt_min) / (quant_max - quant_min)
zero_point_from_min = quant_min - opt_min / scale
if zero_point_from_min <= quant_min:
nudged_zero_point = quant_min
elif zero_point_from_min >= quant_max:
nudged_zero_point = quant_max
else:
nudged_zero_point = round(zero_point_from_min)
nudged_min = (quant_min - nudged_zero_point) * scale
nudged_max = (quant_max - nudged_zero_point) * scale
nudged_min_expr = _op.const(nudged_min)
clamped = _op.clip(in_expr, nudged_min, nudged_max)
clamped_shifted = _op.subtract(clamped, nudged_min_expr)
half = _op.const(0.5)
one = _op.const(1.0)
scale_expr = _op.const(scale)
inv_scale = _op.divide(one, scale_expr)
rounded = _op.floor(_op.add(_op.multiply(clamped_shifted, inv_scale), half))
return _op.add(_op.multiply(rounded, scale_expr), nudged_min_expr)
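# Worked example of the nudging arithmetic above (hypothetical attribute values): with
# min=-1.0, max=1.0, num_bits=8 and narrow_range=False, quant_min=0, quant_max=255 and
# scale=2/255~=0.00784; zero_point_from_min=127.5 rounds to a nudged zero point of 128, so
# the representable range becomes roughly [-1.0039, 0.9961], and inputs are clipped to it
# and rounded to the nearest multiple of scale.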
def get_expr(self, input_tensor_idx):
return self.exp_tab.get_expr(get_tensor_name(self.subgraph, input_tensor_idx))
def has_expr(self, input_tensor_idx):
return self.exp_tab.has_expr(get_tensor_name(self.subgraph, input_tensor_idx))
def is_prefetched(self, input_tensor_idx):
return (
self.prefetched_nodes.get(get_tensor_name(self.subgraph, input_tensor_idx)) is not None
)
def set_prefetched_node(self, input_tensor_idx, value):
self.prefetched_nodes[get_tensor_name(self.subgraph, input_tensor_idx)] = value
def get_prefetched_node(self, input_tensor_idx):
return self.prefetched_nodes[get_tensor_name(self.subgraph, input_tensor_idx)]
def get_tensor_expr(self, tensor, is_sparse=False):
"""Return the Relay expr for tensor."""
if self.has_expr(tensor.tensor_idx):
expr = self.get_expr(tensor.tensor_idx)
else:
type_str = self.get_tensor_type_str(tensor.tensor.Type())
expr = self.exp_tab.new_const(self.get_tensor_value(tensor, is_sparse), dtype=type_str)
return expr
def get_tensor_shape(self, tensor_wrapper):
"""Returns tensor shape. Infers shape if the shape is empty."""
assert isinstance(tensor_wrapper, TensorWrapper), "Expecting TensorWrapper here"
return (
tensor_wrapper.tensor.ShapeAsNumpy()
if tensor_wrapper.tensor.ShapeLength() > 0
else _infer_shape(self.get_tensor_expr(tensor_wrapper))
)
# pylint: disable=no-else-return
def prepare_dense_matrix_from_sparse(sparse_tensor, sparse_tensor_value, sparse_tensor_type):
"""Prepare sparse indices and dense matrix from TFLite sparse parameters."""
# The function is implemented based on TFLite sparse parameter specifications
# Please refer
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/schema/schema.fbs#L89
# for details about each parameter
sparsity = sparse_tensor.Sparsity()
dense_shape = sparse_tensor.ShapeAsNumpy()
orig_rank = len(dense_shape)
# The traversal order of the dimensions defined in the `shape` field of the to-be-densified tensor.
traversal_order = sparsity.TraversalOrderAsNumpy()
# For an n-dimensional tensor with a k-dimensional block (0 <= k <= n),
# stores how a block dimension in (dn, ..., dn+k-1) maps to the original
# tensor dimension in (d0, ..., dn). It's stored in the order of (dn, ..., dn+k-1).
# If not block-sparse, this field is NULL.
block_map = sparsity.BlockMapAsNumpy()
total_rank = sparsity.TraversalOrderLength()
dense_mat = np.full(shape=dense_shape, fill_value=0, dtype=sparse_tensor_type).flatten()
from enum import Enum
# NOTE: Here the Vector term is borrowed from TFLite spec.
class VectorType(Enum):
Empty = 0
Int32 = 1
Uint16 = 2
Uint8 = 3
def _get_vector_flag(v_type):
if VectorType(v_type) == VectorType.Int32:
return N.Int32Flags
elif VectorType(v_type) == VectorType.Uint16:
return N.Uint16Flags
elif VectorType(v_type) == VectorType.Uint8:
return N.Uint8Flags
else:
raise tvm.error.OpNotImplemented("The provided type {} is not supported".format(v_type))
def _get_flattened_index(indices, shape):
index = 0
sub_elements = 1
for i in reversed(range(0, len(dense_shape))):
index += indices[i] * sub_elements
sub_elements *= shape[i]
return index
# DimensionMetadata per dimension: the metadata needed for
# each dimension to locate the non-zero values in the original dense tensor
# inline with traversal order parameter.
#
# sp_format has 2 possible values: {DENSE = 0, SPARSE_CSR = 1}
# If format = DENSE{0} : DenseSize represents size of that dimension
# If format = SPARSE_CSR{1} : array_segments represents how to segment the indices array,
# each segment corresponds to one element in the previous dimension. array_indices
# represents the index of the non-zero elements within this dimension
# (as those in the CSR matrix format, where the first array is row pointers
# and the second array is column indices).
sp_format = np.zeros(sparsity.DimMetadataLength())
dim_metadata = [None] * (2 * sparsity.DimMetadataLength())
# The loop below fetches all metadata per dimension based on the format type
# (Dense or Sparse) and puts it in a format-agnostic array for easy access
# while preparing the dense buffer or indices.
for i in range(sparsity.DimMetadataLength()):
sp_format[i] = sparsity.DimMetadata(i).Format()
if sp_format[i] == 0:
dim_metadata[2 * i] = [sparsity.DimMetadata(i).DenseSize()]
else:
from flatbuffers import number_types as N
dim_metadata[2 * i] = (
sparsity.DimMetadata(i)
.ArraySegments()
.GetVectorAsNumpy(
flags=_get_vector_flag(sparsity.DimMetadata(i).ArraySegmentsType()), off=4
)
)
dim_metadata[2 * i + 1] = (
sparsity.DimMetadata(i)
.ArrayIndices()
.GetVectorAsNumpy(
flags=_get_vector_flag(sparsity.DimMetadata(i).ArrayIndicesType()), off=4
)
)
block_dim = 0
block_size = np.zeros(sparsity.BlockMapLength())
# Block size parameter if encoded in BSR format
for i in range(orig_rank):
if block_dim < sparsity.BlockMapLength() and block_map[block_dim] == i:
orig_dim = traversal_order[orig_rank + block_dim]
block_size[block_dim] = sparsity.DimMetadata(orig_dim).DenseSize()
block_dim += 1
indices_list = []
# The function below iterates through the applicable indices of each dimension
# based on the specified format type and finally produces the dense matrix and the non-zero indices.
def _def_prepare_dense_matrix_from_sparse(indices, level, prev_idx):
if level == len(indices):
start_pos = 0
orig_idx = np.zeros(orig_rank, dtype="int32")
while start_pos < orig_rank:
orig_idx[traversal_order[start_pos]] = indices[start_pos]
start_pos += 1
while start_pos < len(indices):
block_idx = traversal_order[start_pos] - orig_rank
orig_dim = block_map[block_idx]
orig_idx[orig_dim] = orig_idx[orig_dim] * block_size[block_idx] + indices[start_pos]
start_pos += 1
indices_list.append(orig_idx)
nonlocal value_idx
dense_mat[_get_flattened_index(orig_idx, dense_shape)] = sparse_tensor_value[value_idx]
value_idx += 1
else:
metadata_idx = 2 * level
if sp_format[level] == 0:
shape_of_level = dim_metadata[metadata_idx][0]
for idx in range(shape_of_level):
indices[level] = idx
_def_prepare_dense_matrix_from_sparse(
indices, level + 1, prev_idx * shape_of_level + idx
)
else:
array_segments = dim_metadata[metadata_idx]
array_indices = dim_metadata[metadata_idx + 1]
for idx in range(array_segments[prev_idx], array_segments[prev_idx + 1]):
indices[level] = array_indices[idx]
_def_prepare_dense_matrix_from_sparse(indices, level + 1, idx)
indices = np.zeros(total_rank)
value_idx = 0
_def_prepare_dense_matrix_from_sparse(indices, 0, 0)
return np.array(indices_list, dtype="int32"), dense_mat.reshape(dense_shape)
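# Illustrative example of the TFLite sparsity encoding handled above (hypothetical tensor):
# the dense 2x3 matrix [[1, 0, 2], [0, 0, 3]] with traversal_order=[0, 1] can be stored with
# dimension 0 as DENSE (DenseSize=2) and dimension 1 as SPARSE_CSR with
# array_segments=[0, 2, 3] and array_indices=[0, 2, 2]; the sparse value buffer is then
# [1, 2, 3], which the recursion above scatters back into the dense buffer.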
def get_scalar_from_constant(expr):
"""Returns scalar value from Relay constant scalar."""
assert (
isinstance(expr, _expr.Constant) and not expr.data.shape
), "Expr is not a constant scalar."
value = expr.data.numpy()
assert value.dtype == np.dtype(np.int32) or value.dtype == np.dtype(
np.float32
), "value must be float32/int32"
return value.item(0)
def get_tensor_from_constant(expr):
"""Returns tensor of values from Relay constant node."""
assert isinstance(expr, _expr.Constant)
value = expr.data.numpy()
assert value.dtype == np.dtype(np.int32) or value.dtype == np.dtype(
np.float32
), "value must be float32/int32"
return value
def build_str_map(obj):
"""Build string map of TFLite enum int value
Parameters
----------
obj:
TFLite class which contains enum int value, such as BuiltInOptions
Returns
-------
String representation map of TFLite class enum int value
"""
ret = {}
for field_name in dir(obj):
if not field_name.startswith("_"):
field_value = getattr(obj, field_name)
if isinstance(field_value, int):
ret[field_value] = field_name
return ret
# SAME padding: https://www.tensorflow.org/api_guides/python/nn
def get_pad_value(data, kernel, stride):
"""Get the pad tuple of value for SAME padding
Parameters
----------
data:
1D input data
kernel:
1D input kernel
stride:
1D input stride
Returns
-------
pad tuple of value
"""
out = int(math.ceil(float(data) / float(stride)))
pad = max(0, (out - 1) * stride + kernel - data)
pad_before = pad // 2
pad_after = pad - pad_before
return pad_before, pad_after
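# Worked example of the SAME-padding arithmetic above (hypothetical sizes): for data=224,
# kernel=3, stride=2 we get out=ceil(224/2)=112, pad=max(0, 111*2 + 3 - 224)=1 and hence
# pad_before=0, pad_after=1.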
def get_tensor_name(subgraph, tensor_idx):
"""Get the tensor name.
Parameters
----------
subgraph:
tflite.Subgraph.Subgraph
tensor:
tensor index in subgraph
Returns
-------
tensor name in UTF-8 encoding
"""
return subgraph.Tensors(tensor_idx).Name().decode("utf-8")
def _decode_type(n):
_tflite_m = {
0: "float32",
1: "float16",
2: "int32",
3: "uint8",
4: "int64",
5: "string",
6: "bool",
7: "int16",
8: "complex64",
9: "int8",
}
return _tflite_m[n]
def _input_type(model):
subgraph_count = model.SubgraphsLength()
assert subgraph_count > 0
shape_dict = {}
dtype_dict = {}
for subgraph_index in range(subgraph_count):
subgraph = model.Subgraphs(subgraph_index)
inputs_count = subgraph.InputsLength()
assert inputs_count >= 1
for input_index in range(inputs_count):
input_ = subgraph.Inputs(input_index)
assert subgraph.TensorsLength() > input_
tensor = subgraph.Tensors(input_)
input_shape = tuple(tensor.ShapeAsNumpy())
tensor_type = tensor.Type()
input_name = tensor.Name().decode("utf8")
shape_dict[input_name] = input_shape
dtype_dict[input_name] = _decode_type(tensor_type)
return shape_dict, dtype_dict
def from_tflite(model, shape_dict=None, dtype_dict=None, op_converter=OperatorConverter):
"""Convert from tflite model into compatible relay Function.
Parameters
----------
model:
tflite.Model or tflite.Model.Model (depending on tflite version)
shape_dict : dict of str to int list/tuple
Input shapes of the model.
dtype_dict : dict of str to str
Input types of the model.
Returns
-------
mod : tvm.IRModule
The relay module for compilation.
params : dict of str to tvm.nd.NDArray
The parameter dict to be used by relay
"""
try:
import tflite.BuiltinOperator
import tflite.SubGraph
except ImportError:
raise ImportError("The tflite package must be installed")
# TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1
try:
import tflite
assert isinstance(model, tflite.Model)
except TypeError:
import tflite.Model
assert isinstance(model, tflite.Model.Model)
_shape_dict, _dtype_dict = _input_type(model)
if shape_dict is not None:
_shape_dict.update(shape_dict)
if dtype_dict is not None:
_dtype_dict.update(dtype_dict)
# keep the same as tflite
assert model.SubgraphsLength() == 1, "only support one subgraph (main subgraph)"
subgraph = model.Subgraphs(0)
# model inputs / outputs
model_inputs = subgraph.InputsAsNumpy()
model_outputs = subgraph.OutputsAsNumpy()
exp_tab = ExprTable()
for model_input in model_inputs:
model_input_name = get_tensor_name(subgraph, model_input)
shape = _shape_dict[model_input_name] if model_input_name in _shape_dict else None
dtype = _dtype_dict[model_input_name] if model_input_name in _dtype_dict else "float32"
exp_tab.set_expr(model_input_name, _expr.var(model_input_name, shape=shape, dtype=dtype))
# op code in model
op_converter = op_converter(model, subgraph, exp_tab)
op_converter.check_unsupported_ops()
op_converter.convert_op_to_relay()
# params and outputs
params = {k: _nd.array(np.array(v)) for k, v in exp_tab.params.items()}
outputs = [exp_tab.get_expr(get_tensor_name(subgraph, i)) for i in model_outputs]
outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
attrs = tvm.ir.make_node(
"DictAttrs",
**{
"output_tensor_names": [
sanitize_name(get_tensor_name(subgraph, model_output))
for model_output in model_outputs
]
},
)
func = _function.Function(analysis.free_vars(outputs), outputs, attrs=attrs)
mod = IRModule.from_expr(func)
return mod, params
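# Illustrative usage (hypothetical file and input names), assuming a recent tflite package:
#   import tflite
#   with open("model.tflite", "rb") as f:
#       tflite_model = tflite.Model.GetRootAsModel(f.read(), 0)
#   mod, params = from_tflite(
#       tflite_model, shape_dict={"input": (1, 224, 224, 3)}, dtype_dict={"input": "float32"}
#   )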
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/frontend/tflite_flexbuffer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, too-many-lines, import-outside-toplevel
"""Tensorflow lite frontend helper to parse custom options in Flexbuffer format."""
import struct
from enum import IntEnum
class BitWidth(IntEnum):
"""Flexbuffer bit width schema from flexbuffers.h"""
BIT_WIDTH_8 = 0
BIT_WIDTH_16 = 1
BIT_WIDTH_32 = 2
BIT_WIDTH_64 = 3
class FlexBufferType(IntEnum):
"""Flexbuffer type schema from flexbuffers.h"""
FBT_NULL = 0
FBT_INT = 1
FBT_UINT = 2
FBT_FLOAT = 3
# Types above stored inline, types below store an offset.
FBT_KEY = 4
FBT_STRING = 5
FBT_INDIRECT_INT = 6
FBT_INDIRECT_UINT = 7
FBT_INDIRECT_FLOAT = 8
FBT_MAP = 9
FBT_VECTOR = 10 # Untyped.
FBT_VECTOR_INT = 11 # Typed any size (stores no type table).
FBT_VECTOR_UINT = 12
FBT_VECTOR_FLOAT = 13
FBT_VECTOR_KEY = 14
FBT_VECTOR_STRING = 15
FBT_VECTOR_INT2 = 16 # Typed tuple (no type table, no size field).
FBT_VECTOR_UINT2 = 17
FBT_VECTOR_FLOAT2 = 18
FBT_VECTOR_INT3 = 19 # Typed triple (no type table, no size field).
FBT_VECTOR_UINT3 = 20
FBT_VECTOR_FLOAT3 = 21
FBT_VECTOR_INT4 = 22 # Typed quad (no type table, no size field).
FBT_VECTOR_UINT4 = 23
FBT_VECTOR_FLOAT4 = 24
FBT_BLOB = 25
FBT_BOOL = 26
FBT_VECTOR_BOOL = 36 # To Allow the same type of conversion of type to vector type
class FlexBufferDecoder(object):
"""
This implements partial flexbuffer deserialization to be able
to read custom options. It is not intended to be a general
purpose flexbuffer deserializer and as such only supports a
limited number of types and assumes the data is a flat map.
"""
def __init__(self, buffer):
self.buffer = buffer
def indirect_jump(self, offset, byte_width):
"""Helper function to read the offset value and jump"""
unpack_str = ""
if byte_width == 1:
unpack_str = "<B"
elif byte_width == 4:
unpack_str = "<i"
assert unpack_str != ""
back_jump = struct.unpack(unpack_str, self.buffer[offset : offset + byte_width])[0]
return offset - back_jump
def decode_keys(self, end, size, byte_width):
"""Decodes the flexbuffer type vector. Map keys are stored in this form"""
# Keys are strings here. The format is all strings separated by null, followed by back
# offsets for each of the strings. For example, (str1)\0(str2)\0(offset1)(offset2). The end
# pointer points at the end of all strings.
keys = list()
for i in range(0, size):
offset_pos = end + i * byte_width
start_index = self.indirect_jump(offset_pos, byte_width)
str_size = self.buffer[start_index:].find(b"\0")
assert str_size != -1
s = self.buffer[start_index : start_index + str_size].decode("utf-8")
keys.append(s)
return keys
def decode_vector(self, end, size, byte_width):
"""Decodes the flexbuffer vector"""
# Each entry in the vector can have a different datatype. Each entry is of fixed length. The
# format is a sequence of all values followed by a sequence of the datatypes of all values.
# For example: (4)(3.56)(int)(float). The end here points to the start of the values.
values = list()
for i in range(0, size):
value_type_pos = end + size * byte_width + i
value_type = FlexBufferType(self.buffer[value_type_pos] >> 2)
value_bytes = self.buffer[end + i * byte_width : end + (i + 1) * byte_width]
if value_type == FlexBufferType.FBT_BOOL:
value = bool(value_bytes[0])
elif value_type == FlexBufferType.FBT_INT:
value = struct.unpack("<i", value_bytes)[0]
elif value_type == FlexBufferType.FBT_UINT:
value = struct.unpack("<I", value_bytes)[0]
elif value_type == FlexBufferType.FBT_FLOAT:
value = struct.unpack("<f", value_bytes)[0]
else:
raise Exception
values.append(value)
return values
def decode_map(self, end, byte_width, parent_byte_width):
"""Decodes the flexbuffer map and returns a dict"""
mid_loc = self.indirect_jump(end, parent_byte_width)
map_size = struct.unpack("<i", self.buffer[mid_loc - byte_width : mid_loc])[0]
# Find keys
keys_offset = mid_loc - byte_width * 3
keys_end = self.indirect_jump(keys_offset, byte_width)
keys = self.decode_keys(keys_end, map_size, 1)
# Find values
values_end = self.indirect_jump(end, parent_byte_width)
values = self.decode_vector(values_end, map_size, byte_width)
return dict(zip(keys, values))
def decode(self):
"""Decode the buffer. Decoding is partially implemented"""
root_end = len(self.buffer) - 1
root_byte_width = self.buffer[root_end]
root_end -= 1
root_packed_type = self.buffer[root_end]
root_end -= root_byte_width
root_type = FlexBufferType(root_packed_type >> 2)
byte_width = 1 << BitWidth(root_packed_type & 3)
if root_type == FlexBufferType.FBT_MAP:
return self.decode_map(root_end, byte_width, root_byte_width)
raise NotImplementedError("Flexbuffer Decoding is partially implemented.")
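# Illustrative usage, mirroring how the TFLite frontend reads an op's custom options:
#   flexbuffer = op.CustomOptionsAsNumpy().tobytes()
#   custom_options = FlexBufferDecoder(flexbuffer).decode()
#   iou_threshold = custom_options["nms_iou_threshold"]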
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/function.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, invalid-name, unused-import
"""The expression nodes of Relay."""
from __future__ import absolute_import
import tvm._ffi
from tvm.runtime import convert
from tvm.ir import BaseFunc
from .expr import Call
from . import _ffi_api
@tvm._ffi.register_object("relay.Function")
class Function(BaseFunc):
"""A function declaration expression.
Parameters
----------
params: List[tvm.relay.Var]
List of input parameters to the function.
body: tvm.relay.Expr
The body of the function.
ret_type: Optional[tvm.relay.Type]
The return type annotation of the function.
type_params: Optional[List[tvm.relay.TypeParam]]
The additional type parameters, this is only
used in advanced usecase of template functions.
"""
def __init__(self, params, body, ret_type=None, type_params=None, attrs=None):
if type_params is None:
type_params = convert([])
self.__init_handle_by_constructor__(
_ffi_api.Function, params, body, ret_type, type_params, attrs
)
def __call__(self, *args):
"""Invoke the global function.
Parameters
----------
args: List[relay.Expr]
Arguments.
"""
return Call(self, args, None, None)
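# Illustrative sketch (hypothetical variable names): constructing an identity function by hand.
#   x = tvm.relay.var("x", shape=(1, 3), dtype="float32")
#   ident = Function([x], x)
#   call = ident(x)  # equivalent to Call(ident, [x])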
@tvm._ffi.register_func("relay.FunctionWithFields")
def FunctionWithFields(
function,
params=None,
body=None,
ret_type=None,
ty_params=None,
attrs=None,
virtual_device=None,
span=None,
):
"""
Returns function with the given properties. A None property denotes 'no change'.
Returns function if all properties are unchanged. Otherwise, returns a copy with the new
fields.
"""
return _ffi_api.FunctionWithFields(
function, params, body, ret_type, ty_params, attrs, virtual_device, span
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/loops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""
Utilities for building Relay loops.
"""
from .scope_builder import ScopeBuilder
from . import expr as _expr
from . import function as _function
def while_loop(cond, loop_vars, loop_bodies):
"""
Construct a while loop.
Parameters
----------
cond: Callable[Tuple[relay.Expr], relay.Expr]
The condition of the loop.
loop_vars: Tuple[relay.Expr]
The variables being looped over.
The initial values of the loop, will be used to
construct the loop variables.
loop_bodies: Callable[Tuple[relay.Expr], Tuple[relay.Expr]]
The body of the loop, should be a function which
given loop variables produces the output result
also as a tuple
Returns
-------
loop: relay.Expr
The loop expression.
"""
sb = ScopeBuilder()
loop = _expr.Var("while_loop")
fresh_vars = []
for i, loop_var in enumerate(loop_vars):
name = loop_var.name_hint if isinstance(loop_var, _expr.Var) else "arg{}".format(i)
new_var = _expr.var(name, type_annotation=sb.type_of(loop_var))
fresh_vars.append(new_var)
with sb.if_scope(cond(*fresh_vars)):
sb.ret(loop(*loop_bodies(*fresh_vars)))
with sb.else_scope():
sb.ret(_expr.Tuple(fresh_vars))
func = _function.Function(fresh_vars, sb.get())
let = _expr.Let(loop, func, loop)
return let
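# Illustrative sketch (hypothetical names): a loop that increments a counter until it reaches 10.
#   i = relay.var("i", shape=(), dtype="int32")
#   cond = lambda c: relay.less(c, relay.const(10, "int32"))
#   body = lambda c: (relay.add(c, relay.const(1, "int32")),)
#   loop = while_loop(cond, [i], body)
#   result = loop(relay.const(0, "int32"))  # calling the loop with the initial value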
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, redefined-builtin
"""Relay core operators."""
# operator defs
from .op import (
get,
register_compute,
register_gradient,
register_pattern,
register_alter_op_layout,
register_legalize,
OpPattern,
OpStrategy,
debug,
register_external_compiler,
register_fake_quantization_to_integer,
register_mixed_precision_conversion,
)
from . import strategy
# Operators
from .reduce import *
from .tensor import *
from .transform import *
from .algorithm import *
from . import vm
from . import nn
from . import annotation
from . import memory
from . import image
from . import vision
from . import op_attrs
from . import random
# operator registry
from . import _tensor
from . import _tensor_grad
from . import _transform
from . import _reduce
from . import _algorithm
from . import _math
def _register_op_make():
# pylint: disable=import-outside-toplevel
from . import _make
from .. import expr
expr._op_make = _make
_register_op_make()
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/_algorithm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"Definition of classic algorithms"
# pylint: disable=invalid-name,unused-argument
from __future__ import absolute_import
from tvm.te.hybrid import script
from tvm.runtime import convert
from . import strategy
from . import op as _reg
from .op import OpPattern, register_pattern
from .op import register_strategy, register_shape_func
from ._tensor import elemwise_shape_func
# sort
register_strategy("sort", strategy.sort_strategy)
register_pattern("sort", OpPattern.OPAQUE)
register_shape_func("sort", False, elemwise_shape_func)
# argsort
register_strategy("argsort", strategy.argsort_strategy)
register_pattern("argsort", OpPattern.OPAQUE)
register_shape_func("argsort", False, elemwise_shape_func)
# topk
register_strategy("topk", strategy.topk_strategy)
register_pattern("topk", OpPattern.OPAQUE)
# searchsorted
register_strategy("searchsorted", strategy.searchsorted_strategy)
register_pattern("searchsorted", OpPattern.OPAQUE)
@script
def _topk_shape_func_input_shape(data_shape, k, axis):
ndim = data_shape.shape[0]
val_out = output_tensor((ndim,), "int64")
indices_out = output_tensor((ndim,), "int64")
for i in const_range(ndim):
if i != axis:
val_out[i] = int64(data_shape[i])
indices_out[i] = int64(data_shape[i])
else:
if k < 1:
val_out[i] = int64(data_shape[i])
indices_out[i] = int64(data_shape[i])
else:
val_out[i] = int64(k)
indices_out[i] = int64(k)
return val_out, indices_out
@_reg.register_shape_func("topk", False)
def topk_shape_func(attrs, inputs, _):
"""
Shape func for topk.
"""
axis = attrs.axis
if axis < 0:
axis += inputs[0].shape[0]
val_out, indices_out = _topk_shape_func_input_shape(inputs[0], attrs.k, convert(axis))
ret_type = attrs.ret_type
if ret_type == "both":
ret = [val_out, indices_out]
elif ret_type == "values":
ret = [val_out]
else:
ret = [indices_out]
return ret
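# Quick illustration of the topk shape function: for data of shape (4, 5, 6)
# with k=3 and axis=1, both the values and the indices outputs get shape
# (4, 3, 6); with k < 1 the axis keeps its original extent.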
@script
def _searchsorted_shape(sorted_sequence_shape, values_shape):
out_shape = output_tensor((values_shape.shape[0],), "int64")
if sorted_sequence_shape.shape[0] > 1:
assert (
sorted_sequence_shape.shape[0] == values_shape.shape[0]
), "Ranks of `sorted_sequence` and values must be the same if `sorted_sequence` is not 1-D."
for i in range(values_shape.shape[0]):
if sorted_sequence_shape.shape[0] > 1 and i < values_shape.shape[0] - 1:
assert (
sorted_sequence_shape[i] == values_shape[i]
), "`sorted_sequence and `values` do not have the same shape along outer axes."
out_shape[i] = values_shape[i]
return out_shape
@_reg.register_shape_func("searchsorted", False)
def searchsorted_shape_func(attrs, inputs, _):
"""
Shape func for searchsorted operator.
"""
return [_searchsorted_shape(inputs[0], inputs[1])]
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/_make.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Constructor APIs"""
import tvm._ffi
tvm._ffi._init_api("relay.op._make", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/_math.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Backend compiler related feature registration"""
from . import op as _reg
from . import strategy
# einsum
_reg.register_strategy("einsum", strategy.einsum_strategy)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/_reduce.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Backend compiler related feature registration"""
from __future__ import absolute_import
from tvm.runtime import convert
from tvm.te.hybrid import script
from tvm.topi.utils import get_const_int, get_const_tuple
from . import op as _reg
_reg.register_reduce_schedule("argmax")
_reg.register_reduce_schedule("argmin")
_reg.register_reduce_schedule("sum")
_reg.register_reduce_schedule("all")
_reg.register_reduce_schedule("any")
_reg.register_reduce_schedule("max")
_reg.register_reduce_schedule("min")
_reg.register_reduce_schedule("prod")
_reg.register_reduce_schedule("mean")
_reg.register_reduce_schedule("variance")
def _create_axis_record(attrs, inputs):
    """Build the axis record consumed by _reduce_shape_func: one entry per
    output dimension, holding the corresponding input dimension index, or -1
    where the dimension was reduced (kept as size 1 when keepdims=True)."""
axes = attrs.axis if attrs.axis is None else list(get_const_tuple(attrs.axis))
exclude = get_const_int(attrs.exclude) > 0
keepdims = get_const_int(attrs.keepdims) > 0
data_shape = inputs[0]
shape_size = data_shape.shape[0].value
axis_record = [-1] * shape_size
if axes is None:
axes = list(range(shape_size))
for i, axis in enumerate(axes):
if axis < 0:
axes[i] = shape_size + axis
if exclude:
ex_axes = []
for i in range(shape_size):
if i not in axes:
ex_axes.append(i)
axes = ex_axes
for i in range(shape_size):
if i not in axes:
axis_record[i] = i
if not keepdims:
tmp = []
for i in axis_record:
if i >= 0:
tmp.append(i)
axis_record = tmp
return axis_record
@script
def _reduce_shape_func(data_shape, axis_record):
out = output_tensor((len(axis_record),), "int64")
for i in const_range(len(axis_record)):
if axis_record[i] >= 0:
out[i] = data_shape[axis_record[i]]
else:
out[i] = int64(1)
return out
def reduce_shape_func(attrs, inputs, _):
"""
Shape function for reduce op.
"""
axis_record = _create_axis_record(attrs, inputs)
return [_reduce_shape_func(inputs[0], convert(axis_record))]
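# Worked example (illustrative): reducing a (2, 3, 4) input over axis=(1,)
# gives axis_record [0, 2] and an output shape of (2, 4) when keepdims=False,
# or axis_record [0, -1, 2] and shape (2, 1, 4) when keepdims=True; with
# exclude=True the complementary axes (0, 2) are reduced instead.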
_reg.register_shape_func("argmax", False, reduce_shape_func)
_reg.register_shape_func("argmin", False, reduce_shape_func)
_reg.register_shape_func("all", False, reduce_shape_func)
_reg.register_shape_func("sum", False, reduce_shape_func)
_reg.register_shape_func("max", False, reduce_shape_func)
_reg.register_shape_func("min", False, reduce_shape_func)
_reg.register_shape_func("prod", False, reduce_shape_func)
_reg.register_shape_func("mean", False, reduce_shape_func)
_reg.register_shape_func("variance", False, reduce_shape_func)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/_tensor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, len-as-condition
"""Backend compiler related feature registration"""
from tvm.te.hybrid import script
from tvm import topi
from tvm.runtime import convert
from .op import register_compute, register_shape_func, register_legalize
from .op import register_broadcast_schedule, register_injective_schedule
from .op import register_pattern, OpPattern
register_broadcast_schedule("log")
register_broadcast_schedule("log2")
register_broadcast_schedule("log10")
register_broadcast_schedule("tan")
register_broadcast_schedule("cos")
register_broadcast_schedule("cosh")
register_broadcast_schedule("sin")
register_broadcast_schedule("sinh")
register_broadcast_schedule("acos")
register_broadcast_schedule("acosh")
register_broadcast_schedule("asin")
register_broadcast_schedule("asinh")
register_broadcast_schedule("atan")
register_broadcast_schedule("atanh")
register_broadcast_schedule("exp")
register_broadcast_schedule("erf")
register_broadcast_schedule("sqrt")
register_broadcast_schedule("rsqrt")
register_broadcast_schedule("sigmoid")
register_broadcast_schedule("floor")
register_broadcast_schedule("ceil")
register_broadcast_schedule("trunc")
register_broadcast_schedule("round")
register_broadcast_schedule("sign")
register_broadcast_schedule("abs")
register_broadcast_schedule("tanh")
register_broadcast_schedule("add")
register_broadcast_schedule("subtract")
register_broadcast_schedule("multiply")
register_broadcast_schedule("divide")
register_broadcast_schedule("floor_divide")
register_broadcast_schedule("trunc_divide")
register_broadcast_schedule("power")
register_broadcast_schedule("copy")
register_broadcast_schedule("logical_not")
register_broadcast_schedule("logical_and")
register_broadcast_schedule("logical_or")
register_broadcast_schedule("logical_xor")
register_broadcast_schedule("bitwise_not")
register_broadcast_schedule("bitwise_and")
register_broadcast_schedule("bitwise_or")
register_broadcast_schedule("bitwise_xor")
register_broadcast_schedule("negative")
register_broadcast_schedule("mod")
register_broadcast_schedule("floor_mod")
register_broadcast_schedule("trunc_mod")
register_broadcast_schedule("equal")
register_broadcast_schedule("not_equal")
register_broadcast_schedule("less")
register_broadcast_schedule("less_equal")
register_broadcast_schedule("greater")
register_broadcast_schedule("greater_equal")
register_broadcast_schedule("isnan")
register_broadcast_schedule("isfinite")
register_broadcast_schedule("isinf")
register_injective_schedule("maximum")
register_injective_schedule("minimum")
register_injective_schedule("right_shift")
register_injective_schedule("left_shift")
register_injective_schedule("shape_of")
register_injective_schedule("ndarray_size")
register_injective_schedule("device_copy")
register_broadcast_schedule("fast_exp")
register_broadcast_schedule("fast_tanh")
register_broadcast_schedule("fast_erf")
@register_legalize("erf")
def legalize_erf(attrs, inputs, types):
"""Legalize ERF op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.math.erf_legalize(attrs, inputs, types)
# zeros
@register_compute("zeros")
def zeros_compute(attrs, inputs, output_type):
assert not inputs
return [topi.full(output_type.shape, output_type.dtype, 0.0)]
register_broadcast_schedule("zeros")
register_pattern("zeros", OpPattern.ELEMWISE)
# zeros_like
@register_compute("zeros_like")
def zeros_like_compute(attrs, inputs, output_type):
assert len(inputs) == 1
return [topi.full_like(inputs[0], 0.0)]
register_broadcast_schedule("zeros_like")
# ones
@register_compute("ones")
def ones_compute(attrs, inputs, output_type):
assert not inputs
return [topi.full(output_type.shape, output_type.dtype, 1.0)]
register_broadcast_schedule("ones")
register_pattern("ones", OpPattern.ELEMWISE)
# ones_like
@register_compute("ones_like")
def ones_like_compute(attrs, inputs, output_type):
assert len(inputs) == 1
return [topi.full_like(inputs[0], 1.0)]
register_broadcast_schedule("ones_like")
# clip
@register_compute("clip")
def clip_compute(attrs, inputs, output_type):
assert len(inputs) == 1
return [topi.clip(inputs[0], attrs.a_min, attrs.a_max)]
register_injective_schedule("clip")
# fixed point multiply
@register_compute("fixed_point_multiply")
def fixed_point_multiply_compute(attrs, inputs, output_type):
assert len(inputs) == 1
return [topi.fixed_point_multiply(inputs[0], attrs.multiplier, attrs.shift)]
register_injective_schedule("fixed_point_multiply")
# per-channel/per-axis fixed point multiply
@register_compute("fixed_point_multiply_per_axis")
def fixed_point_multiply_per_axis_compute(attrs, inputs, output_type):
assert len(inputs) == 4
return [
topi.fixed_point_multiply_per_axis(
*inputs, attrs.is_lshift_required, attrs.is_rshift_required, attrs.axes
)
]
register_broadcast_schedule("fixed_point_multiply_per_axis")
# full
@script
def _full_shape_func(shape):
out_ndim = shape.shape[0]
out = output_tensor((out_ndim,), "int64")
for i in const_range(out_ndim):
out[i] = int64(shape[i])
return out
@script
def _convert_shape(shape):
out = output_tensor((len(shape),), "int64")
for i in const_range(len(shape)):
out[i] = int64(shape[i])
return out
def full_shape_func(attrs, inputs, out_ndims):
"""
Shape func for full.
"""
if len(inputs) > 1:
return [_full_shape_func(inputs[1])]
return [_convert_shape(convert(attrs.shape))]
def no_data_full_shape_func(attrs, inputs, out_ndims):
"""
Shape func for zeros and ones.
"""
if len(inputs) == 0:
return [_convert_shape(convert(attrs.shape))]
return [_full_shape_func(inputs[0])]
@script
def _broadcast_shape_func(x, y, ndim):
out = output_tensor((ndim,), "int64")
if len(x.shape) == 0:
for i in const_range(ndim):
out[i] = y[i]
elif len(y.shape) == 0:
for i in const_range(ndim):
out[i] = x[i]
else:
ndim1 = x.shape[0]
ndim2 = y.shape[0]
for i in const_range(1, min(ndim1, ndim2) + 1):
if x[ndim1 - i] == y[ndim2 - i]:
out[ndim - i] = x[ndim1 - i]
elif x[ndim1 - i] == 1:
out[ndim - i] = y[ndim2 - i]
else:
assert y[ndim2 - i] == 1, "Incompatible broadcast type %s and %s" % (
x[ndim1 - i],
y[ndim2 - i],
)
out[ndim - i] = x[ndim1 - i]
for i in const_range(min(ndim1, ndim2) + 1, ndim + 1):
if ndim1 >= ndim2:
out[ndim - i] = x[ndim1 - i]
else:
out[ndim - i] = y[ndim2 - i]
return out
def broadcast_shape_func(attrs, inputs, out_ndims):
"""
Shape function for broadcast op.
"""
return [_broadcast_shape_func(*inputs, out_ndims[0])]
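# Example (illustrative): broadcasting shapes (3, 1, 5) and (4, 5) aligns
# them from the right, so out_ndims[0] == 3 and the resulting shape is
# (3, 4, 5); a size-1 dimension always yields to the other operand.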
def elemwise_shape_func(attrs, inputs, _):
"""
Shape function for elemwise op.
"""
return [topi.math.identity(inputs[0])]
register_shape_func("cast", False, elemwise_shape_func)
register_shape_func("cast_like", False, elemwise_shape_func)
register_shape_func("round", False, elemwise_shape_func)
register_shape_func("zeros", False, no_data_full_shape_func)
register_shape_func("zeros_like", False, elemwise_shape_func)
register_shape_func("ones", False, no_data_full_shape_func)
register_shape_func("ones_like", False, elemwise_shape_func)
register_shape_func("full", False, full_shape_func)
register_shape_func("full_like", False, elemwise_shape_func)
register_shape_func("broadcast_to", True, full_shape_func)
register_shape_func("add", False, broadcast_shape_func)
register_shape_func("subtract", False, broadcast_shape_func)
register_shape_func("multiply", False, broadcast_shape_func)
register_shape_func("divide", False, broadcast_shape_func)
register_shape_func("floor_divide", False, broadcast_shape_func)
register_shape_func("trunc_divide", False, broadcast_shape_func)
register_shape_func("power", False, broadcast_shape_func)
register_shape_func("mod", False, broadcast_shape_func)
register_shape_func("floor_mod", False, broadcast_shape_func)
register_shape_func("trunc_mod", False, broadcast_shape_func)
register_shape_func("logical_and", False, broadcast_shape_func)
register_shape_func("logical_or", False, broadcast_shape_func)
register_shape_func("logical_xor", False, broadcast_shape_func)
register_shape_func("bitwise_not", False, broadcast_shape_func)
register_shape_func("bitwise_and", False, broadcast_shape_func)
register_shape_func("bitwise_or", False, broadcast_shape_func)
register_shape_func("bitwise_xor", False, broadcast_shape_func)
register_shape_func("equal", False, broadcast_shape_func)
register_shape_func("not_equal", False, broadcast_shape_func)
register_shape_func("less", False, broadcast_shape_func)
register_shape_func("less_equal", False, broadcast_shape_func)
register_shape_func("greater", False, broadcast_shape_func)
register_shape_func("greater_equal", False, broadcast_shape_func)
register_shape_func("maximum", False, broadcast_shape_func)
register_shape_func("minimum", False, broadcast_shape_func)
register_shape_func("left_shift", False, broadcast_shape_func)
register_shape_func("right_shift", False, broadcast_shape_func)
register_shape_func("sqrt", False, elemwise_shape_func)
register_shape_func("rsqrt", False, elemwise_shape_func)
register_shape_func("negative", False, elemwise_shape_func)
register_shape_func("exp", False, elemwise_shape_func)
register_shape_func("tan", False, elemwise_shape_func)
register_shape_func("fast_exp", False, elemwise_shape_func)
register_shape_func("fast_tanh", False, elemwise_shape_func)
register_shape_func("fast_erf", False, elemwise_shape_func)
register_shape_func("floor", False, elemwise_shape_func)
register_shape_func("log", False, elemwise_shape_func)
register_shape_func("device_copy", False, elemwise_shape_func)
register_shape_func("clip", False, elemwise_shape_func)
register_shape_func("log2", False, elemwise_shape_func)
register_shape_func("sigmoid", False, elemwise_shape_func)
register_shape_func("tanh", False, elemwise_shape_func)
register_shape_func("logical_not", False, elemwise_shape_func)
register_shape_func("ceil", False, elemwise_shape_func)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/_tensor_grad.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Gradient definitions for Relay operators"""
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
from tvm.error import OpError
from ..expr import Tuple, TupleGetItem, const, Var
from ..ty import TensorType
from ..loops import while_loop
from . import nn as _nn
from .op import register_gradient
from .reduce import sum as _sum
from .tensor import (
cos,
cosh,
exp,
less,
negative,
ones_like,
power,
sin,
sinh,
sqrt,
zeros_like,
equal,
shape_of,
log,
concatenate,
)
from .transform import (
broadcast_to_like,
collapse_sum_like,
cast_like,
reshape,
reshape_like,
strided_slice,
take,
transpose,
where,
repeat,
expand_dims,
full_like,
split,
squeeze,
strided_set,
arange,
scatter_nd,
)
@register_gradient("log")
def log_grad(orig, grad):
"""Returns [grad * (1 / x)]"""
x = orig.args[0]
return [grad * ones_like(x) / x]
@register_gradient("log2")
def log2_grad(orig, grad):
"""Returns [grad * 1 / (log(2) * x)]"""
x = orig.args[0]
ones = ones_like(x)
two = const(2.0, dtype=x.checked_type.dtype)
return [grad * ones / (log(two) * x)]
@register_gradient("log10")
def log10_grad(orig, grad):
"""Returns [grad * 1 / (log(10) * x)]"""
x = orig.args[0]
ones = ones_like(x)
ten = const(10.0, dtype=x.checked_type.dtype)
return [grad * ones / (log(ten) * x)]
@register_gradient("tan")
def tan_grad(orig, grad):
"""Returns [grad / (cos^2(x))]"""
x = orig.args[0]
return [grad / (cos(x) * cos(x))]
@register_gradient("cos")
def cos_grad(orig, grad):
"""Returns [grad * (-sin(x))]"""
x = orig.args[0]
ones = ones_like(x)
return [grad * (-ones * sin(x))]
@register_gradient("cosh")
def cosh_grad(orig, grad):
"""Returns [grad * sinh(x)]"""
x = orig.args[0]
return [grad * sinh(x)]
@register_gradient("sin")
def sin_grad(orig, grad):
"""Returns [grad * cos(x)]"""
x = orig.args[0]
return [grad * cos(x)]
@register_gradient("sinh")
def sinh_grad(orig, grad):
"""Returns [grad * cosh(x)]"""
x = orig.args[0]
return [grad * cosh(x)]
@register_gradient("acos")
def acos_grad(orig, grad):
"""Returns [grad * -1/((1 - (x ^ 2)) ^ 1/2)]"""
x = orig.args[0]
ones = ones_like(x)
return [grad * (-ones / sqrt(ones - (x * x)))]
@register_gradient("acosh")
def acosh_grad(orig, grad):
"""Returns [grad * 1/((x - 1) ^ 1/2 * (x + 1) ^ 1/2)]"""
x = orig.args[0]
ones = ones_like(x)
return [grad * ones / sqrt((x * x) - ones)]
@register_gradient("asin")
def asin_grad(orig, grad):
"""Returns [grad * 1/((1 - (x ^ 2)) ^ (1/2))]"""
x = orig.args[0]
ones = ones_like(x)
return [grad * ones / sqrt(ones - (x * x))]
@register_gradient("asinh")
def asinh_grad(orig, grad):
"""Returns [grad * 1/((1 + (x ^ 2)) ^ (1/2))]"""
x = orig.args[0]
ones = ones_like(x)
return [grad * ones / sqrt(ones + (x * x))]
@register_gradient("atan")
def atan_grad(orig, grad):
"""Returns [grad * 1 / (1 + x ^ 2)]"""
x = orig.args[0]
ones = ones_like(x)
return [grad * ones / (ones + (x * x))]
@register_gradient("atanh")
def atanh_grad(orig, grad):
"""Returns [grad * 1 / (1 - x ^ 2)]"""
x = orig.args[0]
ones = ones_like(x)
return [grad * ones / (ones - (x * x))]
@register_gradient("exp")
def exp_grad(orig, grad):
"""Returns [grad * exp(x)]"""
return [grad * exp(orig.args[0])]
@register_gradient("sqrt")
def sqrt_grad(orig, grad):
"""Returns [grad * 0.5 * (x ^ -0.5)]"""
x = orig.args[0]
a = const(0.5, dtype=x.checked_type.dtype)
return [grad * a * power(x, negative(a))]
@register_gradient("sigmoid")
def sigmoid_grad(orig, grad):
"""Returns [grad * sigmoid(x) * (1 - sigmoid(x))]."""
return [grad * orig * (ones_like(orig) - orig)]
@register_gradient("tanh")
def tanh_grad(orig, grad):
"""Returns grad * (1 - tanh(x) * tanh(x))."""
return [grad * (ones_like(orig) - orig * orig)]
@register_gradient("nn.relu")
def relu_grad(orig, grad):
"""Returns grad * (select(x < 0, 0, 1))."""
x = orig.args[0]
zeros = zeros_like(x)
ones = ones_like(x)
return [where(less(x, zeros), zeros, ones * grad)]
@register_gradient("add")
def add_grad(orig, grad):
"""Returns [grad, grad]"""
return [collapse_sum_like(grad, orig.args[0]), collapse_sum_like(grad, orig.args[1])]
@register_gradient("subtract")
def subtract_grad(orig, grad):
"""Returns [grad, -grad]"""
return [collapse_sum_like(grad, orig.args[0]), collapse_sum_like(negative(grad), orig.args[1])]
@register_gradient("multiply")
def multiply_grad(orig, grad):
"""Returns [grad * y, grad * x]"""
x, y = orig.args
return [collapse_sum_like(grad * y, x), collapse_sum_like(grad * x, y)]
@register_gradient("divide")
def divide_grad(orig, grad):
"""Returns [grad / y, - grad * (x / y) / y]"""
x, y = orig.args
return [collapse_sum_like(grad / y, x), collapse_sum_like(-(grad * orig / y), y)]
@register_gradient("zeros")
def zeros_grad(orig, grad):
"""Returns []"""
return []
@register_gradient("dyn.zeros")
def dyn_zeros_grad(orig, grad):
"""Returns the gradient of dyn.zeros which is just zero."""
assert len(orig.args) == 1
return [zeros_like(orig.args[0])]
@register_gradient("ones")
def ones_grad(orig, grad):
"""Returns []"""
return []
@register_gradient("dyn.ones")
def dyn_ones_grad(orig, grad):
"""Returns the gradient of dyn.ones which is just zero."""
assert len(orig.args) == 1
return [zeros_like(orig.args[0])]
@register_gradient("zeros_like")
def zeros_like_grad(orig, grad):
"""Returns [0]"""
return [orig]
@register_gradient("ones_like")
def ones_like_grad(orig, grad):
"""Returns [0]"""
return [zeros_like(orig.args[0])]
@register_gradient("collapse_sum_like")
def collapse_sum_like_grad(orig, grad):
"""Returns [broadcast_to_like(grad, x), 0]"""
x, y = orig.args
return [broadcast_to_like(grad, x), zeros_like(y)]
@register_gradient("collapse_sum_to")
def collapse_sum_to_grad(orig, grad):
"""Returns [broadcast_to_like(grad, x), 0]"""
x, y = orig.args
return [broadcast_to_like(grad, x), zeros_like(y)]
@register_gradient("abs")
def abs_grad(orig, grad):
"""Returns grad * (select(x < 0, -1, 1))."""
x = orig.args[0]
zeros = zeros_like(x)
ones = ones_like(x)
return [where(less(x, zeros), -ones * grad, ones * grad)]
@register_gradient("erf")
def erf_grad(orig, grad):
    """Returns [grad * (2 / sqrt(pi)) * exp(-x * x)]"""
    # c_2_div_sqrt_pi = 2.0 / math.sqrt(math.pi)
(inp,) = orig.args
c_2_div_sqrt_pi = const(1.1283791670955126, dtype=inp.checked_type.dtype)
return [c_2_div_sqrt_pi * exp(-inp * inp) * grad]
@register_gradient("clip")
def clip_grad(orig, grad):
"""Returns grad * (select(x < min || max < x , 0, 1))."""
x = orig.args[0]
a_min = orig.attrs.get_int("a_min")
a_max = orig.attrs.get_int("a_max")
a_mins = broadcast_to_like(const(a_min, dtype=x.checked_type.dtype), x)
a_maxs = broadcast_to_like(const(a_max, dtype=x.checked_type.dtype), x)
zeros = zeros_like(x)
ones = ones_like(x)
return [where(less(x, a_mins), zeros, where(less(a_maxs, x), zeros, ones * grad))]
@register_gradient("nn.max_pool2d")
def max_pool2d_grad(orig, grad):
"""Returns the gradient of max_pool2d."""
attrs = orig.attrs
pool_grad = _nn.max_pool2d_grad(
grad,
orig.args[0],
pool_size=attrs.pool_size,
strides=attrs.strides,
padding=attrs.padding,
layout=attrs.layout,
ceil_mode=attrs.ceil_mode,
)
return [pool_grad]
@register_gradient("nn.avg_pool2d")
def avg_pool2d_grad(orig, grad):
"""Returns the gradient of avg_pool2d."""
attrs = orig.attrs
pool_grad = _nn.avg_pool2d_grad(
grad,
orig.args[0],
pool_size=attrs.pool_size,
strides=attrs.strides,
padding=attrs.padding,
layout=attrs.layout,
ceil_mode=attrs.ceil_mode,
count_include_pad=attrs.count_include_pad,
)
return [pool_grad]
@register_gradient("nn.global_avg_pool2d")
def global_avg_pool2d_grad(orig, grad):
"""Returns the gradient of global_avg_pool2d."""
data = orig.args[0]
shape = data.checked_type.shape
layout = orig.attrs.layout
# we assume NCHW or NHWC layout for now, but easy to add more
assert layout in ["NCHW", "NHWC"]
if layout == "NCHW":
pool_size = shape[2], shape[3]
elif layout == "NHWC":
pool_size = shape[1], shape[2]
pool_grad = _nn.avg_pool2d_grad(
grad, data, pool_size=pool_size, strides=(1, 1), padding=(0, 0), layout=layout
)
return [pool_grad]
@register_gradient("concatenate")
def concatenate_grad(orig, grad):
"""
Returns the gradient of concatenate, which is just the downstream gradient
split across the inputs.
"""
assert len(orig.args) == 1
t = orig.args[0]
# calculate split indices. TODO(@altanh): support Any?
axis_dims = [ty.shape[orig.attrs.axis] for ty in t.checked_type.fields]
splits, cumsum = [], 0
for dim in axis_dims[:-1]:
cumsum += dim
splits.append(cumsum)
grads = split(grad, tuple(splits), axis=orig.attrs.axis).tuple_value
return [grads]
@register_gradient("nn.conv2d")
def conv2d_grad(orig, grad):
"""Gradient of conv2d"""
attrs = orig.attrs
data, weight = orig.args
data_shape = get_const_tuple(data.checked_type.shape)
weight_shape = get_const_tuple(weight.checked_type.shape)
_, _, grad_h, grad_w = get_const_tuple(orig.checked_type.shape)
_, _, in_h, in_w = data_shape
_, _, filter_h, filter_w = weight_shape
# infer output_padding
fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(
get_const_tuple(attrs.padding), (filter_h, filter_w)
)
stride_h, stride_w = get_const_tuple(attrs.strides)
out_h = (grad_h - 1) * stride_h - fpad_top - fpad_bottom + filter_h
out_w = (grad_w - 1) * stride_w - fpad_left - fpad_right + filter_w
output_padding = (in_h - out_h, in_w - out_w)
assert attrs.data_layout == "NCHW", "only support NCHW data layout"
assert attrs.kernel_layout == "OIHW", "only support OIHW kernel layout"
assert attrs.out_layout in ["", "NCHW"], "only support NCHW output layout"
if attrs.out_dtype in ["", None]:
assert data.checked_type, "Call InferType first."
out_dtype = data.checked_type.dtype
else:
out_dtype = attrs.out_dtype
backward_data = _nn.conv2d_transpose(
grad,
weight,
strides=attrs.strides,
padding=attrs.padding,
dilation=attrs.dilation,
groups=attrs.groups,
output_padding=output_padding,
out_dtype=out_dtype,
)
backward_weight = _nn.conv2d_backward_weight(
grad,
data,
strides=attrs.strides,
padding=attrs.padding,
dilation=attrs.dilation,
groups=attrs.groups,
channels=attrs.channels,
kernel_size=(filter_h, filter_w),
grad_layout=attrs.out_layout if attrs.out_layout else attrs.data_layout,
data_layout=attrs.data_layout,
kernel_layout=attrs.kernel_layout,
out_dtype=out_dtype,
)
return [backward_data, backward_weight]
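# Shape sketch (illustrative numbers): for NCHW data (1, 3, 32, 32), OIHW
# weight (8, 3, 3, 3), strides (1, 1) and no padding, the forward output and
# hence `grad` have shape (1, 8, 30, 30); output_padding works out to (0, 0),
# backward_data comes back as (1, 3, 32, 32) via conv2d_transpose, and
# backward_weight as (8, 3, 3, 3) via conv2d_backward_weight.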
def _get_reduce_axis(call):
"""Helper function that returns the reduce axis of the call as plain python ints."""
x, axis = call.args[0], call.attrs.axis
shape = x.checked_type.concrete_shape
# should never exclude when axis is None
assert not (axis is None and call.attrs.exclude)
if axis is None:
return None
# convert to nonnegative integers and sort
axis = sorted([ax if ax >= 0 else len(shape) + ax for ax in map(int, axis)])
if call.attrs.exclude:
axis = [ax for ax in range(len(shape)) if ax not in axis]
return axis
def _unreduce_expand(x, axis):
"""Helper function that returns x expanded on the reduced dimensions in axis."""
# assume axis is sorted nonnegative ints
for ax in axis:
x = expand_dims(x, ax)
return x
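# Example (illustrative): a sum over axis=(1,) of a (2, 3, 4) tensor with
# keepdims=False produces a (2, 4) gradient; _unreduce_expand(grad, [1])
# re-inserts the reduced axis to give (2, 1, 4), which can then be broadcast
# or repeated back to the original (2, 3, 4) shape.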
@register_gradient("max")
def max_grad(orig, grad):
"""Returns the gradient of max"""
x, axis = orig.args[0], _get_reduce_axis(orig)
shape = x.checked_type.concrete_shape
repeated = orig
if axis is None:
repeated = full_like(x, repeated)
else:
# expand dims (if necessary) and repeat along each axis
if not orig.attrs.keepdims:
repeated = _unreduce_expand(repeated, axis)
grad = _unreduce_expand(grad, axis)
for ax in axis:
repeated = repeat(repeated, shape[ax], ax)
indicators = cast_like(equal(repeated, x), grad)
num_selected = _sum(indicators, axis, keepdims=True)
# spread error across all max weights
return [indicators * grad / num_selected]
@register_gradient("nn.softmax")
def softmax_grad(orig, grad):
"""Gradient of softmax"""
return [(grad - _sum(grad * orig, orig.attrs.axis, True)) * orig]
@register_gradient("nn.log_softmax")
def log_softmax_grad(orig, grad):
"""Gradient of log_softmax"""
return [grad - _sum(grad, axis=orig.attrs.axis, keepdims=True) * exp(orig)]
@register_gradient("nn.bias_add")
def bias_add_grad(orig, grad):
"""Returns gradient of bias_add"""
data = orig.args[0]
return [
collapse_sum_like(grad, data),
_sum(grad, orig.attrs.axis, keepdims=False, exclude=True),
]
@register_gradient("nn.dense")
def dense_grad(orig, grad):
"""Returns [grad' @ weight, data @ grad']"""
data, weight = orig.args
return [
collapse_sum_like(
_nn.dense(grad, transpose(weight), units=weight.checked_type.shape[1]), data
),
collapse_sum_like(
_nn.dense(transpose(grad), transpose(data), units=data.checked_type.shape[1]), weight
),
]
@register_gradient("nn.matmul")
def matmul_grad(orig, grad):
"""Returns [grad' @ tensor_b, tensor_a @ grad']"""
tensor_a, tensor_b = orig.args
if (orig.attrs["transpose_a"], orig.attrs["transpose_b"]) == (True, True):
return [
collapse_sum_like(
_nn.matmul(tensor_b, grad, transpose_a=True, transpose_b=True), tensor_a
),
collapse_sum_like(
_nn.matmul(grad, tensor_a, transpose_a=True, transpose_b=True), tensor_b
),
]
if (orig.attrs["transpose_a"], orig.attrs["transpose_b"]) == (True, False):
return [
collapse_sum_like(_nn.matmul(tensor_b, grad, transpose_b=True), tensor_a),
collapse_sum_like(_nn.matmul(tensor_a, grad), tensor_b),
]
if (orig.attrs["transpose_a"], orig.attrs["transpose_b"]) == (False, True):
# Keep using Dense op here for not involving extra ops
# TODO(jcf94): Merge all to nn.matmul when it is finally ready
return dense_grad(orig, grad)
# (orig.attrs["transpose_a"], orig.attrs["transpose_b"]) == (False, False)
return [
collapse_sum_like(_nn.matmul(grad, tensor_b, transpose_b=True), tensor_a),
collapse_sum_like(_nn.matmul(tensor_a, grad, transpose_a=True), tensor_b),
]
@register_gradient("nn.batch_matmul")
def batch_matmul_grad(orig, grad):
"""gradient for nn.batch_matmul: in einsum LHS_bik,RHS_bjk->RES_bij
grads: GRAD_OUT_bij,RHS_bjk->GRAD_IN_LHS_bik
GRAD_OUT_bij,LHS_bik->GRAD_IN_RHS_bjk
"""
lhs, rhs = orig.args
if (orig.attrs["transpose_a"], orig.attrs["transpose_b"]) == (True, True):
# ki, jk -> ij
# jk, ij -> ki
# ij, ki -> jk
return [
collapse_sum_like(_nn.batch_matmul(rhs, grad, transpose_a=True, transpose_b=True), lhs),
collapse_sum_like(_nn.batch_matmul(grad, lhs, transpose_a=True, transpose_b=True), rhs),
]
if (orig.attrs["transpose_a"], orig.attrs["transpose_b"]) == (True, False):
# ki, kj -> ij
# kj, ij -> ki
# ki, ij -> kj
return [
collapse_sum_like(
_nn.batch_matmul(rhs, grad, transpose_a=False, transpose_b=True), lhs
),
collapse_sum_like(
_nn.batch_matmul(lhs, grad, transpose_a=False, transpose_b=False), rhs
),
]
if (orig.attrs["transpose_a"], orig.attrs["transpose_b"]) == (False, True):
# ik, jk -> ij
# ij, jk -> ik
# ij, ik -> jk
# Keep using NT format batch_matmul here for not involving extra ops
# TODO(jcf94): Merge all to normal batch_matmul when it is finally ready
return [
collapse_sum_like(
_nn.batch_matmul(
grad,
transpose(rhs, [0, 2, 1]),
transpose_a=False,
transpose_b=True,
),
lhs,
),
collapse_sum_like(
_nn.batch_matmul(
transpose(grad, [0, 2, 1]),
transpose(lhs, [0, 2, 1]),
transpose_a=False,
transpose_b=True,
),
rhs,
),
]
# (orig.attrs["transpose_a"], orig.attrs["transpose_b"]) == (False, False)
# ik, kj -> ij
# ij, kj -> ik
# ik, ij -> kj
return [
collapse_sum_like(_nn.batch_matmul(grad, rhs, transpose_a=False, transpose_b=True), lhs),
collapse_sum_like(_nn.batch_matmul(lhs, grad, transpose_a=True, transpose_b=False), rhs),
]
@register_gradient("reshape")
def reshape_grad(orig, grad):
"""Gradient of reshape"""
return [reshape_like(grad, orig.args[0])]
@register_gradient("dyn.reshape")
def dyn_reshape_grad(orig, grad):
"""Gradient of dyn_reshape"""
return [reshape_like(grad, orig.args[0]), zeros_like(orig.args[1])]
@register_gradient("shape_of")
def shape_of_grad(orig, grad):
"""Gradient of shape_of"""
return [zeros_like(orig.args[0])]
@register_gradient("cast")
def cast_grad(orig, grad):
    """Returns [grad cast back to the dtype of x]"""
x = orig.args[0]
return [cast_like(grad, x)]
@register_gradient("cast_like")
def cast_like_grad(orig, grad):
    """Returns [grad cast to the dtype of x, zeros_like(like)]"""
x, like = orig.args
return [cast_like(grad, x), zeros_like(like)]
@register_gradient("nn.batch_flatten")
def batch_flatten_grad(orig, grad):
"""Returns grad reshaped to data dims"""
data = orig.args[0]
return [reshape_like(grad, data)]
@register_gradient("transpose")
def transpose_grad(orig, grad):
"""Returns grad transposed over the complement of original transpose axes"""
orig_axes = orig.attrs.axes
if orig_axes:
dims = len(orig_axes)
new_axes = [0] * dims
for i in range(dims):
new_axes[int(orig_axes[i])] = i
else:
new_axes = None
return [transpose(grad, axes=new_axes)]
@register_gradient("negative")
def negative_grad(orig, grad):
"""Returns -grad"""
return [-grad]
@register_gradient("sum")
def sum_grad(orig, grad):
"""Returns grad broadcasted to data dims"""
data, axis = orig.args[0], _get_reduce_axis(orig)
if not orig.attrs.keepdims:
if axis is None:
axis = list(range(len(data.checked_type.concrete_shape)))
grad = _unreduce_expand(grad, axis)
return [broadcast_to_like(grad, data)]
@register_gradient("mean")
def mean_grad(orig, grad):
"""Returns grad broadcasted to data dims"""
data, axis = orig.args[0], _get_reduce_axis(orig)
shape = data.checked_type.concrete_shape
if axis is None:
axis = list(range(len(data.checked_type.concrete_shape)))
if not orig.attrs.keepdims:
grad = _unreduce_expand(grad, axis)
mult = 1.0
for a in axis:
mult /= shape[a]
return [broadcast_to_like(grad * const(mult, dtype=data.checked_type.dtype), data)]
@register_gradient("variance")
def variance_grad(orig, grad):
"""Note that we take mean as an argument in the variance node"""
data, data_mean, axis = orig.args[0], orig.args[1], _get_reduce_axis(orig)
unbiased = orig.attrs.unbiased
shape = data.checked_type.concrete_shape
if axis is None:
axis = list(range(len(data.checked_type.concrete_shape)))
if not orig.attrs.keepdims:
grad = _unreduce_expand(grad, axis)
mult1 = 2.0
mult2 = -2.0
count = 1
for a in axis:
count *= shape[a]
if unbiased:
mult2 = mult2 * count / (count - 1)
count -= 1
mult1 /= count
return [
(grad * const(mult1, dtype=data.checked_type.dtype)) * data,
const(mult2, dtype=data.checked_type.dtype) * grad * data_mean,
]
@register_gradient("copy")
def copy_grad(orig, grad):
    """Returns [grad]"""
return [grad]
@register_gradient("nn.cross_entropy")
def cross_entropy_grad(orig, grad):
x, y = orig.args
shape = shape_of(x)
batch_size = take(shape, const(0, dtype="int32"), axis=0)
grad = grad / batch_size.astype(x.checked_type.dtype)
return [-grad * y / x, -grad * log(x)]
@register_gradient("nn.cross_entropy_with_logits")
def cross_entropy_with_logits_grad(orig, grad):
x, y = orig.args
shape = shape_of(x)
batch_size = take(shape, const(0, dtype="int32"), axis=0)
grad = grad / batch_size.astype(x.checked_type.dtype)
return [-grad * y, -grad * x]
@register_gradient("take")
def take_grad(orig, grad):
"""
Returns the gradient of take.
"""
def make_scalar_tensor(v):
if isinstance(v, int):
v = const(v, dtype="int32")
return reshape(v, (1,))
# TODO(@altanh): we currently assume indices are in range
data, indices = orig.args
axis = orig.attrs.axis
batch_dims = orig.attrs.batch_dims
zero, one = map(make_scalar_tensor, [0, 1])
data_grad = zeros_like(data)
try:
data_shape = data.checked_type.concrete_shape
except TypeError as ty_err:
raise OpError("currently take_grad only supports data with concrete shape") from ty_err
if axis is None:
axis = 0
data_grad = reshape(data_grad, (-1,))
data_shape = 1
for dim in data.checked_type.concrete_shape:
data_shape *= dim
data_shape = (data_shape,)
else:
axis = int(axis)
if batch_dims is None:
batch_dims = 0
else:
batch_dims = int(batch_dims)
if batch_dims != 0:
raise OpError("take_grad only supports batch_dims equales to 0")
strides = [1] * len(data_shape)
if len(indices.checked_type.shape) == 0:
# axis on grad has been squeezed in this case
num_indices = one
indices = reshape(indices, (1,))
grad = expand_dims(grad, int(axis))
elif len(indices.checked_type.shape) == 1:
num_indices = take(shape_of(indices), zero, axis=0)
else:
raise OpError("take_grad only supports scalar or 1D indices")
def loop_cond(data_grad, i):
return squeeze(less(i, num_indices))
def loop_body(data_grad, i):
index = take(indices, i, axis=0)
grad_slice = take(grad, i, axis=axis)
begin, end = [], []
for ax, size in enumerate(data_shape):
size = make_scalar_tensor(size)
begin.append(zero if ax != axis else index)
end.append(size if ax != axis else index + one)
begin, end = concatenate(begin, axis=0), concatenate(end, axis=0)
# data_grad[:,...,index at axis,...,:] += grad_slice
update = strided_slice(data_grad, begin, end, strides=strides)
update = update + grad_slice # no need to expand grad_slice since i has shape (1,)
next_data_grad = strided_set(data_grad, update, begin, end, strides=strides)
return (next_data_grad, i + one)
loop_vars = [
Var("data_grad", type_annotation=TensorType(data_shape, data.checked_type.dtype)),
Var("i", type_annotation=TensorType((1,), "int32")),
]
loop = while_loop(loop_cond, loop_vars, loop_body)
result = loop(data_grad, zero)
data_grad = TupleGetItem(result, 0)
if orig.attrs.axis is None:
data_grad = reshape_like(data_grad, data)
return [data_grad, zeros_like(orig.args[1])]
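# Sketch of the scatter performed above (illustrative): for data of shape
# (4, 5), indices [1, 3] and axis=0, the loop adds grad[0] into row 1 and
# grad[1] into row 3 of a zero tensor shaped like data; the indices
# themselves receive a zero gradient.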
@register_gradient("contrib_reverse_reshape")
def reverse_reshape_grad(orig, grad):
"""
Returns the gradient of reverse_reshape (same as reshape).
"""
return [reshape_like(grad, orig.args[0])]
@register_gradient("stack")
def stack_grad(orig, grad):
"""
Returns grad split across stacked inputs.
"""
stack_axis = int(orig.attrs.axis)
sections = len(orig.args[0].checked_type.fields)
splits = split(grad, sections, stack_axis)
splits = Tuple([squeeze(x, axis=[stack_axis]) for x in splits])
return [splits]
@register_gradient("squeeze")
def squeeze_grad(orig, grad):
"""
Returns grad expanded to input size.
"""
# this should work, can't use expand_dims since we lose
# squeeze information when axis=None
return [reshape_like(grad, orig.args[0])]
@register_gradient("expand_dims")
def expand_dims_grad(orig, grad):
"""
Returns grad squeezed on expanded dims.
"""
axis = int(orig.attrs.axis)
for _ in range(orig.attrs.num_newaxis):
grad = squeeze(grad, axis=[axis])
return [grad]
@register_gradient("arange")
def arange_grad(orig, grad):
"""
Returns the gradient of arange.
"""
start, stop, step = orig.args
length = take(shape_of(orig), const(0, dtype="int32"), axis=0)
grad_start = cast_like(_sum(grad), start)
grad_stop = zeros_like(stop)
grad_step = cast_like(arange(length, dtype="int32"), grad) * grad
grad_step = cast_like(_sum(grad_step), step)
return [grad_start, grad_stop, grad_step]
@register_gradient("gather_nd")
def gather_nd_grad(orig, grad):
"""
Returns the gradient of gather_nd, which is simply scatter_nd.
"""
data, indices = orig.args
return [scatter_nd(zeros_like(data), indices, grad, mode="add"), zeros_like(indices)]
@register_gradient("reshape_like")
def reshape_like_grad(orig, grad):
"""
Returns the gradient of reshape_like.
"""
data, shape_like = orig.args
return [reshape_like(grad, data), zeros_like(shape_like)]
@register_gradient("where")
def where_grad(orig, grad):
"""
Returns the gradient of where.
"""
cond, x, y = orig.args
g_zeros = zeros_like(grad)
grad_x = collapse_sum_like(where(cond, grad, g_zeros), x)
grad_y = collapse_sum_like(where(cond, g_zeros, grad), y)
return [zeros_like(cond), grad_x, grad_y]
@register_gradient("less_equal")
def less_equal_grad(orig, grad):
"""
Returns the gradient of less_equal.
"""
return [zeros_like(orig.args[0]), zeros_like(orig.args[1])]
@register_gradient("not_equal")
def not_equal_grad(orig, grad):
"""
Returns the gradient of not_equal (just zeros).
"""
return [zeros_like(orig.args[0]), zeros_like(orig.args[1])]
@register_gradient("strided_slice")
def strided_slice_grad(orig, grad):
"""
Returns the gradient of strided_slice, which is equal to grad where the
input was sliced and zero elsewhere.
"""
assert orig.attrs.axes is None, "grad for strided_slice with axes is not yet supported"
x = orig.args[0]
begin = get_const_tuple(orig.attrs.begin)
end = get_const_tuple(orig.attrs.end)
strides = get_const_tuple(orig.attrs.strides)
if orig.attrs.slice_mode == "size":
# convert sizes to ending indices and ignore strides
end = list(end)
for i, (start, size) in enumerate(zip(begin, end)):
if size == -1:
end[i] = int(x.checked_type.shape[i])
else:
end[i] = start + size
strides = None
else:
assert orig.attrs.slice_mode == "end"
return [strided_set(zeros_like(x), grad, begin, end, strides)]
@register_gradient("one_hot")
def one_hot_grad(orig, grad):
"""
Returns the gradient of one_hot, which is the sum of grad at on and off
indices for on_value and off_value respectively.
"""
indices, on_value, off_value = orig.args
g_zeros = zeros_like(grad)
on_mask = equal(orig, on_value)
grad_on = _sum(where(on_mask, grad, g_zeros))
grad_off = _sum(where(on_mask, g_zeros, grad))
return [zeros_like(indices), cast_like(grad_on, on_value), cast_like(grad_off, off_value)]
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/_transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Backend compiler related feature registration"""
# pylint: disable=invalid-name,unused-argument, len-as-condition, too-many-nested-blocks,
# pylint: disable=too-many-local-variables, too-many-arguments, no-else-return
from __future__ import absolute_import
import tvm
from tvm import te, topi
from tvm.runtime import convert
from tvm.te.hybrid import script
from tvm.topi.utils import get_const_int, get_const_tuple
from . import op as _reg
from . import strategy
from ._tensor import elemwise_shape_func
from .op import OpPattern
_reg.register_broadcast_schedule("broadcast_to")
_reg.register_broadcast_schedule("broadcast_to_like")
_reg.register_broadcast_schedule("expand_dims")
_reg.register_broadcast_schedule("repeat")
_reg.register_broadcast_schedule("tile")
_reg.register_broadcast_schedule("where")
_reg.register_injective_schedule("squeeze")
_reg.register_injective_schedule("reshape")
_reg.register_injective_schedule("reshape_like")
_reg.register_injective_schedule("full")
_reg.register_injective_schedule("full_like")
_reg.register_injective_schedule("arange")
_reg.register_injective_schedule("meshgrid")
_reg.register_injective_schedule("reverse")
_reg.register_injective_schedule("reverse_sequence")
_reg.register_injective_schedule("cast")
_reg.register_injective_schedule("cast_like")
_reg.register_injective_schedule("reinterpret")
_reg.register_injective_schedule("strided_slice")
_reg.register_injective_schedule("slice_like")
_reg.register_injective_schedule("split")
_reg.register_injective_schedule("take")
_reg.register_injective_schedule("stack")
_reg.register_injective_schedule("contrib_reverse_reshape")
_reg.register_injective_schedule("gather")
_reg.register_injective_schedule("gather_nd")
_reg.register_injective_schedule("sequence_mask")
_reg.register_injective_schedule("one_hot")
_reg.register_reduce_schedule("collapse_sum_like")
_reg.register_reduce_schedule("collapse_sum_to")
_reg.register_injective_schedule("unravel_index")
_reg.register_injective_schedule("sparse_to_dense")
_reg.register_injective_schedule("matrix_set_diag")
_reg.register_injective_schedule("adv_index")
# concatenate
@_reg.register_compute("concatenate")
def compute_concat(attrs, inputs, output_type):
return [topi.concatenate(inputs, attrs.axis)]
_reg.register_strategy("concatenate", strategy.concatenate_strategy)
# sliding_window
@_reg.register_compute("sliding_window")
def compute_sliding_window(attrs, inputs, output_type):
"""Compute definition of sliding_window"""
return [topi.sliding_window(inputs[0], attrs.axis, attrs.window_shape, attrs.strides)]
_reg.register_strategy("sliding_window", strategy.sliding_window_strategy)
# strided_set
@_reg.register_compute("strided_set")
def compute_strided_set(attrs, inputs, output_type):
"""Compute definition of strided_set"""
return [topi.strided_set(inputs[0], inputs[1], inputs[2], inputs[3], inputs[4])]
_reg.register_injective_schedule("strided_set")
# layout_transform
_reg.register_injective_schedule("layout_transform")
_reg.register_pattern("layout_transform", OpPattern.INJECTIVE)
_reg.register_injective_schedule("auto_scheduler_layout_transform")
_reg.register_pattern("auto_scheduler_layout_transform", OpPattern.INJECTIVE)
_reg.register_injective_schedule("meta_schedule_layout_transform")
_reg.register_pattern("meta_schedule_layout_transform", OpPattern.INJECTIVE)
# argwhere
_reg.register_strategy("argwhere", strategy.argwhere_strategy)
# scatter
@_reg.register_compute("scatter")
def compute_scatter(attrs, inputs, output_type):
"""Compute definition of scatter"""
return [topi.scatter(inputs[0], inputs[1], inputs[2], attrs.axis)]
_reg.register_strategy("scatter", strategy.scatter_strategy)
# sparse_fill_empty_rows
@_reg.register_compute("sparse_fill_empty_rows")
def compute_sparse_fill_empty_rows(attrs, inputs, output_type):
"""Compute definition of sparse_fill_empty_rows"""
return topi.sparse_fill_empty_rows(
inputs[0],
inputs[1],
inputs[2],
inputs[3],
output_type.fields[0].shape,
output_type.fields[1].shape,
output_type.fields[2].shape,
)
_reg.register_strategy("sparse_fill_empty_rows", strategy.sparse_fill_empty_rows_strategy)
# sparse_reshape
@_reg.register_compute("sparse_reshape")
def compute_reshape(attrs, inputs, output_type):
"""Compute definition of sparse_reshape"""
return topi.sparse_reshape(
inputs[0],
inputs[1],
inputs[2],
output_type.fields[0].shape,
output_type.fields[1].shape,
)
_reg.register_strategy("sparse_reshape", strategy.sparse_reshape_strategy)
# stft
@_reg.register_compute("stft")
def compute_stft(attrs, inputs, output_type):
"""Compute definition of stft"""
return topi.stft(
inputs[0],
attrs.n_fft,
attrs.hop_length,
attrs.win_length,
attrs.window,
attrs.normalized,
attrs.onesided,
output_type.shape,
)
_reg.register_strategy("stft", strategy.stft_strategy)
@script
def _stft_shape_func(data, n_fft, hop_length, onesided):
output_shape = output_tensor((4,), "int64")
output_shape[0] = int64(data.shape[0])
if onesided:
output_shape[1] = int64(int64(n_fft) // int64(2)) + int64(1)
else:
output_shape[1] = int64(n_fft)
output_shape[2] = int64(int64(data.shape[1] - n_fft) // int64(hop_length)) + int64(1)
output_shape[3] = int64(2)
return output_shape
@_reg.register_shape_func("stft", True)
def stft_shape_func(attrs, inputs, _):
"""
Shape func for stft.
"""
return [
_stft_shape_func(
inputs[0], convert(attrs.n_fft), convert(attrs.hop_length), convert(attrs.onesided)
)
]
# trilu
_reg.register_strategy("trilu", strategy.trilu_strategy)
# scatter_add
@_reg.register_compute("scatter_add")
def compute_scatter_add(attrs, inputs, output_type):
"""Compute definition of scatter_add"""
return [topi.scatter_add(inputs[0], inputs[1], inputs[2], attrs.axis)]
_reg.register_strategy("scatter_add", strategy.scatter_add_strategy)
# scatter_nd
@_reg.register_compute("scatter_nd")
def compute_scatter_nd(attrs, inputs, output_type):
"""Compute definition of scatter_nd"""
return [topi.scatter_nd(inputs[0], inputs[1], inputs[2], attrs.mode)]
_reg.register_strategy("scatter_nd", strategy.scatter_nd_strategy)
# cumsum
@_reg.register_compute("cumsum")
def compute_cumsum(attrs, inputs, output_type):
"""Compute definition of cumsum"""
return [topi.cumsum(inputs[0], attrs.axis, attrs.dtype, attrs.exclusive)]
_reg.register_strategy("cumsum", strategy.cumsum_strategy)
_reg.register_shape_func("cumsum", False, elemwise_shape_func)
# cumprod
@_reg.register_compute("cumprod")
def compute_cumprod(attrs, inputs, output_type):
"""Compute definition of cumprod"""
return [topi.cumprod(inputs[0], attrs.axis, attrs.dtype, attrs.exclusive)]
_reg.register_strategy("cumprod", strategy.cumprod_strategy)
_reg.register_shape_func("cumprod", False, elemwise_shape_func)
@_reg.register_compute("unique")
def compute_unique(attrs, inputs, output_type):
"""Compute definition of unique"""
return topi.unique(inputs[0], attrs.sorted, attrs.return_counts)
_reg.register_strategy("unique", strategy.unique_strategy)
# invert_permutation
_reg.register_strategy("invert_permutation", strategy.invert_permutation_strategy)
_reg.register_shape_func("invert_permutation", False, elemwise_shape_func)
#####################
# Shape functions #
#####################
@script
def _arange_shape_func(start, stop, step):
out = output_tensor((1,), "int64")
if step[()] < 0:
out[0] = int64(ceil_div((int64(start[()]) - int64(stop[()])), int64(-step[()])))
else:
out[0] = int64(ceil_div((int64(stop[()]) - int64(start[()])), int64(step[()])))
return out
@_reg.register_shape_func("arange", True)
def arange_shape_func(attrs, inputs, _):
"""
Shape func for arange
"""
return [_arange_shape_func(*inputs)]
@script
def _strided_slice_shape_func_input_shape(data_shape, begin, end, strides, slice_mode):
ndim = len(data_shape)
out = output_tensor((ndim,), "int64")
for i in const_range(ndim):
dim_size = int64(data_shape[i])
cbegin = int64(0)
cend = dim_size
cstride = int64(1)
if len(strides) > i:
cstride = int64(strides[i])
if len(begin) > i:
cbegin = int64(begin[i])
elif cstride < 0:
cbegin = dim_size
if len(end) <= i:
if cstride < 0:
cend = int64(0)
elif slice_mode != 0:
cstride = int64(1)
if end[i] < 0:
cend = dim_size
else:
cend = cbegin + int64(end[i])
else:
if end[i] > data_shape[i]:
cend = dim_size
else:
cend = int64(end[i])
assert cstride != 0, "Strides can't be zero."
if cbegin < 0:
cbegin += dim_size
if cend < 0:
cend += dim_size
if cstride < 0:
if cend < 0:
cend = int64(-1)
if cbegin > dim_size - 1:
cbegin = dim_size - 1
slice_range = cbegin - cend
step = -cstride
else:
slice_range = cend - cbegin
step = cstride
out[i] = int64(ceil_div(slice_range, step))
return out
@script
def _strided_slice_shape_func_with_axes(data_shape, begin, end, strides, slice_mode, axes):
ndim = data_shape.shape[0]
out = output_tensor((ndim,), "int64")
for i in const_range(ndim):
out[i] = data_shape[i]
for i in const_range(len(axes)):
dim_size = int64(data_shape[axes[i]])
cbegin = int64(0)
cend = dim_size
cstride = int64(1)
if len(strides) > i:
cstride = int64(strides[i])
if len(begin) > i:
cbegin = int64(begin[i])
elif cstride < 0:
cbegin = dim_size
if len(end) <= i:
cend = dim_size
elif slice_mode != 0:
cstride = int64(1)
if end[i] < 0:
cend = dim_size
else:
cend = cbegin + int64(end[i])
else:
if end[i] > data_shape[axes[i]]:
cend = dim_size
else:
cend = int64(end[i])
assert cstride != 0, "Strides can't be zero."
if cbegin < 0:
cbegin += dim_size
if cend < 0:
cend += dim_size
if cstride < 0:
if cend < 0:
cend = int64(-1)
if cbegin > dim_size - 1:
cbegin = dim_size - 1
slice_range = cbegin - cend
step = -cstride
else:
slice_range = cend - cbegin
step = cstride
out[axes[i]] = int64(ceil_div(slice_range, step))
return out
@_reg.register_shape_func("strided_slice", False)
def strided_slice_shape_func(attrs, inputs, _):
"""
Shape func for strided_slice
"""
slice_mode = convert(0 if attrs.slice_mode == "end" else 1)
if attrs.axes is None:
return [
_strided_slice_shape_func_input_shape(
inputs[0], attrs.begin, attrs.end, attrs.strides, slice_mode
)
]
return [
_strided_slice_shape_func_with_axes(
inputs[0], attrs.begin, attrs.end, attrs.strides, slice_mode, attrs.axes
)
]
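# Worked example (illustrative): for data of shape (10, 10) with
# begin=(2, 0), end=(8, 10), strides=(2, 1) and slice_mode="end", the output
# shape is (ceil((8 - 2) / 2), 10) == (3, 10).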
@script
def _one_hot_shape_func(indices_shape, depth, axis):
in_ndim = indices_shape.shape[0]
out_ndim = in_ndim + 1
true_axis = in_ndim if axis == -1 else axis
indices_i = 0
out = output_tensor((out_ndim,), "int64")
for i in range(out_ndim):
if i == true_axis:
out[i] = int64(depth)
else:
out[i] = int64(indices_shape[indices_i])
indices_i += 1
return out
@_reg.register_shape_func("one_hot", False)
def one_hot_shape_func(attrs, inputs, _):
"""
Shape func for one_hot
"""
shape_func = [_one_hot_shape_func(inputs[0], convert(attrs.depth), convert(attrs.axis))]
return shape_func
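# Example (illustrative): indices of shape (3,) with depth=5 and axis=-1
# produce a (3, 5) output; the depth dimension is inserted at `axis`,
# counted in the output rank.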
@script
def _concatenate_shape_func(inputs, axis):
ndim = inputs[0].shape[0]
out = output_tensor((ndim,), "int64")
for i in const_range(ndim):
if i != axis:
out[i] = inputs[0][i]
for j in const_range(1, len(inputs)):
assert out[i] == inputs[j][i], "Dims mismatch in the inputs of concatenate."
else:
out[i] = int64(0)
for j in const_range(len(inputs)):
out[i] += inputs[j][i]
return out
@_reg.register_shape_func("concatenate", False)
def concatenate_shape_func(attrs, inputs, _):
axis = get_const_int(attrs.axis)
if axis < 0:
axis += inputs[0].shape[0]
return [_concatenate_shape_func(inputs, convert(axis))]
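# Example (illustrative): concatenating shapes (2, 3) and (2, 5) along
# axis=1 yields (2, 8); all non-concatenation dimensions must match, and a
# negative axis is normalized against the input rank first.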
@script
def _reshape_shape_func_input_shape(data_shape, newshape, ndim, allowzero):
out = output_tensor((ndim,), "int64")
src_idx = 0
dst_idx = 0
infer_idx = -1
copy = False
skip = 0
for i in const_range(len(newshape)):
if skip > 0:
skip -= 1
elif newshape[i] > 0:
out[dst_idx] = int64(newshape[i])
src_idx += 1
dst_idx += 1
elif newshape[i] == 0:
if allowzero:
out[dst_idx] = int64(newshape[i])
else:
out[dst_idx] = data_shape[src_idx]
src_idx += 1
dst_idx += 1
elif newshape[i] == -1:
assert infer_idx < 0, "One and only one dim can be inferred"
out[dst_idx] = int64(1)
infer_idx = i
src_idx += 1
dst_idx += 1
elif newshape[i] == -2:
copy = True
elif newshape[i] == -3:
assert data_shape.shape[0] - src_idx > 1, "Not enough dims in input shape for -3"
out[dst_idx] = data_shape[src_idx] * data_shape[src_idx + 1]
src_idx += 2
dst_idx += 1
elif newshape[i] == -4:
assert len(newshape) - i > 2, "Not enough dims in new shape for -4"
if newshape[i + 1] == -1:
assert newshape[i + 2] != -1, "Split dims cannot both be -1."
out[dst_idx] = data_shape[src_idx] // int64(newshape[i + 2])
out[dst_idx + 1] = int64(newshape[i + 2])
else:
out[dst_idx] = int64(newshape[i + 1])
if newshape[i + 2] == -1:
out[dst_idx + 1] = data_shape[src_idx] // int64(newshape[i + 1])
else:
out[dst_idx + 1] = int64(newshape[i + 2])
assert (
data_shape[src_idx] == out[dst_idx] * out[dst_idx + 1]
), "Product of split dims doesn't match to input dim"
src_idx += 1
dst_idx += 2
skip = 2
else:
assert False, "Invalid special values in new shape"
if len(data_shape.shape) > 0:
# if data is not constant, we can then handle -1 and -2
if copy:
for i in range(src_idx, data_shape.shape[0]):
out[dst_idx] = data_shape[i]
dst_idx += 1
if infer_idx >= 0:
old_size = int64(1)
for i in const_range(data_shape.shape[0]):
old_size *= data_shape[i]
new_size = int64(1)
for i in const_range(out.shape[0]):
new_size *= out[i]
out[infer_idx] = old_size // new_size
return out
@_reg.register_shape_func("reshape", False)
def reshape_shape_func(attrs, inputs, out_ndims):
newshape = get_const_tuple(attrs.newshape)
allowzero = attrs.allowzero
return [
_reshape_shape_func_input_shape(
inputs[0], convert(newshape), out_ndims[0], convert(allowzero)
)
]
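# The special values in `newshape` mirror the reshape op semantics: 0 copies the
# corresponding input dim (unless allowzero), -1 infers one dim from the remaining
# size, -2 copies all remaining input dims, -3 merges two input dims, and -4 splits
# one input dim into the next two listed values. As an illustrative sketch (values
# assumed, not from the source): data_shape = (2, 3, 4, 5) with newshape = (0, -3, -1)
# yields out[0] = 2, out[1] = 3 * 4 = 12 and out[2] inferred as 120 // 24 = 5,
# i.e. an output shape of (2, 12, 5).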
@script
def _take_no_axis_shape_func(indices_shape, out_ndim):
out = output_tensor((out_ndim,), "int64")
for i in const_range(out_ndim):
out[i] = indices_shape[i]
return out
@script
def _take_with_axis_shape_func(data_shape, indices_shape, axis, batch_dims, out_ndim):
out = output_tensor((out_ndim,), "int64")
for i in const_range(axis):
out[i] = data_shape[i]
if len(indices_shape.shape) == 0:
# indices is constant
for i in const_range(axis + 1, len(data_shape)):
out[i - 1] = data_shape[i]
else:
for i in const_range(len(indices_shape) - batch_dims):
out[axis + i] = indices_shape[i + batch_dims]
for i in const_range(axis + 1, len(data_shape)):
out[len(indices_shape) + i - 1 - batch_dims] = data_shape[i]
return out
@_reg.register_shape_func("take", False)
def take_shape_func(attrs, inputs, out_ndims):
"""
Shape function for take op.
"""
if attrs.axis is None:
return [_take_no_axis_shape_func(inputs[1], out_ndims[0])]
axis = get_const_int(attrs.axis)
batch_dims = get_const_int(attrs.batch_dims)
data_ndim = int(inputs[0].shape[0])
if inputs[1].shape:
indices_ndim = int(inputs[1].shape[0])
if axis < 0:
axis += data_ndim
assert 0 <= axis < data_ndim
if batch_dims < 0:
batch_dims += indices_ndim
return [_take_with_axis_shape_func(*inputs, convert(axis), convert(batch_dims), out_ndims[0])]
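# Sketch of the resulting rank arithmetic (assumed example values): taking along
# axis=1 of data with shape (2, 3, 4) using a 1-D indices tensor of shape (5,)
# and batch_dims=0 gives out_ndim = 3 + 1 - 1 - 0 = 3 and an output shape of
# (2, 5, 4): dims before the axis are copied, the indices shape is spliced in,
# and the trailing data dims follow.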
@_reg.register_legalize("take")
def legalize_take(attrs, inputs, types):
"""Legalize take op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current op
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.take_legalize(attrs, inputs, types)
@script
def _argwhere_shape_func_1d(condition):
out = output_tensor((2,), "int64")
out[0] = int64(0)
out[1] = int64(1)
for i1 in range(condition.shape[0]):
if condition[i1] != 0:
out[0] += int64(1)
return out
@script
def _argwhere_shape_func_2d(condition):
out = output_tensor((2,), "int64")
out[0] = int64(0)
out[1] = int64(2)
for i1 in range(condition.shape[0]):
for i2 in range(condition.shape[1]):
if condition[i1, i2] != 0:
out[0] += int64(1)
return out
@script
def _argwhere_shape_func_3d(condition):
out = output_tensor((2,), "int64")
out[0] = int64(0)
out[1] = int64(3)
for i1 in range(condition.shape[0]):
for i2 in range(condition.shape[1]):
for i3 in range(condition.shape[2]):
if condition[i1, i2, i3] != 0:
out[0] += int64(1)
return out
@script
def _argwhere_shape_func_4d(condition):
out = output_tensor((2,), "int64")
out[0] = int64(0)
out[1] = int64(4)
for i1 in range(condition.shape[0]):
for i2 in range(condition.shape[1]):
for i3 in range(condition.shape[2]):
for i4 in range(condition.shape[3]):
if condition[i1, i2, i3, i4] != 0:
out[0] += int64(1)
return out
@script
def _argwhere_shape_func_5d(condition):
out = output_tensor((2,), "int64")
out[0] = int64(0)
out[1] = int64(5)
for i1 in range(condition.shape[0]):
for i2 in range(condition.shape[1]):
for i3 in range(condition.shape[2]):
for i4 in range(condition.shape[3]):
for i5 in range(condition.shape[4]):
if condition[i1, i2, i3, i4, i5] != 0:
out[0] += int64(1)
return out
@_reg.register_shape_func("argwhere", True)
def argwhere_shape_func(attrs, inputs, out_ndims):
"""
Shape function for argwhere.
"""
if len(inputs[0].shape) == 1:
return [_argwhere_shape_func_1d(inputs[0])]
if len(inputs[0].shape) == 2:
return [_argwhere_shape_func_2d(inputs[0])]
if len(inputs[0].shape) == 3:
return [_argwhere_shape_func_3d(inputs[0])]
if len(inputs[0].shape) == 4:
return [_argwhere_shape_func_4d(inputs[0])]
if len(inputs[0].shape) == 5:
return [_argwhere_shape_func_5d(inputs[0])]
raise ValueError("Does not support rank higher than 5 in argwhere")
_reg.register_shape_func("scatter", False, elemwise_shape_func)
_reg.register_shape_func("scatter_add", False, elemwise_shape_func)
_reg.register_shape_func("scatter_nd", False, elemwise_shape_func)
@script
def _sparse_fill_empty_rows_shape_func(sparse_indices, dense_shape):
new_sparse_indices_shape = output_tensor((2,), "int64")
new_sparse_values_shape = output_tensor((1,), "int64")
empty_row_indicator_shape = output_tensor((1,), "int64")
num_dense_rows = int64(dense_shape[0])
if int64(sparse_indices.shape[0]) == int64(0): # Handle Empty Case
# Total rows will equal dense_shape[0]
new_sparse_indices_shape[0] = num_dense_rows
new_sparse_indices_shape[1] = int64(sparse_indices.shape[1])
new_sparse_values_shape[0] = num_dense_rows
empty_row_indicator_shape[0] = num_dense_rows
return (new_sparse_indices_shape, new_sparse_values_shape, empty_row_indicator_shape)
else:
count = int64(sparse_indices.shape[0]) # Add count of all rows already in sparse_indices
for i in range(1, int64(sparse_indices.shape[0])):
index = int64(sparse_indices[i, 0])
prev_index = int64(sparse_indices[i - 1, 0] + 1)
if index > prev_index:
count += index - prev_index # Add count of all rows between two consecutive indices
count += int64(sparse_indices[0, 0]) # Add count from 0 to first row id in sparse_indices
count += int64(
num_dense_rows - 1 - sparse_indices[sparse_indices.shape[0] - 1, 0]
) # Add count from last row id to dense_shape - 1
new_sparse_indices_shape[0] = int64(count)
new_sparse_indices_shape[1] = int64(sparse_indices.shape[1])
new_sparse_values_shape[0] = int64(count)
empty_row_indicator_shape[0] = num_dense_rows
return (new_sparse_indices_shape, new_sparse_values_shape, empty_row_indicator_shape)
@_reg.register_shape_func("sparse_fill_empty_rows", True)
def sparse_fill_empty_rows_func(attrs, inputs, _):
return _sparse_fill_empty_rows_shape_func(inputs[0], inputs[2])
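# Illustrative example of the row counting above (assumed values, not from a test):
# with sparse_indices = [[1, 0], [1, 1], [4, 2]] and dense_shape[0] = 6, the count
# starts at the 3 existing entries, adds 2 for the empty rows 2 and 3 between row ids
# 1 and 4, adds 1 for row 0 before the first id and 1 for row 5 after the last id,
# giving new_sparse_indices_shape = (7, 2), new_sparse_values_shape = (7,) and
# empty_row_indicator_shape = (6,).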
@script
def _sparse_reshape_shape_func(sparse_indices_shape, prev_shape_shape, new_shape_shape):
indices_shape = output_tensor((2,), "int64")
indices_shape[0] = int64(sparse_indices_shape[0])
indices_shape[1] = int64(new_shape_shape[0])
shape_tensor = output_tensor((1,), "int64")
shape_tensor[0] = int64(new_shape_shape[0])
return (indices_shape, shape_tensor)
@_reg.register_shape_func("sparse_reshape", False)
def sparse_reshape_shape_func(attrs, inputs, _):
"""
Shape func for sparse_reshape.
"""
return _sparse_reshape_shape_func(inputs[0], inputs[1], inputs[2])
@script
def _layout_transform_shape_func(
data_shape, out_layout_len, dst_equal_list, dst_mul_list, dst_div_list, dst_mix_list
):
out = output_tensor((out_layout_len,), "int64")
for i in const_range(len(dst_equal_list)):
out[dst_equal_list[i][0]] = data_shape[dst_equal_list[i][1]]
for i in const_range(len(dst_mul_list)):
out[dst_mul_list[i][0]] = data_shape[dst_mul_list[i][1]] * data_shape[dst_mul_list[i][2]]
for i in const_range(len(dst_div_list)):
out[dst_div_list[i][0]] = data_shape[dst_div_list[i][1]] // dst_div_list[i][3]
out[dst_div_list[i][2]] = int64(dst_div_list[i][3])
for i in const_range(len(dst_mix_list)):
out[dst_mix_list[i][0]] = (
data_shape[dst_mix_list[i][1]] * dst_mix_list[i][2] // dst_mix_list[i][4]
)
out[dst_mix_list[i][3]] = int64(dst_mix_list[i][4])
return out
@_reg.register_shape_func("layout_transform", False)
def layout_transform_shape_func(attrs, inputs, _):
"""
Shape function for layout_transform op.
"""
def _fetch_axis(layout):
major_axes = []
minor_axes = {}
num_start = -1
for i, item in enumerate(layout):
if "A" <= item <= "Z":
major_axes.append(item)
elif "a" <= item <= "z":
last_num = int(layout[num_start:i])
minor_axes[item] = last_num
num_start = -1
elif num_start < 0:
num_start = i
return major_axes, minor_axes
_, src_minor_axes = _fetch_axis(attrs.src_layout)
dst_major_axes, dst_minor_axes = _fetch_axis(attrs.dst_layout)
src_letter_list = []
dst_letter_list = []
for item in attrs.src_layout:
if "A" <= item <= "Z" or "a" <= item <= "z":
src_letter_list.append(item)
for item in attrs.dst_layout:
if "A" <= item <= "Z" or "a" <= item <= "z":
dst_letter_list.append(item)
out_layout_len = len(dst_major_axes) + len(dst_minor_axes)
dst_equal_list = []
dst_mul_list = []
dst_div_list = []
dst_mix_list = []
for key in dst_major_axes:
if key.lower() not in dst_minor_axes:
if key.lower() not in src_minor_axes:
dst_equal_list.append((dst_letter_list.index(key), src_letter_list.index(key)))
else:
dst_mul_list.append(
(
dst_letter_list.index(key),
src_letter_list.index(key),
src_letter_list.index(key.lower()),
)
)
else:
if key.lower() not in src_minor_axes:
dst_div_list.append(
(
dst_letter_list.index(key),
src_letter_list.index(key),
dst_letter_list.index(key.lower()),
dst_minor_axes[key.lower()],
)
)
else:
dst_mix_list.append(
(
dst_letter_list.index(key),
src_letter_list.index(key),
src_minor_axes[key.lower()],
dst_letter_list.index(key.lower()),
dst_minor_axes[key.lower()],
)
)
return [
_layout_transform_shape_func(
inputs[0],
convert(out_layout_len),
convert(dst_equal_list),
convert(dst_mul_list),
convert(dst_div_list),
convert(dst_mix_list),
)
]
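# Example of the bookkeeping above (a sketch with an assumed layout pair): for
# src_layout = "NCHW" and dst_layout = "NCHW4c", the axis "C" appears as a minor
# axis only in the destination, so it lands in dst_div_list as
# (dst index of C, src index of C, dst index of c, 4). A data shape of
# (1, 32, 224, 224) is then transformed to (1, 32 // 4, 224, 224, 4).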
@script
def _expand_dim_shape_func(data_shape, ndim, axis, num_newaxis):
out = output_tensor((ndim + num_newaxis,), "int64")
for i in const_range(out.shape[0]):
if i < axis:
out[i] = data_shape[i]
elif i < axis + num_newaxis:
out[i] = int64(1)
else:
out[i] = data_shape[i - num_newaxis]
return out
@_reg.register_shape_func("expand_dims", False)
def expand_dim_shape_func(attrs, inputs, _):
"""
Shape function for expand_dim op.
"""
axis = get_const_int(attrs.axis)
num_newaxis = get_const_int(attrs.num_newaxis)
if axis < 0:
axis = inputs[0].shape[0] + axis + 1
ndim = inputs[0].shape[0] if inputs[0].shape else 0
return [_expand_dim_shape_func(inputs[0], convert(ndim), convert(axis), convert(num_newaxis))]
@script
def _transpose_shape_func(data_shape, axes):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(len(axes)):
out[i] = data_shape[axes[i]]
return out
@_reg.register_shape_func("transpose", False)
def transpose_shape_func(attrs, inputs, _):
"""
Shape function for transpose op.
"""
axes = attrs.axes if attrs.axes is None else get_const_tuple(attrs.axes)
if axes is None:
axes = list(range(inputs[0].shape[0].value))
axes.reverse()
axes = list(axes)
for i, axis in enumerate(axes):
if axis < 0:
axes[i] = inputs[0].shape[0] + axis
return [_transpose_shape_func(inputs[0], convert(axes))]
_reg.register_schedule("transpose", strategy.schedule_transpose)
@script
def _squeeze_shape_func(data_shape, keep_axes, remove_axes):
out = output_tensor((len(keep_axes),), "int64")
for i in const_range(len(keep_axes)):
out[i] = data_shape[keep_axes[i]]
for i in const_range(len(remove_axes)):
assert data_shape[remove_axes[i]] == 1, "Removed dimension must have size 1"
return out
@_reg.register_shape_func("squeeze", False)
def squeeze_shape_func(attrs, inputs, _):
"""
Shape function for squeeze op.
"""
axis = attrs.axis if attrs.axis is None else get_const_tuple(attrs.axis)
keep_axes = []
remove_axes = []
if axis is not None:
for i in range(inputs[0].shape[0].value):
if i not in axis:
keep_axes.append(i)
else:
remove_axes.append(i)
# Due to the current Relay type system, it is possible that even
# a static kernel function needs a shape function. To handle
# this case, we allow axis to be None in the squeeze shape func
# for now.
# TODO(kevinthesun): Enhance the Relay type system to avoid this.
if keep_axes:
out = _squeeze_shape_func(inputs[0], convert(keep_axes), convert(remove_axes))
else:
out = te.compute((), lambda *indices: 0)
return [out]
@script
def _reshape_like_shape_func(target_shape):
out = output_tensor((target_shape.shape[0],), "int64")
for i in const_range(target_shape.shape[0]):
out[i] = target_shape[i]
return out
@_reg.register_shape_func("reshape_like", False)
def reshape_like_shape_func(attrs, inputs, _):
"""
Shape function for reshape_like op.
"""
return [_reshape_like_shape_func(inputs[1])]
@script
def _tile_shape_func(data, reps, ndim, tndim, rndim):
out = output_tensor((tndim,), "int64")
if ndim == rndim:
for i in const_range(tndim):
out[i] = data[i] * int64(reps[i])
elif ndim > rndim:
ngap = ndim - rndim
for i in const_range(ndim):
if i < ngap:
out[i] = data[i]
else:
out[i] = data[i] * int64(reps[i - ngap])
else:
rgap = rndim - ndim
for i in const_range(rndim):
if i < rgap:
out[i] = int64(reps[i])
else:
out[i] = int64(reps[i]) * data[i - rgap]
return out
@_reg.register_shape_func("tile", False)
def tile_shape_func(attrs, inputs, _):
"""
Shape function for tile op.
"""
reps = get_const_tuple(attrs.reps)
ndim = inputs[0].shape[0].value
rndim = len(reps)
tndim = ndim if ndim > rndim else rndim
return [
_tile_shape_func(inputs[0], convert(reps), convert(ndim), convert(tndim), convert(rndim))
]
@script
def _split_shape_func(data_shape, index, indices_or_sections, param_is_indices, axis):
out = output_tensor((data_shape.shape[0],), "int64")
if param_is_indices:
for i in const_range(data_shape.shape[0]):
if i == axis:
assert (
data_shape[axis] % indices_or_sections[0] == 0
), "num_sections must be an integer factor of the size of axis"
out[i] = ceil_div(data_shape[axis], indices_or_sections[0])
else:
out[i] = data_shape[i]
else:
start = int64(0)
if index > 0:
start = int64(indices_or_sections[index - 1])
end = data_shape[axis]
if index < len(indices_or_sections):
end = int64(indices_or_sections[index])
for i in const_range(data_shape.shape[0]):
if i == axis:
out[i] = end - start
else:
out[i] = data_shape[i]
return out
@_reg.register_shape_func("split", False)
def split_shape_func(attrs, inputs, _):
"""
Shape function for split op.
"""
if isinstance(attrs.indices_or_sections, (int, tvm.tir.IntImm)):
indices_or_sections = get_const_int(attrs.indices_or_sections)
assert indices_or_sections > 0, "Slice count must be > 0"
else:
indices_or_sections = list(get_const_tuple(attrs.indices_or_sections))
assert sorted(indices_or_sections)[0] > 0 and indices_or_sections == sorted(
indices_or_sections
), "split_indices must be sorted"
axis = get_const_int(attrs.axis)
if axis < 0:
axis += get_const_int(inputs[0].shape[0])
num_out = (
indices_or_sections
if isinstance(indices_or_sections, int)
else len(indices_or_sections) + 1
)
param_is_indices = isinstance(indices_or_sections, int)
if param_is_indices:
indices_or_sections = [indices_or_sections]
return [
_split_shape_func(
inputs[0],
convert(i),
convert(indices_or_sections),
convert(param_is_indices),
convert(axis),
)
for i in range(num_out)
]
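# Sketch of both branches above (assumed values): with sections
# indices_or_sections = 3 on axis 1 of a (4, 6) input, every output gets shape
# (4, ceil_div(6, 3)) = (4, 2); with split indices [2, 5] on the same input the
# three outputs get axis-1 extents 2 - 0, 5 - 2 and 6 - 5, i.e. shapes (4, 2),
# (4, 3) and (4, 1).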
@script
def _repeat_shape_func(data_shape, repeats, axis):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(data_shape.shape[0]):
if i == axis:
out[i] = int64(data_shape[i] * repeats)
else:
out[i] = data_shape[i]
return out
@_reg.register_shape_func("repeat", False)
def repeat_shape_func(attrs, inputs, _):
"""
Shape func for repeat.
"""
axis = get_const_int(attrs.axis)
if axis < 0:
axis = inputs[0].shape[0] + axis
return [_repeat_shape_func(inputs[0], attrs.repeats, convert(axis))]
@_reg.register_shape_func("broadcast_to_like", False)
def broadcast_to_like_shape_func(attrs, inputs, _):
"""
Shape func for broadcast_to_like.
"""
return [topi.math.identity(inputs[1])]
@script
def _stack_shape_func(data_shape, axis, num_inputs):
out = output_tensor((data_shape.shape[0] + 1,), "int64")
for i in const_range(data_shape.shape[0] + 1):
if i == axis:
out[i] = int64(num_inputs)
elif i < axis:
out[i] = data_shape[i]
else:
out[i] = data_shape[i - 1]
return out
@_reg.register_shape_func("stack", False)
def stack_shape_func(attrs, inputs, _):
"""
Shape func for stack.
"""
axis = get_const_int(attrs.axis)
if axis < 0:
axis += inputs[0].shape[0] + 1
return [_stack_shape_func(inputs[0], convert(axis), convert(len(inputs)))]
@script
def _broadcast_shape_tensors(shape_tensor1, shape_tensor2):
rank1 = shape_tensor1.shape[0]
rank2 = shape_tensor2.shape[0]
out_rank = max(rank1, rank2)
bcast_shape_tensor = output_tensor((out_rank,), "int64")
for index in const_range(out_rank):
dim1 = int64(1)
dim2 = int64(1)
if rank1 == out_rank:
dim1 = shape_tensor1[index]
elif rank1 - (out_rank - index) >= 0:
dim1 = shape_tensor1[rank1 - (out_rank - index)]
if rank2 == out_rank:
dim2 = shape_tensor2[index]
elif rank2 - (out_rank - index) >= 0:
dim2 = shape_tensor2[rank2 - (out_rank - index)]
assert dim1 == dim2 or dim1 == 1 or dim2 == 1, "Invalid broadcast shapes"
bcast_shape_tensor[index] = max(dim1, dim2)
return bcast_shape_tensor
@_reg.register_shape_func("where", False)
def where_shape_func(attrs, inputs, _):
"""
Shape func for where.
"""
def ensure_tensor(tensor):
if len(tensor.shape) == 0:
return topi.full((1,), "int64", 1)
return tensor
cond_shape = ensure_tensor(inputs[0])
x_shape = ensure_tensor(inputs[1])
y_shape = ensure_tensor(inputs[2])
bcast_shape = _broadcast_shape_tensors(x_shape, y_shape)
out_shape = _broadcast_shape_tensors(bcast_shape, cond_shape)
return [out_shape]
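# The broadcast helper aligns the two shape tensors from the right, following the
# usual numpy-style rule. As an assumed example: broadcasting shape tensors
# (3, 1) and (4, 3, 5) walks the trailing axes, keeps the larger extent whenever
# the other side is 1, and produces (4, 3, 5); where() then broadcasts the
# condition shape against that intermediate result.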
@script
def _adv_index_post_process(data_shape, bcast_shape, num_indices):
data_rank = data_shape.shape[0]
bcast_rank = bcast_shape.shape[0]
out = output_tensor((data_rank + bcast_rank - num_indices,), "int64")
for i in const_range(bcast_rank):
out[i] = bcast_shape[i]
for i in const_range(data_rank - num_indices):
out[i + bcast_rank] = data_shape[i + num_indices]
return out
@_reg.register_shape_func("adv_index", False)
def adv_index_shape_func(attrs, inputs, _):
"""
Shape func for adv_index.
"""
bcast_shape = inputs[1]
for i in inputs[2:]:
bcast_shape = _broadcast_shape_tensors(bcast_shape, i)
return [_adv_index_post_process(inputs[0], bcast_shape, convert(len(inputs) - 1))]
@script
def _unique_shape(data_shape):
unique_shape = output_tensor((1,), "int64")
indices_shape = output_tensor((1,), "int64")
inverse_indices_shape = output_tensor((1,), "int64")
num_unique_shape = output_tensor((1,), "int64")
unique_shape[0] = data_shape[0]
indices_shape[0] = data_shape[0]
inverse_indices_shape[0] = data_shape[0]
num_unique_shape[0] = int64(1)
return (unique_shape, indices_shape, inverse_indices_shape, num_unique_shape)
@script
def _unique_with_counts_shape(data_shape):
unique_shape = output_tensor((1,), "int64")
indices_shape = output_tensor((1,), "int64")
inverse_indices_shape = output_tensor((1,), "int64")
num_unique_shape = output_tensor((1,), "int64")
counts_shape = output_tensor((1,), "int64")
unique_shape[0] = data_shape[0]
indices_shape[0] = data_shape[0]
inverse_indices_shape[0] = data_shape[0]
num_unique_shape[0] = int64(1)
counts_shape[0] = data_shape[0]
return (unique_shape, indices_shape, inverse_indices_shape, num_unique_shape, counts_shape)
@_reg.register_shape_func("unique", False)
def unique_shape_func(attrs, inputs, _):
"""
Shape func for unique operator.
"""
if attrs.return_counts:
return _unique_with_counts_shape(inputs[0])
else:
return _unique_shape(inputs[0])
@script
def _gather_nd_shape(data_shape, indices_shape, batch_dims, index_rank):
ndim = data_shape.shape[0]
# using mdim = indices_shape[0] wouldn't work because a rank cannot
# depend on a runtime shape dimension of indices tensor, even if the
# dimension is always a known, fixed value. As a workaround, we assume that
# the fixed gather dimension (the size of an indexing tuple) is recorded
# in gather_nd op attributes.
mdim = index_rank
kdim = indices_shape.shape[0] - 1
out_shape = output_tensor((kdim + ndim - (mdim + batch_dims),), "int64")
for i in range(1, kdim + 1):
out_shape[i - 1] = indices_shape[i]
for i in range(mdim + batch_dims, ndim):
out_shape[kdim + i - (mdim + batch_dims)] = data_shape[i]
return out_shape
@_reg.register_shape_func("gather_nd", False)
def gather_nd_shape_func(attrs, inputs, _):
"""
Shape func for gather_nd operator.
"""
batch_dims = get_const_int(attrs.batch_dims)
index_rank = get_const_int(attrs.index_rank)
assert index_rank > 0, "index_rank needs to be specified for dynamic gather_nd"
return [_gather_nd_shape(inputs[0], inputs[1], convert(batch_dims), convert(index_rank))]
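# Worked example of the rank formula above (assumed shapes): for data of shape
# (2, 3, 4), indices of shape (2, 5) (so index_rank = 2, kdim = 1) and
# batch_dims = 0, the output rank is 1 + 3 - 2 = 2; the first dim copies the
# trailing indices extent 5 and the second copies the un-indexed data dim 4,
# giving an output shape of (5, 4).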
@script
def _gather_shape(data_shape, indices_shape, axis):
out_shape = output_tensor((data_shape.shape[0],), "int64")
for i in range(data_shape.shape[0]):
if i != axis:
assert (
data_shape[i] == indices_shape[i]
), "data and indices size at non-gather axes must be the same"
out_shape[i] = indices_shape[i]
return out_shape
@_reg.register_shape_func("gather", False)
def gather_shape_func(attrs, inputs, _):
"""
Shape func for gather operator.
"""
return [_gather_shape(inputs[0], inputs[1], attrs.axis)]
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/algorithm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Classic algorithm operation"""
from __future__ import absolute_import as _abs
from ..expr import Constant, Expr, TupleWrapper
from . import _make
from .dyn import _make as _dyn_make
def sort(data, axis=-1, is_ascend=1):
"""Performs sorting along the given axis and returns data in sorted order.
Parameters
----------
data : relay.Expr
The input data tensor.
axis : int, optional
Axis along which to sort the input tensor.
is_ascend : boolean, optional
Whether to sort in ascending or descending order.
Returns
-------
out : relay.Expr
Tensor with same shape as data.
"""
return _make.sort(data, axis, is_ascend)
def argsort(data, axis=-1, is_ascend=1, dtype="int32"):
"""Performs sorting along the given axis and returns an array of indices
having same shape as an input array that index data in sorted order.
Parameters
----------
data : relay.Expr
The input data tensor.
axis : int, optional
Axis along which to sort the input tensor.
is_ascend : boolean, optional
Whether to sort in ascending or descending order.
dtype : string, optional
The data type of the output indices.
Returns
-------
out : relay.Expr
Tensor with same shape as data.
"""
return _make.argsort(data, axis, is_ascend, dtype)
def topk(data, k=1, axis=-1, ret_type="both", is_ascend=False, dtype="int32"):
"""Get the top k elements in an input tensor along the given axis.
ret_type specifies the return type, can be one of ("both", "values", "indices").
Parameters
----------
data : relay.Expr
The input data tensor.
k : int or relay.Expr, optional
Number of top elements to select. Return all elements if k < 1.
axis : int, optional
Axis along which to sort the input tensor.
ret_type: str, optional
The return type [both, values, indices].
"both": return both top k data and indices.
"values": return top k data only.
"indices": return top k indices only.
is_ascend : boolean, optional
Whether to sort in ascending or descending order.
dtype : string, optional
The data type of the indices output.
Returns
-------
out : relay.Expr or List[relay.Expr]
The computed result.
"""
if isinstance(k, Constant):
k = k.data.numpy().item()
if isinstance(k, Expr):
out = _dyn_make.topk(data, k, axis, ret_type, is_ascend, dtype)
else:
out = _make.topk(data, k, axis, ret_type, is_ascend, dtype)
if ret_type == "both":
return TupleWrapper(out, 2)
return out
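# Minimal usage sketch (variable names and shapes are assumed, not from the
# source): with ret_type="both" the call returns a TupleWrapper of length 2 that
# can be indexed or unpacked into the values and indices tensors.
#
#   x = relay.var("x", shape=(3, 4), dtype="float32")
#   values, indices = topk(x, k=2, axis=-1, ret_type="both")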
def searchsorted(sorted_sequence, values, right=False, dtype="int32"):
"""Find indices where elements should be inserted to maintain order.
If `sorted_sequence` is N-dimensional, the innermost dimension of
`values` are searched in the corresponding dimension of `sorted_sequence`.
Parameters
----------
sorted_sequence : relay.Expr
N-D or 1-D Tensor, containing monotonically increasing sequence
on the innermost dimension.
values : relay.Expr
N-D Tensor containing the search values. When `sorted_sequence` is 1-D,
the shape of `values` can be arbitrary. Otherwise, ranks of `sorted_sequence`
and `values` must be the same, and outer N-1 axes must have the same size.
right : bool, optional
Controls which index is returned if a value lands exactly on one of the sorted values.
If False, the index of the first suitable location found is returned. If True, the
last such index is returned. If there is no suitable index, return either 0 or N
(where N is the size of the innermost dimension).
dtype : string, optional
The data type of the output indices.
Returns
-------
indices : relay.Expr
Tensor with same shape as values, representing the indices of
elements of `values` if they are inserted in `sorted_sequence`.
"""
return _make.searchsorted(sorted_sequence, values, right, dtype)
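# Minimal usage sketch (names and shapes are assumed): searching a sorted 1-D
# sequence for three values returns an int64 index tensor of shape (3,).
#
#   seq = relay.var("seq", shape=(5,), dtype="float32")
#   vals = relay.var("vals", shape=(3,), dtype="float32")
#   idx = searchsorted(seq, vals, right=False, dtype="int64")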
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/annotation/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Annotation related operators."""
from __future__ import absolute_import as _abs
from .annotation import *
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/annotation/_make.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Constructor APIs"""
import tvm._ffi
tvm._ffi._init_api("relay.op.annotation._make", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/annotation/annotation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Annotation operations."""
from tvm import target
from tvm.runtime import ndarray as _nd
from tvm.runtime import Device as _Device
from . import _make
from .. import op as reg
def _make_virtual_device(device):
if isinstance(device, _Device):
return target.VirtualDevice(device)
if isinstance(device, str):
return target.VirtualDevice(_nd.device(device))
if isinstance(device, target.VirtualDevice):
return device
raise ValueError("expecting a Device or device name, but received a %s" % (type(device)))
def on_device(body, device, constrain_result=False, constrain_body=True):
"""Annotates a body expression with device constraints. The constraint influences
how the body is compiled, where the body is evaluated, and where the result of
evaluation is stored.
Note that the defaults for the constrain_body and constrain_result parameters should
almost never need to be overridden by the user. These parameters are exposed here
to help unit tests exercise the PlanDevices pass machinery.
Parameters
----------
body : tvm.relay.Expr
The expression to be annotated.
device : Union[:py:class:`Device`, str]
The device to annotate with.
constrain_result : bool
If false (the default), the result of the on_device is not constrained to be on device.
constrain_body : bool
If true (the default), the body of the on_device is constrained to be on device.
Returns
-------
result : tvm.relay.Expr
The annotated expression.
"""
return _make.OnDevice(body, _make_virtual_device(device), constrain_result, constrain_body)
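# Minimal usage sketch (the annotated expression and device name are assumed
# examples): annotate a sub-expression so the PlanDevices pass places it on the
# named device; a plain device string is accepted and converted by
# _make_virtual_device above.
#
#   annotated = on_device(some_relay_expr, "cuda", constrain_result=False)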
def function_on_device(function, param_devices, result_device):
"""Annotates a Relay function with the device types on which its parameters and result should
be stored.
Parameters
----------
function : tvm.relay.Function
The function to be annotated.
param_devices : Array[Union[:py:class:`Device`, str]]
The devices for each parameter.
result_device: Union[:py:class:`Device`, str]
The device for the function result.
Returns
-------
result : tvm.relay.Function
The annotated function.
"""
return _make.FunctionOnDevice(
function,
[_make_virtual_device(d) for d in param_devices],
_make_virtual_device(result_device),
)
def stop_fusion(data):
"""Annotate an expression to prevent it being fused with following expressions.
Parameters
----------
data : tvm.relay.Expr
The expression to be annotated.
Returns
-------
result : tvm.relay.Expr
The annotated expression.
"""
return _make.stop_fusion(data)
def checkpoint(data):
"""Annotate an expression to be a checkpoint for the checkpointing memory optimization.
Parameters
----------
data : tvm.relay.Expr
The expression to be annotated.
Returns
-------
result : tvm.relay.Expr
The annotated expression.
"""
return _make.checkpoint(data)
reg.register_injective_schedule("annotation.checkpoint")
def compiler_begin(data, compiler):
"""Annotate an expression to indicate that it is the beginning of
a region that will be handled by the given compiler.
Parameters
----------
data : tvm.relay.Expr
The expression to be annotated.
compiler : Str
The compiler used to generate code of the annotated region.
Returns
-------
result : tvm.relay.Expr
The annotated expression.
"""
return _make.compiler_begin(data, compiler)
def compiler_end(data, compiler):
"""Annotate an expression to indicate that it is the end of a region that
is handled by the provided compiler.
Parameters
----------
data : tvm.relay.Expr
The expression to be annotated.
compiler : Str
The compiler used to generate code of the annotated region.
Returns
-------
result : tvm.relay.Expr
The annotated expression.
"""
return _make.compiler_end(data, compiler)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Contrib modules."""
from .register import get_pattern_table, register_pattern_table
from .arm_compute_lib import *
from .dnnl import *
from .tachikoma import *
from .bnns import *
from .coreml import *
from .ethosn import *
from .libtorch import *
from .tensorrt import *
from .cutlass import *
from .clml import *
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/_ethosn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Expose 'is supported' functions to Python."""
import tvm._ffi
tvm._ffi._init_api("relay.ethos-n.support", __name__)
tvm._ffi._init_api("relay.backend.contrib.ethos-n", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/arm_compute_lib.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, dangerous-default-value
"""Arm Compute Library supported operators."""
import tvm
from tvm import relay
from tvm._ffi import register_func
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.expr import const
from ...dataflow_pattern import is_constant, is_expr, is_op, is_tuple, wildcard
from ..strategy.generic import is_depthwise_conv2d
from .register import register_pattern_table
def is_arm_compute_runtime_enabled():
"""Check if the ACL graph executor is present.
Returns
-------
ret: bool
True if present, False if not.
"""
check_enabled = tvm.get_global_func("relay.op.is_arm_compute_runtime_enabled", True)
if check_enabled:
return check_enabled()
return False
def partition_for_arm_compute_lib(mod, params=None, disabled_ops=["concatenate"], **opts):
"""Partition the graph greedily offloading supported
operators to Arm Compute Library.
Parameters
----------
mod : Module
The module to run passes on.
params : Optional[Dict[str, NDArray]]
Constant input parameters.
disabled_ops : Optional[list]
Ops which we do not want to offload to ACL.
Returns
-------
ret : annotated and partitioned module.
"""
if params:
mod["main"] = bind_params_by_name(mod["main"], params)
seq = tvm.transform.Sequential(
[
transform.InferType(),
transform.MergeComposite(arm_compute_lib_pattern_table(disabled_ops)),
transform.AnnotateTarget("arm_compute_lib", False),
transform.PartitionGraph(),
]
)
return seq(mod)
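# Typical usage sketch (the module and params come from some frontend importer
# and are assumed here, not defined in this file):
#
#   # mod, params = relay.frontend.from_tflite(...)   # hypothetical source
#   partitioned_mod = partition_for_arm_compute_lib(
#       mod, params, disabled_ops=["concatenate"]
#   )
#
# The result is the same module with supported regions extracted into functions
# marked for the "arm_compute_lib" external compiler.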
@register_func("relay.ext.arm_compute_lib.optimize")
def preprocess_module(mod):
"""
Pre-process a module containing functions ready for ACL codegen. For now we enforce OHWI
kernel layout and fold the transforms away.
Parameters
----------
mod : Module
The module to run passes on.
Returns
-------
preprocessed_mod : The processed module.
"""
def convert_layout_conv2d(conv2d_function):
def convert_conv(attrs, inputs, tinfos, desired_layouts):
new_attrs = dict(attrs)
data_info = tinfos[0]
weight_info = tinfos[1]
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
new_attrs["data_layout"] = desired_data_layout
new_attrs["kernel_layout"] = desired_kernel_layout
if is_depthwise_conv2d(
data_info.shape,
attrs["data_layout"],
weight_info.shape,
attrs["kernel_layout"],
attrs["groups"],
):
dkl = desired_kernel_layout
new_attrs["kernel_layout"] = dkl[3] + dkl[1:3] + dkl[0]
return conv2d_function(*inputs, **new_attrs)
return convert_conv
with OpAttrContext(
"nn.conv2d", "FTVMConvertOpLayout", convert_layout_conv2d(tvm.relay.nn.conv2d)
), OpAttrContext(
"qnn.conv2d", "FTVMConvertOpLayout", convert_layout_conv2d(tvm.relay.qnn.op.conv2d)
):
seq = tvm.transform.Sequential(
[
transform.ConvertLayout(
{"nn.conv2d": ["NHWC", "OHWI"], "qnn.conv2d": ["NHWC", "OHWI"]}
),
transform.FoldConstant(),
]
)
preprocessed_mod = seq(mod)
return preprocessed_mod
@register_pattern_table("arm_compute_lib")
def arm_compute_lib_pattern_table(disabled_ops=["concatenate"]):
"""Get the ACL pattern table."""
def conv_pattern():
"""Create a convolution pattern.
Returns
-------
pattern : dataflow_pattern.AltPattern
Denotes the convolution pattern.
"""
pattern = is_op("nn.pad")(wildcard(), wildcard()) | wildcard()
pattern = is_op("nn.conv2d")(pattern, is_constant())
pattern = pattern.optional(lambda x: is_op("nn.bias_add")(x, is_constant()))
pattern = pattern.optional(is_op("nn.relu"))
return pattern
def qnn_conv_pattern():
"""Create a quantized convolution pattern.
Returns
-------
pattern : dataflow_pattern.AltPattern
Denotes the convolution pattern.
"""
pattern = is_op("nn.pad")(wildcard(), wildcard()) | wildcard()
pattern = is_op("qnn.conv2d")(
pattern, is_constant(), is_constant(), is_constant(), is_constant(), is_constant()
)
pattern = pattern.optional(lambda x: is_op("nn.bias_add")(x, is_constant()))
pattern = pattern.optional(is_op("nn.relu"))
pattern = is_op("qnn.requantize")(
pattern, wildcard(), wildcard(), is_constant(), is_constant()
)
return pattern
def dense_pattern():
"""Create a dense (fully-connected) pattern.
Returns
-------
pattern : dataflow_pattern.AltPattern
Denotes the dense pattern.
"""
pattern = is_op("nn.dense")(wildcard(), is_constant())
pattern = pattern.optional(lambda x: is_op("nn.bias_add")(x, is_constant()))
return pattern
def qnn_dense_pattern():
"""Create a quantized dense (fully-connected) pattern.
Returns
-------
pattern : dataflow_pattern.AltPattern
Denotes the quantized dense pattern.
"""
pattern = is_op("qnn.dense")(
wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant()
)
pattern = pattern.optional(lambda x: is_op("nn.bias_add")(x, is_constant()))
pattern = is_op("qnn.requantize")(
pattern, wildcard(), wildcard(), is_constant(), is_constant()
)
return pattern
def avg_pool2d_pattern():
"""Creates a pattern that matches either quantized
avg_pool2d or quantized global_avg_pool2d.
Returns
-------
pattern : dataflow_pattern.AltPattern
Denotes the average pooling pattern.
"""
pattern = is_op("cast")(wildcard())
pattern = is_op("nn.avg_pool2d")(pattern) | is_op("nn.global_avg_pool2d")(pattern)
pattern = is_op("cast")(pattern)
return pattern
def l2_pool2d_pattern():
"""Create an l2 pooling pattern from equivalent relay operators.
Returns
-------
pattern : dataflow_pattern.AltPattern
Denotes the l2 pooling pattern.
"""
pattern = is_op("power")(wildcard(), is_expr(const(2.0)))
pattern = is_op("nn.avg_pool2d")(pattern)
pattern = is_op("sqrt")(pattern)
return pattern
def concatenate_pattern():
"""Create an concatenate pattern from equivalent relay operators.
Returns
-------
pattern : dataflow_pattern.AltPattern
Denotes the concatenate pattern.
"""
pattern = is_op("concatenate")(is_tuple(None))
return pattern
def check_conv(extract):
"""Check conv pattern is supported by ACL."""
call = extract
while call.op.name != "nn.conv2d":
call = call.args[0]
return conv2d(call)
def check_qnn_conv(extract):
"""Check qnn conv pattern is supported by ACL."""
if extract.attrs.out_dtype != "uint8":
return False
call = extract
while call.op.name != "qnn.conv2d":
call = call.args[0]
return qnn_conv2d(call)
def check_dense(extract):
"""Check conv pattern is supported by ACL."""
call = extract
while call.op.name != "nn.dense":
call = call.args[0]
return dense(call)
def check_qnn_dense(extract):
"""Check qnn conv pattern is supported by ACL."""
if extract.attrs.out_dtype != "uint8":
return False
call = extract
while call.op.name != "qnn.dense":
call = call.args[0]
return qnn_dense(call)
def check_avg_pool2d(extract):
"""Check average pool2d pattern is supported by ACL."""
if extract.attrs.dtype != "uint8":
return False
pool = extract.args[0]
if pool.args[0].attrs.dtype != "int32":
return False
return avg_pool2d(pool, from_quantized_composite=True)
def check_l2_pool2d(extract):
"""Check l2 pool2d pattern is supported by ACL."""
pool = extract.args[0]
return avg_pool2d(pool)
def check_concatenate(expr):
"""Check concatenate pattern is supported by ACL."""
if "concatenate" in disabled_ops:
return False
attrs, type_args = expr.attrs, expr.type_args
for idx in range(len(type_args[0].fields)):
if type_args[0].fields[idx].dtype not in ["float32", "uint8"]:
return False
# ACL concatenate only supports maximum 4 dimensions input tensor
if attrs.axis not in [-4, -3, -2, -1, 0, 1, 2, 3]:
return False
return True
return [
("arm_compute_lib.conv2d", conv_pattern(), check_conv),
("arm_compute_lib.qnn_conv2d", qnn_conv_pattern(), check_qnn_conv),
("arm_compute_lib.dense", dense_pattern(), check_dense),
("arm_compute_lib.qnn_dense", qnn_dense_pattern(), check_qnn_dense),
("arm_compute_lib.qnn_conv2d", qnn_conv_pattern(), check_qnn_conv),
("arm_compute_lib.avg_pool2d", avg_pool2d_pattern(), check_avg_pool2d),
("arm_compute_lib.l2_pool2d", l2_pool2d_pattern(), check_l2_pool2d),
("arm_compute_lib.concatenate", concatenate_pattern(), check_concatenate),
]
def _register_external_op_helper(op_name, supported=True):
@tvm.ir.register_op_attr(op_name, "target.arm_compute_lib")
def _func_wrapper(expr):
return supported
return _func_wrapper
_register_external_op_helper("reshape")
@tvm.ir.register_op_attr("nn.conv2d", "target.arm_compute_lib")
def conv2d(expr):
"""Check if the external ACL codegen for conv2d should be used."""
attrs, args = expr.attrs, expr.args
if attrs.data_layout != "NHWC":
return False
if attrs.out_dtype != "float32" and attrs.out_dtype != "":
return False
data_typ = args[0].checked_type
if len(data_typ.shape) != 4 or data_typ.shape[0] != 1 or data_typ.dtype != "float32":
return False
kernel_typ = args[1].checked_type
if len(kernel_typ.shape) != 4 or kernel_typ.dtype != "float32":
return False
is_depthwise = is_depthwise_conv2d(
data_typ.shape,
attrs["data_layout"],
kernel_typ.shape,
attrs["kernel_layout"],
attrs["groups"],
)
if is_depthwise:
return depthwise_conv2d(attrs, args)
# ACL doesn't support grouped convolution
if attrs.groups != 1 and not is_depthwise:
return False
return True
def qnn_conv2d(expr):
"""Check if the external ACL codegen for qnn.conv2d should be used."""
attrs, args = expr.attrs, expr.args
if attrs.data_layout != "NHWC":
return False
if attrs.out_dtype != "int32" and attrs.out_dtype != "":
return False
data_typ = args[0].checked_type
if len(data_typ.shape) != 4 or data_typ.shape[0] != 1 or data_typ.dtype != "uint8":
return False
kernel_typ = args[1].checked_type
if len(kernel_typ.shape) != 4 or kernel_typ.dtype != "uint8":
return False
is_depthwise = is_depthwise_conv2d(
data_typ.shape,
attrs["data_layout"],
kernel_typ.shape,
attrs["kernel_layout"],
attrs["groups"],
)
if is_depthwise:
return depthwise_conv2d(attrs, args)
# ACL doesn't support grouped convolution
if attrs.groups != 1 and not is_depthwise:
return False
return True
def depthwise_conv2d(attrs, args):
"""Check if the external ACL codegen for depthwise convolution should be used.
Note
----
Relay does not have a depthwise conv2d operator whilst ACL does. We simply
separate the checks for depthwise for clarity.
"""
kernel_typ = args[1].checked_type
# Only supports 3x3, 5x5 depthwise
if (
kernel_typ.shape[0] not in [3, 5]
or kernel_typ.shape[1] not in [3, 5]
or kernel_typ.shape[0] != kernel_typ.shape[1]
):
return False
# Stride must be (1, 1) or (2, 2)
if (attrs.strides[0], attrs.strides[1]) not in [(1, 1), (2, 2)]:
return False
return True
@tvm.ir.register_op_attr("nn.dense", "target.arm_compute_lib")
def dense(expr):
"""Check if the external ACL codegen for dense should be used."""
attrs, args = expr.attrs, expr.args
data_typ = args[0].checked_type
if data_typ.dtype != "float32":
return False
kernel_typ = args[1].checked_type
if len(kernel_typ.shape) != 2 or kernel_typ.dtype != "float32":
return False
if attrs.out_dtype != "float32" and attrs.out_dtype != "":
return False
return True
def qnn_dense(expr):
"""Check if the external ACL codegen for qnn.dense should be used."""
attrs, args = expr.attrs, expr.args
data_typ = args[0].checked_type
if data_typ.dtype != "uint8":
return False
kernel_typ = args[1].checked_type
if len(kernel_typ.shape) != 2 or kernel_typ.dtype != "uint8":
return False
if attrs.out_dtype != "int32":
return False
return True
def check_dilation(attrs):
"""Prevents offloading if dilation other than (1, 1)"""
if not isinstance(attrs, relay.op.op_attrs.GlobalPool2DAttrs):
if not (len(attrs.dilation) == 2 and attrs.dilation[0] == 1 and attrs.dilation[1] == 1):
return False
return True
@tvm.ir.register_op_attr("nn.max_pool2d", "target.arm_compute_lib")
def max_pool2d(expr):
"""Check if the external ACL codegen for maxpool2d should be used."""
attrs, args = expr.attrs, expr.args
if attrs.layout != "NHWC":
return False
typ = args[0].checked_type
if typ.dtype not in ["float32", "uint8"]:
return False
return check_dilation(attrs)
@tvm.ir.register_op_attr("nn.avg_pool2d", "target.arm_compute_lib")
def avg_pool2d(expr, from_quantized_composite=False):
"""Check if the external ACL codegen for avgpool2d should be used."""
attrs, args = expr.attrs, expr.args
typ = args[0].checked_type
if from_quantized_composite:
if typ.dtype != "int32":
return False
else:
if typ.dtype not in ["float32"]:
return False
if attrs.layout != "NHWC":
return False
return check_dilation(attrs)
@tvm.ir.register_op_attr("nn.global_max_pool2d", "target.arm_compute_lib")
def global_max_pool2d(expr):
"""Check if the external ACL codegen for gloval_maxpool2d should be used."""
attrs, args = expr.attrs, expr.args
typ = args[0].checked_type
if typ.dtype not in ["float32", "uint8"]:
return False
if attrs.layout != "NHWC":
return False
return True
@tvm.ir.register_op_attr("nn.global_avg_pool2d", "target.arm_compute_lib")
def global_avg_pool2d(expr):
"""Check if the external ACL codegen for global_avgpool2d should be used."""
attrs, args = expr.attrs, expr.args
typ = args[0].checked_type
if typ.dtype not in ["float32"]:
return False
if attrs.layout != "NHWC":
return False
return True
@tvm.ir.register_op_attr("maximum", "target.arm_compute_lib")
def maximum(expr):
"""Check if the external ACL codegen for maximum should be used."""
args = expr.args
type_a = args[0].checked_type
type_b = args[1].checked_type
return (type_a.dtype == "float32") and (type_b.dtype == "float32")
@tvm.ir.register_op_attr("add", "target.arm_compute_lib")
def add(expr):
"""Check if the external ACL codegen for add should be used."""
args = expr.args
for typ in [args[0].checked_type, args[1].checked_type]:
if typ.dtype != "float32":
return False
return True
@tvm.ir.register_op_attr("qnn.add", "target.arm_compute_lib")
def qnn_add(expr):
"""Check if the external ACL codegen for add should be used."""
args = expr.args
for typ in [args[0].checked_type, args[1].checked_type]:
if typ.dtype != "uint8":
return False
return True
class OpAttrContext(object):
"""Temporarily changes the attr of an op."""
def __init__(self, op_name, attr_key, attr_value):
"""Saves the required info for RAII pattern usage.
Parameters
----------
op_name : str
The op name.
attr_key : str
The attribute name.
attr_value : object
The attribute value.
"""
self.op = relay.op.get(op_name)
self.attr_key = attr_key
self.attr_value = attr_value
def __enter__(self):
self.older_attr = self.op.get_attr(self.attr_key)
self.op.reset_attr(self.attr_key)
self.op.set_attr(self.attr_key, self.attr_value)
return self
def __exit__(self, ptype, value, trace):
self.op.reset_attr(self.attr_key)
if self.older_attr:
self.op.set_attr(self.attr_key, self.older_attr)
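# Usage sketch for the context manager above (the converter function is a
# hypothetical placeholder): temporarily override an op attribute, run a pass
# sequence, and restore the previous attribute on exit.
#
#   with OpAttrContext("nn.conv2d", "FTVMConvertOpLayout", my_convert_fn):
#       mod = tvm.transform.Sequential(
#           [transform.ConvertLayout({"nn.conv2d": ["NHWC", "OHWI"]})]
#       )(mod)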
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/bnns.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""BNNS library supported operators.
Is a part of the Accelerate framework on macOS/iOS platforms. Apple provides several
APIs to handle tensor processing. In particular:
* BNNS (Basic Neural Network Subroutines)
* vDSP (1D and 2D tensor processing)
"""
import math
import tvm.ir
from tvm.relay import transform
from tvm.relay.expr import const
from tvm.relay.build_module import bind_params_by_name
from .register import register_pattern_table, get_pattern_table
from ...dataflow_pattern import wildcard, is_op, is_expr
def partition_for_bnns(mod, params=None):
"""Partition the graph greedily offloading supported
operators to BNNS.
Parameters
----------
mod : Module
The module to run passes on.
params : Optional[Dict[str, NDArray]]
Constant input parameters.
Returns
-------
ret : annotated and partitioned module.
"""
if params:
mod["main"] = bind_params_by_name(mod["main"], params)
seq = tvm.transform.Sequential(
[
transform.InferType(),
transform.FoldConstant(),
transform.FoldScaleAxis(),
transform.DynamicToStatic(),
transform.AlterOpLayout(),
# TODO(apeskov): WA. The AlterOpLayout call leads to constant shape transformations.
# Some expand_dims ops may appear after constants, which breaks BNNS fusing.
# So we have to call FoldConstant right before bnns composite passes.
transform.FoldConstant(),
transform.MergeComposite(get_pattern_table("bnns")),
transform.AnnotateTarget("bnns"),
# If you do not need per-layer performance statistics you can
# uncomment the next line
# transform.MergeCompilerRegions(),
transform.PartitionGraph(),
]
)
return seq(mod)
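# Typical usage sketch (module and params are assumed to come from a frontend
# importer and are not defined in this file):
#
#   # mod, params = relay.frontend.from_onnx(...)   # hypothetical source
#   partitioned_mod = partition_for_bnns(mod, params)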
def _register_external_op_helper(op_name, supported=True):
"""The helper function to indicate that a given operator can be supported
by BNNS.
Parameters
----------
op_name : Str
The name of supported operator that will be registered.
Returns
-------
f : callable
A function that returns if the operator is supported by BNNS.
"""
@tvm.ir.register_op_attr(op_name, "target.bnns")
def _func_wrapper(expr):
return supported
return _func_wrapper
_register_external_op_helper("nn.batch_matmul")
@tvm.ir.register_op_attr("nn.max_pool2d", "target.bnns")
def max_pool2d_check(expr):
"""Check if the nn.max_pool2d can be executed in BNNS"""
attrs, args = expr.attrs, expr.args
data_typ = args[0].checked_type
rank = len(data_typ.shape)
if rank < 3 or rank > 4 or data_typ.dtype != "float32":
return False
if attrs.layout != "NCHW":
return False
return True
@tvm.ir.register_op_attr("nn.avg_pool2d", "target.bnns")
def avg_pool2d_check(expr):
"""Check if the nn.avg_pool2d can be executed in BNNS"""
attrs, args = expr.attrs, expr.args
data_typ = args[0].checked_type
rank = len(data_typ.shape)
if rank < 3 or rank > 4 or data_typ.dtype != "float32":
return False
if attrs.layout != "NCHW":
return False
return True
@tvm.ir.register_op_attr("nn.global_max_pool2d", "target.bnns")
def global_max_pool2d_check(expr):
"""Check if the nn.global_max_pool2d can be executed in BNNS"""
attrs, args = expr.attrs, expr.args
data_typ = args[0].checked_type
rank = len(data_typ.shape)
if rank < 3 or rank > 4 or data_typ.dtype != "float32":
return False
if attrs.layout != "NCHW":
return False
return True
@tvm.ir.register_op_attr("nn.global_avg_pool2d", "target.bnns")
def global_avg_pool2d_check(expr):
"""Check if the nn.global_avg_pool2d can be executed in BNNS"""
attrs, args = expr.attrs, expr.args
data_typ = args[0].checked_type
rank = len(data_typ.shape)
if rank < 3 or rank > 4 or data_typ.dtype != "float32":
return False
if attrs.layout != "NCHW":
return False
return True
def dtype_is_supported(dtype):
"""Check if data type is supported by BNNS backend"""
return dtype in ("", "float32")
@tvm.ir.register_op_attr("nn.conv2d", "target.bnns")
def conv2d_check(expr):
"""Check if the conv2d can be executed in BNNS"""
attrs, args = expr.attrs, expr.args
data_typ = args[0].checked_type
if len(data_typ.shape) != 4 or data_typ.dtype != "float32":
return False
if not isinstance(args[1], tvm.relay.expr.Constant):
return False
kernel_typ = args[1].checked_type
if len(kernel_typ.shape) != 4 or kernel_typ.dtype != "float32":
return False
if attrs.data_layout != "NCHW":
return False
if not dtype_is_supported(attrs.out_dtype):
return False
return True
def bias_check(expr):
"""Check is bias added through the correct dimension"""
attrs, args = expr.attrs, expr.args
if not isinstance(args[1], tvm.relay.expr.Constant):
return False
if expr.op.name == "nn.bias_add":
return attrs.axis == 1
if expr.op.name == "add":
b_shape = args[1].checked_type.shape
if len(b_shape) == 4:
return bool(b_shape[0] == 1 and b_shape[2] == 1 and b_shape[3] == 1)
if len(b_shape) == 3:
return bool(b_shape[1] == 1 and b_shape[2] == 1)
return False
@tvm.ir.register_op_attr("nn.dense", "target.bnns")
def dense(expr):
"""Check if the dense can be used in BNNS."""
attrs, args = expr.attrs, expr.args
data_typ = args[0].checked_type
if data_typ.dtype != "float32":
return False
if not isinstance(args[1], tvm.relay.expr.Constant):
return False
kernel_typ = args[1].checked_type
if len(kernel_typ.shape) != 2 or kernel_typ.dtype != "float32":
return False
if attrs.out_dtype != "float32" and attrs.out_dtype != "":
return False
return True
def make_conv_pattern(with_bias=True, activation="none"):
"""Make pattern for bnns.conv2d primitive"""
data = wildcard()
weight = wildcard()
bias = wildcard()
pat = is_op("nn.conv2d")(data, weight)
if with_bias:
pat = is_op("add")(pat, bias) | is_op("nn.bias_add")(pat, bias)
if activation == "relu":
pat = is_op("nn.relu")(pat)
elif activation == "sigmoid":
pat = is_op("sigmoid")(pat)
return pat
def check_conv(extract):
"""Check conv pattern is supported by BNNS."""
bias_is_ok = True
call = extract
while call.op.name != "nn.conv2d":
if call.op.name in ("nn.bias_add", "add"):
bias_is_ok &= bias_check(call)
call = call.args[0]
return conv2d_check(call) and bias_is_ok
def make_dense_bias_pattern():
"""Make pattern for bnns.dense primitive"""
data = wildcard()
weight = wildcard()
bias = wildcard()
d = is_op("nn.dense")(data, weight)
return is_op("add")(d, bias)
def make_dense_bias_gelu_pattern():
"""Make pattern for bnns.dense primitive with fused bias and gelu activation"""
dense_bias = make_dense_bias_pattern()
const1 = is_expr(const(0.044715))
const2 = is_expr(const(math.sqrt(2 / math.pi)))
gelu = is_op("power")(dense_bias, is_expr(const(3, dtype="float32")))
gelu = is_op("multiply")(gelu, const1)
gelu = is_op("add")(gelu, dense_bias)
gelu = is_op("multiply")(gelu, const2)
gelu = is_op("tanh")(gelu)
gelu = is_op("add")(gelu, is_expr(const(1, dtype="float32")))
gelu = is_op("multiply")(gelu, is_expr(const(0.5)))
gelu = is_op("multiply")(gelu, dense_bias)
return gelu
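# Note: the chain built above encodes the tanh approximation of GELU,
#   gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3))),
# expanded into the primitive power/multiply/add/tanh ops that frontends
# typically emit, so the whole expression can be fused into a single
# bnns.dense_bias_gelu composite.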
def check_dense(extract):
"""Check dense pattern is supported by BNNS."""
call = extract
while call.op.name != "nn.dense":
call = call.args[0]
return dense(call)
@tvm.ir.register_op_attr("nn.instance_norm", "target.bnns")
def instance_norm_check(expr):
"""Check if the nn.instance_norm can be executed in BNNS"""
attrs, args = expr.attrs, expr.args
data_typ = args[0].checked_type
rank = len(data_typ.shape)
if rank < 3 or rank > 4 or data_typ.dtype != "float32":
return False
if not isinstance(args[1], tvm.relay.expr.Constant) or not isinstance(
args[2], tvm.relay.expr.Constant
):
return False
if (attrs.axis == 0 and rank == 3) or (attrs.axis == 1 and rank == 4):
return True
return False
@register_pattern_table("bnns")
def pattern_table():
"""Get BNNS specific fusing patterns collection"""
conv2d_bias_pat = (
"bnns.conv2d_bias",
make_conv_pattern(with_bias=True),
check_conv,
)
conv2d_bias_relu_pat = (
"bnns.conv2d_bias_relu",
make_conv_pattern(with_bias=True, activation="relu"),
check_conv,
)
conv2d_relu_pat = (
"bnns.conv2d_relu",
make_conv_pattern(with_bias=False, activation="relu"),
check_conv,
)
conv2d_bias_sigmoid_pat = (
"bnns.conv2d_bias_sigmoid",
make_conv_pattern(with_bias=True, activation="sigmoid"),
check_conv,
)
conv2d_sigmoid_pat = (
"bnns.conv2d_sigmoid",
make_conv_pattern(with_bias=False, activation="sigmoid"),
check_conv,
)
dense_bias_gelu = ("bnns.dense_bias_gelu", make_dense_bias_gelu_pattern(), check_dense)
dense_bias = ("bnns.dense_bias", make_dense_bias_pattern(), check_dense)
bnns_patterns = [
conv2d_bias_relu_pat,
conv2d_relu_pat,
conv2d_bias_sigmoid_pat,
conv2d_sigmoid_pat,
conv2d_bias_pat,
dense_bias_gelu,
dense_bias,
]
return bnns_patterns
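# Illustrative sketch (not executed here): the patterns above are consumed by
# the MergeComposite pass when partitioning a module for BNNS, e.g.
#
#   from tvm.relay import transform
#   patterns = tvm.relay.op.contrib.get_pattern_table("bnns")
#   seq = tvm.transform.Sequential(
#       [
#           transform.InferType(),
#           transform.MergeComposite(patterns),
#           transform.AnnotateTarget("bnns"),
#           transform.MergeCompilerRegions(),
#           transform.PartitionGraph(),
#       ]
#   )
#   mod = seq(mod)  # `mod` is a placeholder Relay module with float32 ops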
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/clml.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""CLML Library supported operators."""
import tvm
from tvm import relay
from tvm._ffi import register_func
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from ...dataflow_pattern import wildcard, is_op, is_constant, is_tuple_get_item, is_tuple
from .register import register_pattern_table
from ..strategy.generic import is_depthwise_conv2d
def is_clml_runtime_enabled():
"""Check if the CLML graph runtime is present.
Returns
-------
ret: bool
True if present, False if not.
"""
check_enabled = tvm.get_global_func("relay.op.is_clml_runtime_enabled", True)
if check_enabled:
return check_enabled()
return False
def partition_for_clml(mod, params=None):
"""Partition the graph greedily offloading supported
operators to CLML Library.
Parameters
----------
mod : Module
The module to run passes on.
params : Optional[Dict[str, NDArray]]
Constant input parameters.
Returns
-------
ret : annotated and partitioned module.
"""
if params:
mod["main"] = bind_params_by_name(mod["main"], params)
seq = tvm.transform.Sequential(
[
transform.InferType(),
transform.FoldConstant(),
transform.MergeComposite(clml_pattern_table()),
transform.AnnotateTarget("clml", False),
transform.MergeCompilerRegions(),
transform.PartitionGraph(),
]
)
result_mod = seq(mod)
return result_mod
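# Illustrative usage sketch (the Relay module `mod`, the params dict and the
# OpenCL target string are placeholders, not a verified Adreno configuration):
#
#   mod = partition_for_clml(mod, params)
#   lib = relay.build(mod, target="opencl", params=params)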
@register_func("relay.ext.clml.optimize")
def preprocess_module(mod):
"""
Pre-process a module containing functions ready for CLML codegen. For now we enforce OIHW
kernel layout and fold the transforms away.
Parameters
----------
mod : Module
The module to run passes on.
Returns
-------
preprocessed_mod : The processed module.
"""
def convert_layout_conv2d(conv2d_function):
def convert_conv(attrs, inputs, tinfos, desired_layouts):
new_attrs = dict(attrs)
data_info = tinfos[0]
weight_info = tinfos[1]
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
new_attrs["data_layout"] = desired_data_layout
new_attrs["kernel_layout"] = desired_kernel_layout
if is_depthwise_conv2d(
data_info.shape,
attrs["data_layout"],
weight_info.shape,
attrs["kernel_layout"],
attrs["groups"],
):
dkl = desired_kernel_layout
new_attrs["kernel_layout"] = dkl[1] + dkl[0] + dkl[2] + dkl[3]
return conv2d_function(*inputs, **new_attrs)
return convert_conv
with OpAttrContext(
"nn.conv2d", "FTVMConvertOpLayout", convert_layout_conv2d(tvm.relay.nn.conv2d)
):
seq = tvm.transform.Sequential(
[
transform.ConvertLayout({"nn.conv2d": ["NCHW", "OIHW"]}),
transform.FoldConstant(),
]
)
preprocessed_mod = seq(mod)
return preprocessed_mod
@register_pattern_table("clml")
def clml_pattern_table():
"""Get the CLML pattern table."""
def conv_pattern():
"""Create a convolution pattern."""
pattern = is_op("nn.conv2d")(wildcard(), is_constant())
pattern = pattern.optional(lambda x: is_op("nn.bias_add")(x, is_constant()))
pattern = pattern.optional(lambda x: is_op("add")(x, is_constant()))
pattern = pattern.optional(
lambda x: is_op("nn.batch_norm")(
x, is_constant(), is_constant(), is_constant(), is_constant()
)
)
pattern = pattern.optional(is_tuple_get_item)
pattern = pattern.optional(is_op("nn.relu"))
pattern = pattern.optional(is_op("clip"))
return pattern
def batch_norm_pattern():
"""Create a batch norm pattern."""
pattern = is_op("nn.batch_norm")(
wildcard(), is_constant(), is_constant(), is_constant(), is_constant()
)
pattern = is_tuple_get_item(pattern)
return pattern
def concat_pattern():
"""Create a concat pattern.
Returns
-------
pattern : dataflow_pattern.AltPattern
Denotes the concat pattern.
"""
pattern = is_tuple(None)
pattern = is_op("concatenate")(pattern)
return pattern
def dense_pattern():
"""Create a dense pattern."""
pattern = is_op("nn.dense")(wildcard(), is_constant())
pattern = pattern.optional(lambda x: is_op("add")(x, is_constant()))
pattern = pattern.optional(lambda x: is_op("nn.bias_add")(x, is_constant()))
return pattern
def pad_pattern():
"""Create a pad pattern."""
pattern = is_op("nn.pad")(wildcard(), wildcard())
return pattern
def check_conv(extract):
"""Check conv pattern is supported by CLML."""
call = extract
if isinstance(call, tvm.relay.expr.TupleGetItem):
call = call.tuple_value
elif call.op.name == "nn.relu":
call = call.args[0]
if isinstance(call, tvm.relay.expr.TupleGetItem):
call = call.tuple_value
elif call.op.name == "clip":
if call.attrs["a_min"] != 0.0 or call.attrs["a_max"] != 6.0:
return False
call = call.args[0]
if isinstance(call, tvm.relay.expr.TupleGetItem):
call = call.tuple_value
while call.op.name != "nn.conv2d":
call = call.args[0]
attrs, args = call.attrs, call.args
if attrs.data_layout != "NCHW":
return False
data_typ = args[0].checked_type
kernel_typ = args[1].checked_type
is_depthwise = is_depthwise_conv2d(
data_typ.shape,
attrs["data_layout"],
kernel_typ.shape,
attrs["kernel_layout"],
attrs["groups"],
)
if attrs.groups != 1 and not is_depthwise:
return False
return True
return [
("clml.conv2d", conv_pattern(), check_conv),
("clml.dense", dense_pattern()),
("clml.pad", pad_pattern()),
("clml.concat", concat_pattern()),
("clml.batch_norm", batch_norm_pattern()),
]
def _register_external_op_helper(op_name, supported=True):
@tvm.ir.register_op_attr(op_name, "target.clml")
def _func_wrapper(expr):
return supported
return _func_wrapper
_register_external_op_helper("clip")
_register_external_op_helper("nn.relu")
_register_external_op_helper("nn.global_avg_pool2d")
_register_external_op_helper("nn.global_max_pool2d")
_register_external_op_helper("nn.avg_pool2d")
_register_external_op_helper("nn.max_pool2d")
_register_external_op_helper("nn.softmax")
_register_external_op_helper("reshape")
_register_external_op_helper("add")
_register_external_op_helper("subtract")
_register_external_op_helper("multiply")
_register_external_op_helper("minimum")
_register_external_op_helper("maximum")
class OpAttrContext(object):
"""Temporarily changes the attr of an op."""
def __init__(self, op_name, attr_key, attr_value):
"""Saves the required info for RAII pattern usage.
Parameters
----------
op_name : str
The op name.
attr_key : str
The attribute name.
attr_value : object
The attribute value.
"""
self.op = relay.op.get(op_name)
self.attr_key = attr_key
self.attr_value = attr_value
def __enter__(self):
self.older_attr = self.op.get_attr(self.attr_key)
self.op.reset_attr(self.attr_key)
self.op.set_attr(self.attr_key, self.attr_value)
return self
def __exit__(self, ptype, value, trace):
self.op.reset_attr(self.attr_key)
if self.older_attr:
self.op.set_attr(self.attr_key, self.older_attr)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/cmsisnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Arm(R) CMSIS-NN supported operators for Cortex-M."""
import tvm.ir
from tvm.target import Target
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from ...dataflow_pattern import is_constant, is_op, wildcard
from .register import register_pattern_table
tvm._ffi._init_api("relay.ext.cmsisnn.transform", __name__)
def enabled():
return "cmsis-nn" in Target.list_kinds()
def partition_for_cmsisnn(mod, params=None, mod_name="default", **opts):
"""Partition the graph greedily offloading supported
operators on Cortex-M using CMSIS-NN
Parameters
----------
mod : Module
The module to run passes on.
params : Optional[Dict[str, NDArray]]
Constant input parameters.
mod_name: str, optional
The module name
Returns
-------
ret : Module
annotated and partitioned module.
"""
if params:
mod["main"] = bind_params_by_name(mod["main"], params)
seq = tvm.transform.Sequential(
[
transform.InferType(),
transform.MergeComposite(pattern_table()),
transform.AnnotateTarget("cmsis-nn"),
transform.PartitionGraph(mod_name=mod_name),
GenerateCMSISNNConstants(),
CMSISNNFusePads(),
ScalarToTensorConstants(),
ExtractConstantsFromPartitionedFunction(),
transform.InferType(),
]
)
return seq(mod)
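# Illustrative usage sketch (module `mod` and `params` are placeholders; the
# final build step is left out, as it depends on the board-specific
# Cortex-M target/executor configuration):
#
#   mod = partition_for_cmsisnn(mod, params, mod_name="cmsisnn_model")
#   # partitioned subgraphs now carry Compiler="cmsis-nn" and composite names
#   # such as "cmsis-nn.qnn_conv2d".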
@register_pattern_table("cmsis-nn")
def pattern_table():
"""Get the CMSIS-NN compiler pattern table."""
def qnn_softmax_pattern():
"""Create pattern for quantized softmax"""
pattern = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
pattern = is_op("nn.softmax")(pattern)
pattern = is_op("qnn.quantize")(pattern, is_constant(), is_constant())
return pattern
def check_qnn_softmax(pattern):
"""Check if softmax is supported by CMSIS-NN."""
dequantize_call = pattern.args[0].args[0]
scale = pattern.args[1].data.numpy().item(0)
zero_point = pattern.args[2].data.numpy().item(0)
# check for dtypes of quantize and dequantize
return (
(scale == 1.0 / 256 and zero_point == -128)
and pattern.attrs.out_dtype == "int8"
and dequantize_call.args[0].checked_type.dtype == "int8"
)
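# For reference, the sequence matched by qnn_softmax_pattern/check_qnn_softmax
# looks roughly like the following Relay fragment (names and the input scale
# are hypothetical):
#
#   %0 = qnn.dequantize(%x, 0.003f, -128)       # int8 input
#   %1 = nn.softmax(%0)
#   %2 = qnn.quantize(%1, 0.00390625f, -128)    # scale == 1/256, zero point == -128
#
# i.e. softmax is only offloaded when the output is requantized to int8 with
# scale 1/256 and zero point -128.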
def qnn_conv2d_pattern(with_pad):
"""Create pattern for qnn.conv2D with optional pad and/or optional fused relu."""
conv2d_input = wildcard()
if with_pad:
conv2d_input = is_op("nn.pad")(wildcard(), is_constant())
qnn_conv2d = is_op("qnn.conv2d")(
conv2d_input,
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
)
bias_add = is_op("nn.bias_add")(qnn_conv2d, is_constant())
req = is_op("qnn.requantize")(
qnn_conv2d | bias_add, is_constant(), is_constant(), is_constant(), is_constant()
)
clip_or_req = req.optional(is_op("clip"))
return clip_or_req
def check_qnn_conv2d(pattern):
"""Check if the Conv2D is supported by CMSIS-NN."""
if str(pattern.op.name) == "clip":
relu = pattern
requantize = relu.args[0]
else:
requantize = pattern
requantize_input = requantize.args[0]
bias_add = None
if str(requantize_input.op.name) == "nn.bias_add":
bias_add = requantize_input
conv2d = bias_add.args[0]
else:
conv2d = requantize_input
conv2d_input = conv2d.args[0]
conv2d_weight = conv2d.args[1]
# check if depthwise Conv2D
kernel_layout = conv2d.attrs.kernel_layout
pos_o = kernel_layout.index("O")
groups = conv2d.attrs.groups
is_depthwise = False
if groups == int(conv2d_input.checked_type.shape[3]) and groups == int(
conv2d_weight.checked_type.shape[pos_o]
):
is_depthwise = True
# check if dtypes are supported for the following entities
# (input_dtype, weight_dtype, bias_dtype, out_dtype, pattern_dtype)
are_dtypes_valid = False
conv2d_input_dtype = conv2d_input.checked_type.dtype
if bias_add:
bias_dtype = bias_add.args[1].checked_type.dtype
else:
# this is only to enable the following check that validates all supported dtype combinations
bias_dtype = "int32" if conv2d_input_dtype == "int8" else "int64"
valid_dtypes = None
if conv2d_input_dtype == "int8":
valid_dtypes = ("int8", "int8", "int32", "int32", "int8")
elif conv2d_input_dtype == "int16":
valid_dtypes = ("int16", "int8", "int64", "int64", "int16")
if (
conv2d_input_dtype,
conv2d_weight.checked_type.dtype,
bias_dtype,
conv2d.attrs.out_dtype,
pattern.checked_type.dtype,
) == valid_dtypes:
are_dtypes_valid = True
# input_zero_point should be 0 when int16
valid_input_zp = True
if conv2d_input_dtype == "int16" and conv2d.args[2].data.numpy().item(0) != 0:
valid_input_zp = False
# kernel zero_point should be 0
kernel_zp = conv2d.args[3].data.numpy()
kernel_zp = [kernel_zp] if kernel_zp.ndim == 0 else kernel_zp
# combination of all checks to decide if pattern is eligible for partitioning
ret = (
are_dtypes_valid
and valid_input_zp
and all([zp == 0 for zp in kernel_zp])
and (not is_depthwise or bias_add is not None)
)
return ret
def check_qnn_conv2d_pad(pattern):
"""Check if the Pad followed by Conv2D is supported by CMSIS-NN."""
if str(pattern.op.name) == "clip":
relu = pattern
requantize = relu.args[0]
else:
requantize = pattern
requantize_input = requantize.args[0]
if str(requantize_input.op.name) == "nn.bias_add":
bias_add = requantize_input
conv2d = bias_add.args[0]
else:
conv2d = requantize_input
conv2d_input = conv2d.args[0]
# check if sum of paddings from pad() and conv2d() satisfies CMSIS-NN constraints
can_pad_be_fused = True
if isinstance(conv2d_input, tvm.relay.expr.Call) and str(conv2d_input.op.name) == "nn.pad":
pad_top, pad_left, pad_bottom, pad_right = GetEffectiveConv2DPadding(
conv2d, conv2d_input
)
# check that the difference between the side paddings is at most 1 along each dimension
pad_w_diff = int(pad_right - pad_left)
pad_h_diff = int(pad_bottom - pad_top)
can_pad_be_fused = pad_w_diff in [0, 1] and pad_h_diff in [0, 1]
ret = check_qnn_conv2d(pattern) and can_pad_be_fused
return ret
def qnn_fully_connected_pattern():
"""Create pattern for qnn.dense with optional Relu."""
qnn_fc = is_op("qnn.dense")(
wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant()
)
bias_add = is_op("nn.bias_add")(qnn_fc, is_constant())
req = is_op("qnn.requantize")(
qnn_fc | bias_add, is_constant(), is_constant(), is_constant(), is_constant()
)
clip_or_req = req.optional(is_op("clip"))
return clip_or_req
def check_qnn_fully_connected(pattern):
"""Check if the fully connected is supported by CMSIS-NN."""
if str(pattern.op.name) == "clip":
relu = pattern
requantize = relu.args[0]
else:
requantize = pattern
requantize_input = requantize.args[0]
bias_add = None
bias_dtype = "int32"
if str(requantize_input.op.name) == "nn.bias_add":
bias_add = requantize_input
fc = bias_add.args[0]
bias_dtype = bias_add.args[1].checked_type.dtype
else:
fc = requantize_input
fc_input = fc.args[0]
fc_weight = fc.args[1]
# kernel zero_point should be 0
kernel_zp = fc.args[3].data.numpy().item(0)
return (
fc.attrs.out_dtype == "int32"
and fc_input.checked_type.dtype == "int8"
and fc_weight.checked_type.dtype == "int8"
and pattern.checked_type.dtype == "int8"
and bias_dtype == "int32"
and kernel_zp == 0
)
def qnn_avg_pool2d_pattern():
"""Matches average pooling with optional Relu"""
pattern = is_op("cast")(wildcard())
pattern = is_op("nn.avg_pool2d")(pattern)
pattern = is_op("cast")(pattern)
pattern = pattern.optional(is_op("clip"))
return pattern
def check_qnn_avg_pool2d(pattern):
"""Check if avg pool2d is supported by CMSIS-NN."""
output = pattern
if str(pattern.op.name) == "clip":
pooling = pattern.args[0].args[0]
else:
pooling = pattern.args[0]
input_op = pooling.args[0].args[0]
return (
pooling.attrs.layout == "NHWC"
and int(input_op.checked_type.shape[0]) == 1
and input_op.checked_type.dtype == "int8"
and output.checked_type.dtype == "int8"
)
def qnn_max_pool2d_pattern():
"""Matches max pool2d with optional Relu"""
pattern = is_op("nn.max_pool2d")(wildcard())
pattern = pattern.optional(is_op("clip"))
return pattern
def check_qnn_max_pool2d(pattern):
"""Check if max pool2d is supported by CMSIS-NN."""
output = pattern
if str(pattern.op.name) == "clip":
pooling = pattern.args[0]
else:
pooling = pattern
input_op = pooling.args[0]
return (
pooling.attrs.layout == "NHWC"
and int(input_op.checked_type.shape[0]) == 1
and input_op.checked_type.dtype == "int8"
and output.checked_type.dtype == "int8"
)
def binary_op_pattern(op):
"""Matches QNN binary operation"""
pattern = is_op(f"qnn.{op}")(
wildcard(),
wildcard(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
)
return pattern.optional(is_op("clip"))
def check_qnn_binary_op(pattern):
"""Check if binary op is supported by CMSIS-NN."""
binary_op = pattern
if str(pattern.op.name) == "clip":
binary_op = pattern.args[0]
arg0 = binary_op.args[0]
arg1 = binary_op.args[1]
both_args_scalar = False
if (
isinstance(arg0, tvm.relay.expr.Constant)
and len(arg0.checked_type.shape) == 0
and isinstance(arg1, tvm.relay.expr.Constant)
and len(arg1.checked_type.shape) == 0
):
both_args_scalar = True
return (
arg0.checked_type.dtype == "int8"
and arg1.checked_type.dtype == "int8"
and not both_args_scalar
)
return [
("cmsis-nn.qnn_conv2d", qnn_conv2d_pattern(with_pad=True), check_qnn_conv2d_pad),
("cmsis-nn.qnn_conv2d", qnn_conv2d_pattern(with_pad=False), check_qnn_conv2d),
("cmsis-nn.qnn_fully_connected", qnn_fully_connected_pattern(), check_qnn_fully_connected),
("cmsis-nn.qnn_avg_pool2d", qnn_avg_pool2d_pattern(), check_qnn_avg_pool2d),
("cmsis-nn.qnn_max_pool2d", qnn_max_pool2d_pattern(), check_qnn_max_pool2d),
("cmsis-nn.qnn_mul", binary_op_pattern("mul"), check_qnn_binary_op),
("cmsis-nn.qnn_add", binary_op_pattern("add"), check_qnn_binary_op),
("cmsis-nn.qnn_softmax", qnn_softmax_pattern(), check_qnn_softmax),
]
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/coreml.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""CoreML codegen supported operators."""
import tvm.ir
from tvm.contrib.target.coreml import _convert_map
from ...expr import Constant
def _register_coreml_op(op_name):
"""Register a function to check the given operator is supported by Core ML.
Paramters
---------
op_name : Str
The name of operator that will be registered.
"""
def _check_supported(expr):
attrs, args = expr.attrs, expr.args
if op_name == "nn.conv2d":
if not isinstance(args[1], Constant):
return False
if attrs["kernel_layout"] not in ["HWIO", "OIHW"]:
return False
return True
tvm.ir.register_op_attr(op_name, "target.coremlcompiler", _check_supported)
for op in _convert_map:
_register_coreml_op(op)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/cublas.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""cuBLAS Relay integration."""
from typing import Callable, List, Tuple, Dict, Optional
import tvm
import tvm.ir
from tvm import relay
from tvm import te
from tvm.relay import transform
from tvm.contrib import cublas
from ...dataflow_pattern import is_op, wildcard
from .te_target import lower_composite, relay_to_runtime
from .register import register_pattern_table
tvm._ffi.register_func("relay.ext.cublas", relay_to_runtime(tvm.target.cuda()))
def partition_for_cublas(
mod: tvm.IRModule, params: Optional[Dict[str, tvm.runtime.NDArray]] = None
) -> tvm.IRModule:
"""Partition the graph to offload for cuBLAS.
Parameters
----------
mod : tvm.IRModule
The module to partition.
params : Optional[Dict[str, tvm.runtime.NDArray]]
Constant input parameters.
Returns
-------
tvm.IRModule
The partitioned module.
"""
seq = tvm.transform.Sequential(
[
transform.InferType(),
transform.MergeComposite(pattern_table()),
transform.AnnotateTarget("cublas"),
transform.PartitionGraph(),
transform.InferType(),
]
)
return seq(mod)
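# Illustrative usage sketch (assumes TVM was built with cuBLAS enabled; the
# shapes below are arbitrary):
#
#   data = relay.var("data", shape=(16, 64), dtype="float32")
#   weight = relay.var("weight", shape=(32, 64), dtype="float32")
#   mod = tvm.IRModule.from_expr(relay.nn.dense(data, weight))
#   mod = partition_for_cublas(mod)
#   lib = relay.build(mod, target="cuda")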
@register_pattern_table("cublas")
def pattern_table() -> List[Tuple[str, relay.Pattern, Callable[[relay.Call], bool]]]:
"""Get the cuBLAS pattern table."""
def matmul_pattern() -> relay.Pattern:
"""Create pattern for matmul."""
return is_op("nn.matmul")(wildcard(), wildcard())
def batch_matmul_pattern() -> relay.Pattern:
"""Create pattern for batch_matmul."""
return is_op("nn.batch_matmul")(wildcard(), wildcard())
def dense_pattern() -> relay.Pattern:
"""Create pattern for dense."""
return is_op("nn.dense")(wildcard(), wildcard())
def check_matmul_like(matched: relay.Call) -> bool:
"""Check if matmul is supported by cuBLAS."""
# Input data types can't be mixed
if matched.args[0].checked_type.dtype != matched.args[1].checked_type.dtype:
return False
in_dtype = matched.args[0].checked_type.dtype
out_dtype = matched.checked_type.dtype
# Only the following data type combinations are supported
if (in_dtype, out_dtype) not in [
("float32", "float32"),
("float16", "float16"),
("float16", "float32"),
("int8", "int32"),
("float64", "float64"),
("int8", "float32"),
]:
return False
# If inputs are int8, input column strides must be a multiple of 4
if in_dtype == "int8":
if (
matched.args[0].checked_type.shape[-1] % 4 != 0
or matched.args[1].checked_type.shape[-1] % 4 != 0
):
return False
return True
return [
("cublas.matmul", matmul_pattern(), check_matmul_like),
("cublas.batch_matmul", batch_matmul_pattern(), check_matmul_like),
("cublas.dense", dense_pattern(), check_matmul_like),
]
@lower_composite("cublas.matmul")
def _lower_matmul(op: relay.Call, inputs: List[te.Tensor]) -> te.Tensor:
"""Lower a matmul using cuBLAS."""
return cublas.matmul(
inputs[0],
inputs[1],
transa=op.attrs["transpose_a"],
transb=op.attrs["transpose_b"],
dtype=op.checked_type.dtype,
)
@lower_composite("cublas.batch_matmul")
def _lower_batch_matmul(op: relay.Call, inputs: List[te.Tensor]) -> te.Tensor:
"""Lower a batch_matmul using cuBLAS."""
return cublas.batch_matmul(
inputs[0],
inputs[1],
transa=op.attrs["transpose_a"],
transb=op.attrs["transpose_b"],
dtype=op.checked_type.dtype,
)
@lower_composite("cublas.dense")
def _lower_dense(op: relay.Call, inputs: List[te.Tensor]) -> te.Tensor:
"""Lower a dense using cuBLAS."""
return cublas.matmul(
inputs[0], inputs[1], transa=False, transb=True, dtype=op.checked_type.dtype
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/cudnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""cuDNN Relay integration."""
from typing import Callable, List, Tuple
import tvm
import tvm.ir
from tvm import relay
from tvm import te
from tvm.relay import transform
from tvm.contrib import cudnn
from ...dataflow_pattern import is_op, wildcard
from .te_target import lower_composite, relay_to_runtime
from .register import register_pattern_table
tvm._ffi.register_func("relay.ext.cudnn", relay_to_runtime(tvm.target.cuda()))
def partition_for_cudnn(mod: tvm.IRModule) -> tvm.IRModule:
"""Partition the graph to offload for cuDNN.
Parameters
----------
mod : tvm.IRModule
The module to partition.
Returns
-------
tvm.IRModule
The partitioned module.
"""
seq = tvm.transform.Sequential(
[
transform.InferType(),
transform.MergeComposite(pattern_table()),
transform.AnnotateTarget("cudnn"),
transform.PartitionGraph(),
transform.InferType(),
]
)
return seq(mod)
@register_pattern_table("cudnn")
def pattern_table() -> List[Tuple[str, relay.Pattern, Callable[[relay.Call], bool]]]:
"""Get the cuDNN pattern table."""
def softmax_pattern() -> relay.Pattern:
"""Create pattern for softmax."""
return is_op("nn.softmax")(wildcard())
def log_softmax_pattern() -> relay.Pattern:
"""Create pattern for log_softmax."""
return is_op("nn.log_softmax")(wildcard())
def conv2d_pattern() -> relay.Pattern:
"""Create pattern for conv2d."""
return is_op("nn.conv2d")(wildcard(), wildcard())
def conv2d_bias_act_pattern() -> relay.Pattern:
"""Create pattern for fused conv2d+bias+activation."""
conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
bias = is_op("nn.bias_add")(conv2d, wildcard())
return bias.optional(is_op("nn.relu"))
def check_softmax(matched: relay.Call) -> bool:
"""Check if softmax is supported by cuDNN."""
if matched.args[0].checked_type.dtype not in ["float64", "float32", "float16"]:
return False
return True
def check_log_softmax(matched: relay.Call) -> bool:
"""Check if log_softmax is supported by cuDNN."""
if matched.args[0].checked_type.dtype not in ["float64", "float32", "float16"]:
return False
if len(matched.args[0].checked_type.shape) != 2:
return False
if matched.attrs["axis"] not in (1, -1):
return False
return True
def check_conv2d(matched: relay.Call) -> bool:
if matched.args[0].checked_type.dtype not in ["float64", "float32", "float16"]:
return False
if matched.attrs["data_layout"] != "NCHW" or matched.attrs["kernel_layout"] != "OIHW":
return False
padding = matched.attrs["padding"]
if padding[0] != padding[2] or padding[1] != padding[3]:
return False
return True
def check_conv2d_bias_act(matched: relay.Call) -> bool:
return True
return [
("cudnn.softmax", softmax_pattern(), check_softmax),
("cudnn.log_softmax", log_softmax_pattern(), check_log_softmax),
("cudnn.conv2d_bias_act", conv2d_bias_act_pattern(), check_conv2d_bias_act),
("cudnn.conv2d", conv2d_pattern(), check_conv2d),
]
@lower_composite("cudnn.softmax")
def _lower_softmax(op: relay.Call, inputs: List[te.Tensor]) -> te.Tensor:
"""Lower a softmax using cuDNN."""
return cudnn.softmax(inputs[0], axis=op.attrs["axis"])
@lower_composite("cudnn.log_softmax")
def _lower_log_softmax(op: relay.Call, inputs: List[te.Tensor]) -> te.Tensor:
"""Lower a log_softmax using cuDNN."""
return cudnn.log_softmax(inputs[0], axis=op.attrs["axis"])
@lower_composite("cudnn.conv2d_bias_act")
def _lower_conv2d_bias_act(op: relay.Call, inputs: List[te.Tensor]) -> te.Tensor:
"""Lower a fused conv2d+bias+activation using cuDNN."""
conv_dtype = op.checked_type.dtype
if op.op.name == "nn.relu":
activation_mode = 1 # Relu
conv2d = op.args[0].args[0]
else:
activation_mode = 5 # Identity
conv2d = op.args[0]
conv_mode = 1
tensor_format = 0
algo = 1
pad = conv2d.attrs["padding"]
strides = conv2d.attrs["strides"]
dilation = conv2d.attrs["dilation"]
groups = conv2d.attrs["groups"]
oshape = cudnn.conv_output_shape(
tensor_format,
pad,
strides,
dilation,
inputs[0].shape,
inputs[1].shape,
inputs[0].dtype,
conv_dtype,
groups,
)
return te.extern(
oshape,
inputs,
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.cudnn.conv2d+bias+act.forward",
conv_mode,
tensor_format,
algo,
pad[0],
pad[1],
strides[0],
strides[1],
dilation[0],
dilation[1],
activation_mode,
0,
ins[0],
ins[1],
ins[2],
outs[0],
conv_dtype,
groups,
),
name="y",
)
@lower_composite("cudnn.conv2d")
def _lower_conv2d(op: relay.Call, inputs: List[te.Tensor]) -> te.Tensor:
"""Lower a conv2d using cuDNN."""
return cudnn.conv_forward(
inputs[0],
inputs[1],
pad=op.attrs["padding"],
stride=op.attrs["strides"],
dilation=op.attrs["dilation"],
conv_mode=1,
tensor_format=0,
algo=1,
conv_dtype=op.checked_type.dtype,
groups=op.attrs["groups"],
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/cutlass.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Patterns supported CUTLASS."""
from functools import partial
from tvm import relay
from tvm.ir.transform import Sequential, PassContext
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.op.contrib.register import register_pattern_table # type: ignore
from ...dataflow_pattern import wildcard, is_op, is_constant
def make_gelu_pattern(bias_out, out_dtype="float16"):
mul = is_op("multiply")(bias_out, is_constant() | wildcard())
if out_dtype == "float16":
erf = is_op("cast")(is_op("erf")(is_op("cast")(mul)))
else:
erf = is_op("erf")(mul)
mul_half = is_op("multiply")(erf, is_constant() | wildcard())
add = is_op("add")(mul_half, is_constant() | wildcard())
return is_op("multiply")(add, bias_out)
def make_gemm_pattern(with_bias=True, with_act=None, out_dtype="float16"):
"""Create a pattern for dense op followed by activations."""
data = wildcard()
weight = wildcard()
bias = wildcard()
gemm = is_op("nn.dense")(data, weight)
if with_bias:
add_or_bias_add = is_op("add") | is_op("nn.bias_add")
gemm_out = add_or_bias_add(gemm, bias)
else:
gemm_out = gemm
if with_act is None:
return gemm_out
if isinstance(with_act, str) and with_act == "relu":
return is_op("nn.relu")(gemm_out)
assert isinstance(with_act, str) and with_act == "gelu"
return make_gelu_pattern(gemm_out, out_dtype)
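# For example, make_gemm_pattern(True, "relu") matches a Relay sequence like
# (names hypothetical):
#
#   %0 = nn.dense(%data, %weight)
#   %1 = add(%0, %bias)        # or nn.bias_add
#   %2 = nn.relu(%1)
#
# which is then rewritten into a single "cutlass.dense_bias_relu" composite.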
def make_batch_matmul_pattern():
return is_op("nn.batch_matmul")(wildcard(), wildcard())
def make_conv2d_pattern(with_bias=False, with_act=None):
"""Create a pattern for dense op followed by activations."""
data = wildcard()
weight = wildcard()
bias = wildcard()
conv2d = is_op("nn.conv2d")(data, weight)
if with_bias:
add_or_bias_add = is_op("add") | is_op("nn.bias_add")
conv2d_out = add_or_bias_add(conv2d, bias)
else:
conv2d_out = conv2d
if with_act is not None:
if with_act == "relu":
return is_op("nn.relu")(conv2d_out)
if with_act == "sigmoid":
return is_op("sigmoid")(conv2d_out)
if with_act == "silu":
return is_op("multiply")(conv2d_out, is_op("sigmoid")(conv2d_out))
if with_act == "hardswish":
rhs = is_op("divide")(
is_op("clip")(is_op("add")(conv2d_out, is_constant())), is_constant()
)
return is_op("multiply")(conv2d_out, rhs)
raise ValueError("Unknown activation %s." % with_act)
return conv2d_out
def make_conv2d_transpose_pattern():
return is_op("nn.conv2d_transpose")(wildcard(), wildcard())
def make_conv2d_backward_weight_pattern():
return is_op("nn.conv2d_backward_weight")(wildcard(), wildcard())
def make_residual_block_pattern(tensor_op_out, binary_op="add", with_act="relu"):
"""Add pattern for residual blocks."""
residual_input = wildcard()
binary_out = is_op(binary_op)(tensor_op_out, residual_input) | is_op(binary_op)(
residual_input, tensor_op_out
)
if with_act is not None and with_act == "relu":
return is_op("nn.relu")(binary_out)
return binary_out
def check_dtype(lhs, rhs):
"""Check if dtypes in the given workload are supported by CUTLASS."""
return (
(lhs.dtype == "float16" and rhs.dtype == "float16")
or (lhs.dtype == "float32" and rhs.dtype == "float32")
or (lhs.dtype in ["int8", "uint8"] and rhs.dtype in ["int8", "uint8"])
)
def get_root_call(call, root_op_name):
if not isinstance(call, relay.Call):
return None
if str(call.op) == root_op_name:
return call
return get_root_call(call.args[0], root_op_name)
def check_gemm(call):
"""Check if the given dense workload can be offloaded to CUTLASS."""
dense = get_root_call(call, "nn.dense")
lhs = dense.args[0].checked_type
rhs = dense.args[1].checked_type
return check_dtype(lhs, rhs)
def check_batch_matmul(call):
"""Check if the given batch_matmul workload can be offloaded to CUTLASS."""
batch_matmul = get_root_call(call, "nn.batch_matmul")
lhs = batch_matmul.args[0].checked_type
rhs = batch_matmul.args[1].checked_type
transpose_a = batch_matmul.attrs.transpose_a
transpose_b = batch_matmul.attrs.transpose_b
return check_dtype(lhs, rhs) and not transpose_a and transpose_b
def is_depthwise_conv2d(ic, oc, groups):
return ic == oc == groups
def check_conv2d_common(op_name, expected_kernel_layout, call):
"""Check if the given conv2d workload can be offloaded to CUTLASS."""
conv2d = get_root_call(call, op_name)
data_layout = conv2d.attrs.data_layout
kernel_layout = conv2d.attrs.kernel_layout
data = conv2d.args[0].checked_type
weight = conv2d.args[1].checked_type
if (
data_layout != "NHWC"
or kernel_layout != expected_kernel_layout
or not check_dtype(data, weight)
):
return False
IC = data.shape[3]
OC = weight.shape[0]
return not is_depthwise_conv2d(IC, OC, conv2d.attrs.groups)
def check_conv2d(call):
return check_conv2d_common("nn.conv2d", "OHWI", call)
def check_conv2d_transpose(call):
# conv2d_transpose is implemented as dgrad, needs to swap the roles of C and K
return check_conv2d_common("nn.conv2d_transpose", "IHWO", call)
def check_conv2d_backward_weight(call):
return check_conv2d_common("nn.conv2d_backward_weight", "NHWC", call)
def check_conv2d_residual(call, binary_op):
"""Check if the given conv2d workload can be offloaded to CUTLASS."""
conv2d = get_root_call(call, "nn.conv2d")
if not check_conv2d(call):
return False
residual_binop = get_root_call(call, binary_op)
lhs = residual_binop.args[0]
rhs = residual_binop.args[1]
# residual_input is pattern-matched as a wildcard. Make sure it does not sit between
# residual binary op and the root conv2d of this pattern.
# If the root conv2d is the parent of both lhs and rhs, we should reject this pattern.
if get_root_call(lhs, "nn.conv2d") == conv2d and get_root_call(rhs, "nn.conv2d") == conv2d:
return False
return all(x == y for (x, y) in zip(lhs.checked_type.shape, rhs.checked_type.shape))
@register_pattern_table("cutlass")
def pattern_table():
"""Returns list of triples describing the name, dataflow pattern and predicate for all
the CUTLASS-supported operators."""
dense_pat = ("cutlass.dense", make_gemm_pattern(False, None), check_gemm)
dense_bias_pat = ("cutlass.dense_bias", make_gemm_pattern(True, None), check_gemm)
dense_bias_relu_pat = ("cutlass.dense_bias_relu", make_gemm_pattern(True, "relu"), check_gemm)
dense_bias_gelu_fp16_pat = (
"cutlass.dense_bias_gelu_fp16",
make_gemm_pattern(True, "gelu"),
check_gemm,
)
dense_bias_gelu_fp32_pat = (
"cutlass.dense_bias_gelu_fp32",
make_gemm_pattern(True, "gelu", out_dtype="float32"),
check_gemm,
)
dense_patterns = [
dense_bias_gelu_fp16_pat,
dense_bias_gelu_fp32_pat,
dense_bias_relu_pat,
dense_bias_pat,
dense_pat,
("cutlass.batch_matmul", make_batch_matmul_pattern(), check_batch_matmul),
]
conv2d_patterns = [
(
"cutlass.conv2d_bias_hardswish",
make_conv2d_pattern(with_bias=True, with_act="hardswish"),
check_conv2d,
),
(
"cutlass.conv2d_bias_silu",
make_conv2d_pattern(with_bias=True, with_act="silu"),
check_conv2d,
),
(
"cutlass.conv2d_bias_relu",
make_conv2d_pattern(with_bias=True, with_act="relu"),
check_conv2d,
),
(
"cutlass.conv2d_bias_sigmoid",
make_conv2d_pattern(with_bias=True, with_act="sigmoid"),
check_conv2d,
),
("cutlass.conv2d_bias", make_conv2d_pattern(with_bias=True), check_conv2d),
("cutlass.conv2d", make_conv2d_pattern(), check_conv2d),
]
# For now, no fusion for grad kernels
conv2d_grad_patterns = [
("cutlass.conv2d_transpose", make_conv2d_transpose_pattern(), check_conv2d_transpose),
(
"cutlass.conv2d_backward_weight",
make_conv2d_backward_weight_pattern(),
check_conv2d_backward_weight,
),
]
residual_block_patterns = []
for with_act, postfix in [("relu", "_relu"), (None, "")]:
for name, pat, _ in conv2d_patterns[:-1]:
for bin_op in ["add", "multiply"]:
residual_block_patterns.append(
(
name + "_residual_" + bin_op + postfix,
make_residual_block_pattern(pat, bin_op, with_act=with_act),
partial(check_conv2d_residual, binary_op=bin_op),
)
)
return residual_block_patterns + dense_patterns + conv2d_patterns + conv2d_grad_patterns
def partition_for_cutlass(mod, params=None):
"""Partition the input module into CUTLASS-supported subgraphs."""
if params is not None:
mod["main"] = bind_params_by_name(mod["main"], params)
remove_bn_pass = Sequential(
[
transform.InferType(),
transform.SimplifyInference(),
transform.FoldConstant(),
transform.FoldScaleAxis(),
]
)
with PassContext(opt_level=3):
mod = remove_bn_pass(mod)
cutlass_patterns = relay.op.contrib.get_pattern_table("cutlass")
seq = Sequential(
[
transform.InferType(),
transform.MergeComposite(cutlass_patterns),
transform.AnnotateTarget(["cutlass"], include_non_call_ops=False),
transform.PartitionGraph(bind_constants=False),
]
)
return seq(mod)
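# Illustrative usage sketch (module/params are hypothetical; CUTLASS kernel
# selection and compilation happen in later steps outside this file):
#
#   mod = partition_for_cutlass(mod, params)
#   # offloaded subgraphs are now wrapped in functions annotated with
#   # Compiler="cutlass" and composites such as "cutlass.dense_bias_relu".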
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/cvm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""CVM library supported operators."""
import logging
import tvm
from tvm import relay
from tvm.ir import Op
from tvm.relay.expr import Call, TupleGetItem
from ... import dataflow_pattern as dp
from ...dataflow_pattern import wildcard, is_op, is_constant, is_expr, rewrite, DFPatternCallback
from .register import register_pattern_table
def _register_external_op_helper(op_name, supported=True):
"""The helper function to indicate that a given operator can be supported
by CVMRuntime.
Parameters
----------
op_name : Str
The name of operator that will be registered.
Returns
-------
f : callable
A function that returns whether the operator is supported by CVM.
"""
@tvm.ir.register_op_attr(op_name, "target.cvm")
def _func_wrapper(expr: tvm.relay.expr.ExprWithOp):
# Pooling with ceil_mode = True is not offloaded to CVM.
if "pool" in op_name:
attrs = dict(get_attrs(expr))
if "ceil_mode" in attrs.keys() and attrs["ceil_mode"]:
return False
return supported
return _func_wrapper
_register_external_op_helper("nn.conv2d")
_register_external_op_helper("nn.dense")
_register_external_op_helper("nn.max_pool2d")
_register_external_op_helper("nn.relu")
_register_external_op_helper("abs")
_register_external_op_helper("clip")
_register_external_op_helper("add")
_register_external_op_helper("bias_add")
_register_external_op_helper("multiply")
def make_nn_pattern(op_name, with_bias=False):
pat = op_name.replace("nn", "cvm")
data, weight = wildcard(), wildcard()
out = dp.is_op(op_name)(data, weight)
if with_bias:
bias = wildcard()
bias_out = dp.is_op("add")(out, bias)
bias_out = dp.is_op("nn.bias_add")(out, bias) | bias_out
return [(pat, bias_out), (pat, out)]
return [(pat, out)]
def make_unary_pattern(op_name):
pat = "cvm." + op_name
data = wildcard()
out = dp.is_op(op_name)(data)
return [(pat, out)]
def make_binary_pattern(op_name):
pat = "cvm." + op_name
A, B = wildcard(), wildcard()
out = dp.is_op(op_name)(A, B)
return [(pat, out)]
@register_pattern_table("cvm")
def pattern_table():
cvm_patterns = list()
for op_name in ["nn.conv2d", "nn.dense"]:
cvm_patterns.extend(make_nn_pattern(
op_name, with_bias=True))
for op_name in ["nn.relu", "nn.max_pool2d"]:
cvm_patterns.extend(make_nn_pattern(op_name))
for op_name in ["abs", "clip"]:
cvm_patterns.extend(make_unary_pattern(op_name))
for op_name in ["add", "bias_add", "multiply"]:
cvm_patterns.extend(make_binary_pattern(op_name))
return cvm_patterns
def get_op_name(expr):
"""Get the operator name from an expression."""
if isinstance(expr, Op):
return expr.name
if isinstance(expr, Call):
return get_op_name(expr.op)
if isinstance(expr, TupleGetItem):
return get_op_name(expr.tuple_value)
if isinstance(expr, relay.Tuple):
return get_op_name(expr.fields[0])
return ""
def get_args(expr):
"""Get the arguments from an expression."""
if isinstance(expr, Call):
return expr.args
if isinstance(expr, TupleGetItem):
return get_args(expr.tuple_value)
if isinstance(expr, relay.Tuple):
return [arg for args in map(get_args, expr.fields) for arg in args]
return []
def get_attrs(expr):
"""Get the attributes from an expression."""
if isinstance(expr, Call):
return expr.attrs
if isinstance(expr, TupleGetItem):
return get_attrs(expr.tuple_value)
return {}
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/dnnl.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""DNNL library supported operators.
There are two ways of registering a function for an op to indicate if it is
supported by DNNL.
- The first and simplest way is to use the helper so that
users only need to provide the operator name and a boolean value to indicate if
it is supported. For example:
.. code-block:: python
add = _register_external_op_helper("add")
add = _register_external_op_helper("add", True)
add = _register_external_op_helper("add", False)
- The other way is to implement the function by themselves to
check the attributes of the op and decide if it should be offloaded to DNNL.
"""
import logging
from functools import reduce
import tvm.ir
from tvm.ir import Op
from tvm import relay
from tvm.relay import transform
from tvm.relay.expr import GlobalVar
from tvm.relay.expr_functor import ExprMutator, ExprVisitor
from tvm.relay.expr import const
from tvm.relay.analysis import analysis as _analysis
from tvm.relay import expr as _expr
from tvm.relay.expr import Call, TupleGetItem
from ... import _ffi_api
from ...dataflow_pattern import wildcard, is_op, is_constant, is_expr, rewrite, DFPatternCallback
from .register import register_pattern_table
logger = logging.getLogger("DNNL")
supported_post_elts = ["nn.relu", "tanh", "sigmoid", "clip", "gelu", "swish", "mish", None]
def _register_external_op_helper(op_name, supported=True):
"""The helper function to indicate that a given operator can be supported
by DNNL.
Parameters
----------
op_name : Str
The name of operator that will be registered.
Returns
-------
f : callable
A function that returns if the operator is supported by DNNL.
"""
@tvm.ir.register_op_attr(op_name, "target.dnnl")
def _func_wrapper(expr):
args = expr.args
if any([x.checked_type.dtype == "int64" for x in args]):
logger.info("DNNL does not support int64.")
return False
# DNNL does not support pooling with ceil_mode = True.
if "pool" in op_name:
attrs = dict(get_attrs(expr))
if "ceil_mode" in attrs.keys() and attrs["ceil_mode"]:
return False
return supported
return _func_wrapper
_register_external_op_helper("nn.batch_norm")
_register_external_op_helper("nn.conv1d")
_register_external_op_helper("nn.conv2d")
_register_external_op_helper("nn.conv3d")
_register_external_op_helper("nn.conv2d_transpose")
_register_external_op_helper("nn.conv3d_transpose")
_register_external_op_helper("nn.dense")
_register_external_op_helper("nn.max_pool2d")
_register_external_op_helper("nn.avg_pool2d")
_register_external_op_helper("nn.global_avg_pool2d")
_register_external_op_helper("nn.max_pool3d")
_register_external_op_helper("nn.avg_pool3d")
_register_external_op_helper("abs")
_register_external_op_helper("clip")
_register_external_op_helper("exp")
_register_external_op_helper("log")
_register_external_op_helper("sqrt")
_register_external_op_helper("round")
_register_external_op_helper("nn.relu")
_register_external_op_helper("nn.leaky_relu")
_register_external_op_helper("tanh")
_register_external_op_helper("sigmoid")
_register_external_op_helper("nn.softmax")
_register_external_op_helper("add")
_register_external_op_helper("multiply")
_register_external_op_helper("nn.layer_norm")
_register_external_op_helper("nn.batch_matmul")
def append_eltwise_ops(op, eltwise):
"""Append element-wise post-ops to conv / conv_transpose / dense
Parameters
----------
op : str
The op name to be attached with element-wise post-op.
eltwise : str
The attached elementwise post-op name.
Returns
-------
pattern : CallPattern
Call node sequence.
"""
if eltwise == "gelu":
const1 = wildcard()
const2 = wildcard()
const3 = wildcard()
div = is_op("divide")(op, const1)
erf_val = is_op("erf")(div)
added_erf_val = is_op("add")(erf_val, const2)
mul_val = is_op("multiply")(op, added_erf_val)
op = is_op("multiply")(mul_val, const3)
elif eltwise == "swish":
sig_out = is_op("sigmoid")(op)
op = is_op("multiply")(op, sig_out)
elif eltwise == "mish":
const1 = wildcard()
exp = is_op("exp")(op)
add = is_op("add")(exp, const1)
log = is_op("log")(add)
tanh = is_op("tanh")(log)
op = is_op("multiply")(op, tanh)
elif eltwise:
op = is_op(eltwise)(op)
return op
def make_conv_pattern(conv_name, with_bias=True, with_eltwise=None):
"""Create patterns related to conv and conv_transpose.
Parameters
----------
with_bias : bool
Whether attach `bias_add` to `conv / conv_transpose`.
with_eltwise : str
The attached elementwise post-op name.
Returns
-------
conv_out : CallPattern
Call node sequence.
"""
if with_eltwise not in supported_post_elts:
raise ValueError("Unsupported eltwise post-op: %s" % with_eltwise)
data = wildcard()
weight = wildcard()
bias = wildcard()
conv = is_op(conv_name)(data, weight)
if with_bias:
conv_out = is_op("add")(conv, bias)
else:
conv_out = conv
return append_eltwise_ops(conv_out, with_eltwise)
def make_conv_bias_sum_relu_pattern(conv_type, has_relu=True):
"""Create patterns with sum op.
Parameters
----------
conv_type : str
Should be nn.conv1d / nn.conv2d / nn.conv3d.
has_relu : bool
Whether attach relu.
Returns
-------
out : CallPattern
Call node sequence.
"""
data1 = wildcard()
weight = wildcard()
bias = wildcard()
data2 = wildcard()
out = is_op(conv_type)(data1, weight)
out = is_op("add")(out, bias)
out = is_op("add")(out, data2)
if has_relu:
out = is_op("nn.relu")(out)
return out
def get_op_name(expr):
"""Get the operator name from an expression."""
if isinstance(expr, Op):
return expr.name
if isinstance(expr, Call):
return get_op_name(expr.op)
if isinstance(expr, TupleGetItem):
return get_op_name(expr.tuple_value)
if isinstance(expr, relay.Tuple):
return get_op_name(expr.fields[0])
return ""
def get_args(expr):
"""Get the arguments from an expression."""
if isinstance(expr, Call):
return expr.args
if isinstance(expr, TupleGetItem):
return get_args(expr.tuple_value)
if isinstance(expr, relay.Tuple):
return [arg for args in map(get_args, expr.fields) for arg in args]
return []
def get_attrs(expr):
"""Get the attributes from an expression."""
if isinstance(expr, Call):
return expr.attrs
if isinstance(expr, TupleGetItem):
return get_attrs(expr.tuple_value)
return {}
def make_sum_pattren_predicate(checker):
"""Check whether the conv_bias_add_sum pattern is as expected."""
def predicate(expr):
if get_op_name(expr) == "nn.relu":
expr = expr.args[0]
for e, op_name in zip([expr, expr.args[0]], ["sum", "bias_add"]):
args = get_args(e)
attrs = get_attrs(e.args[0])
if not checker(attrs, args, op_name):
return False
return True
return predicate
def make_bias_add_pattren_predicate(checker):
"""Check whether the conv_bias pattern is as expected."""
def predicate(expr):
if get_op_name(expr) == "nn.relu":
expr = expr.args[0]
if get_op_name(expr) == "add":
args = get_args(expr)
attrs = get_attrs(expr.args[0])
if not checker(attrs, args, "bias_add"):
return False
return True
return predicate
def add_checker(attrs, args, op_name):
"""Check if add is aligned with elementwise_add and bias_add."""
if op_name == "sum":
if not isinstance(args[0].op, tvm.ir.op.Op):
return False
if args[0].op.name != "add":
return False
if tuple(get_shape(args[0])) != tuple(get_shape(args[1])):
return False
if op_name == "bias_add":
if attrs is None:
return False
if not isinstance(args[0].op, tvm.ir.op.Op):
return False
if args[0].op.name != "nn.conv2d":
return False
channel = dict(attrs)["channels"]
const_shape = get_shape(args[1])
if channel != reduce(lambda x, y: x * y, const_shape):
return False
return True
def make_dense_pattern(with_bias=True, with_eltwise=None):
"""Create patterns related to nn.dense.
Parameters
----------
with_bias : bool
Whether attach `bias_add` to `nn.dense`.
with_eltwise : str
The attached elementwise post-op name.
Returns
-------
dense_out : CallPattern
Call node sequence.
"""
if with_eltwise not in supported_post_elts:
raise ValueError("Unsupported eltwise post-op: %s" % with_eltwise)
data = wildcard()
weight = wildcard()
bias = wildcard()
dense = is_op("nn.dense")(data, weight)
if with_bias:
dense_out = is_op("add")(dense, bias)
else:
dense_out = dense
return append_eltwise_ops(dense_out, with_eltwise)
def make_dnnl_pattern(op_name, with_bias, with_eltwise):
"""Create dnnl patterns.
Parameters
----------
op_name : str
The first call node's op name.
with_bias : bool
Whether attach `bias_add` to `nn.dense`.
with_eltwise : str
The attached elementwise post-op name.
Returns
-------
pattern : Tuple(pattern_name, CallPattern)
Created pattern name, along with its CallPattern.
"""
pat_name = op_name.replace("nn", "dnnl")
if "_transpose" in op_name:
pat_name = "dnnl.deconv" + op_name.split("_")[0][-2::]
pat_name += "_bias" if with_bias else ""
pat_name += ("_" + with_eltwise.split(".")[-1]) if with_eltwise else ""
if "conv" in op_name:
dnnl_pattern = (
pat_name,
make_conv_pattern(op_name, with_bias, with_eltwise),
make_bias_add_pattren_predicate(add_checker),
)
elif op_name == "nn.dense":
dnnl_pattern = (pat_name, make_dense_pattern(with_bias, with_eltwise))
else:
logger.warning(
"Currently, only conv1d, conv2d, conv2d_transpose, conv3d_transpose, "
"dense op are supported, but got %s.",
op_name,
)
dnnl_pattern = ()
return dnnl_pattern
def make_qnn_conv2d_pattern():
"""Make qnn.conv2d based pattern supported by DNNL
Returns
-------
pattern : Tuple(pattern_name, CallPattern)
Created pattern name, along with its CallPattern.
"""
data = wildcard()
weight = is_constant()
bias = is_constant()
o_scl = is_constant()
dst_zp = is_constant()
act_scl = is_constant()
sum_scl = is_constant()
sum_src = wildcard()
zero_zp = is_expr(const(0, dtype="int32"))
pat = is_op("qnn.conv2d")(data, weight, zero_zp, zero_zp, is_constant(), is_constant())
pat = is_op("cast")(pat)
pat = is_op("add")(pat, bias) | pat # optional bias
pat = is_op("multiply")(pat, o_scl)
pat = is_op("clip")(pat) # TBD, not only clip
pat = is_op("multiply")(pat, act_scl) | pat # optional multiply. Ex: act_scl == 1
pat = is_op("add")(pat, sum_scl * is_op("cast")(sum_src)) | pat # optional sum
pat = is_op("add")(pat, dst_zp) | pat # optional dst_zp, can be dst_zp == 0
pat = is_op("cast")(pat)
return "dnnl.qnn.conv2d", pat
def make_qnn_dense_pattern():
"""Make qnn.dense based pattern supported by DNNL
Returns
-------
pattern : Tuple(pattern_name, CallPattern)
Created pattern name, along with its CallPattern.
"""
data = wildcard()
weight = is_constant()
bias = is_constant()
o_scl = is_constant()
dst_zp = is_constant()
act_scl = is_constant()
sum_scl = is_constant()
sum_src = wildcard()
zero_zp = is_expr(const(0, dtype="int32"))
pat = is_op("qnn.dense")(data, weight, zero_zp, zero_zp, is_constant(), is_constant())
pat = is_op("cast")(pat)
pat = is_op("add")(pat, bias) | pat # optional bias
pat = is_op("multiply")(pat, o_scl)
pat = is_op("clip")(pat) # TBD, not only clip
pat = is_op("multiply")(pat, act_scl) | pat # optional multiply. ex act_scl == 1
pat = is_op("add")(pat, sum_scl * is_op("cast")(sum_src)) | pat # optional sum
pat = is_op("add")(pat, dst_zp) | pat # optional dst_zp, can be dst_zp == 0
pat = is_op("cast")(pat)
return "dnnl.qnn.dense", pat
@register_pattern_table("dnnl")
def pattern_table():
"""Create dnnl patterns.
Returns
-------
dnnl_patterns : List[dnnl_pattern]
Created patterns.
"""
dnnl_patterns = list()
dnnl_patterns.append(make_qnn_conv2d_pattern())
dnnl_patterns.append(make_qnn_dense_pattern())
dnnl_patterns.append(
(
"dnnl.conv2d_bias_sum_relu",
make_conv_bias_sum_relu_pattern("nn.conv2d"),
make_sum_pattren_predicate(add_checker),
)
)
dnnl_patterns.append(
(
"dnnl.conv2d_bias_sum",
make_conv_bias_sum_relu_pattern("nn.conv2d", False),
make_sum_pattren_predicate(add_checker),
)
)
elt_list = ["nn.relu", "tanh", "sigmoid", "clip", "gelu", "swish", "mish", None]
for with_bias in [True, False]:
for elt in elt_list:
if not with_bias and not elt:
continue
for conv_name in [
"nn.conv1d",
"nn.conv2d",
"nn.conv3d",
"nn.conv2d_transpose",
"nn.conv3d_transpose",
]:
dnnl_patterns.append(make_dnnl_pattern(conv_name, with_bias, elt))
dnnl_patterns.append(make_dnnl_pattern("nn.dense", with_bias, elt))
return dnnl_patterns
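# A minimal usage sketch (not part of the upstream module): the tuples returned by
# pattern_table() are typically consumed by MergeComposite before annotating and
# partitioning the graph for the "dnnl" codegen, e.g.
#
#   seq = tvm.transform.Sequential([
#       relay.transform.MergeComposite(pattern_table()),
#       relay.transform.AnnotateTarget("dnnl"),
#       relay.transform.MergeCompilerRegions(),
#       relay.transform.PartitionGraph(),
#   ])
#   partitioned_mod = seq(mod)  # `mod` is an assumed tvm.IRModule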
def get_optimal_layout_for_conv(
data_layout, kernel_layout, weight_shape, out_shape, paddings, strides, dilates, groups, dtype
):
"""Get the optimal layout of dnnl, given shape of conv2d.
Parameters
----------
data_layout, kernel_layout,weight_shape, out_shape, paddings, strides, dilates, groups
: String
Input argument.
Returns
-------
layouts : string
The result.
"""
return _ffi_api.get_optimal_layout_for_conv(
data_layout,
kernel_layout,
weight_shape,
out_shape,
paddings,
strides,
dilates,
groups,
dtype,
)
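# Illustrative call (an assumption, mirroring how alter_conv below encodes its arguments):
# shapes and integer tuples are passed as comma-separated strings, e.g.
#
#   res = get_optimal_layout_for_conv(
#       "NCHW", "OIHW", "64,3,7,7", "1,64,112,112", "3,3,3,3", "2,2", "1,1", "1", "float32"
#   )
#   # `res` is a comma-separated string of layout tags: "src_tag,weight_tag,dst_tag"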
def get_optimal_layout_for_conv_transpose(
data_layout,
kernel_layout,
weight_shape,
out_shape,
paddings,
output_paddings,
strides,
dilates,
groups,
dtype,
):
"""Get the optimal layout of dnnl, given shape of tranposed conv2d.
Parameters
----------
data_layout, kernel_layout, weight_shape, out_shape, paddings, output_paddings, strides,
dilates, groups
: Int, String
Input argument.
Returns
-------
layouts : string
The result.
"""
return _ffi_api.get_optimal_layout_for_conv_transpose(
data_layout,
kernel_layout,
weight_shape,
out_shape,
paddings,
output_paddings,
strides,
dilates,
groups,
dtype,
)
def get_shape(tensor):
"""Get tensor's shape."""
if isinstance(tensor, relay.expr.Var):
return tensor.type_annotation.concrete_shape
if isinstance(tensor, relay.expr.Constant):
return tensor.data.shape
if isinstance(tensor, tvm.ir.tensor_type.TensorType):
return tensor.concrete_shape
if isinstance(tensor, tvm.ir.container.Array):
return tensor[-1].shape
if isinstance(tensor, relay.expr.Call):
if tensor.op.name == "multiply":
return tensor.type_args[0].shape
return tensor.checked_type.shape
raise TypeError("Unsupport data type: %s" % type(tensor))
def get_dtype(tensor):
"""Get tensor's dtype."""
if isinstance(tensor, relay.expr.Var):
return tensor.type_annotation.dtype
if isinstance(tensor, relay.expr.Constant):
return tensor.data.dtype
if isinstance(tensor, tvm.ir.tensor_type.TensorType):
return tensor.dtype
if isinstance(tensor, tvm.ir.container.Array):
return tensor[-1].dtype
if isinstance(tensor, relay.expr.Call):
if tensor.op.name == "multiply":
return tensor.type_args[0].dtype
return tensor.checked_type.dtype
raise TypeError("Unsupport data type: %s" % type(tensor))
def tag2layout(input_data, is_weight=False, conv_type="Conv1D"):
"""Transfer layout, denoted with `a, b, c, d, e`,
into valid layout (NCHW / OIHW) of TVM."""
if "Conv1D" in conv_type:
data_dic = {"a": "N", "b": "C", "c": "W"}
weight_dic = {"a": "O", "b": "I", "c": "W", "d": "G"}
elif "Conv2D" in conv_type:
data_dic = {"a": "N", "b": "C", "c": "H", "d": "W"}
weight_dic = {"a": "O", "b": "I", "c": "H", "d": "W"}
if "e" in input_data:
weight_dic = {"a": "G", "b": "O", "c": "I", "d": "H", "e": "W"}
elif "Conv3D" in conv_type:
data_dic = {"a": "N", "b": "C", "c": "D", "d": "H", "e": "W"}
weight_dic = {"a": "O", "b": "I", "c": "D", "d": "H", "e": "W", "f": "G"}
dic = weight_dic if is_weight else data_dic
res = ""
for i in input_data:
if i.isupper():
i = i.lower()
res += dic[i]
dic[i] = dic[i].lower()
elif i.islower():
res += dic[i]
elif i.isdigit():
res += i
else:
raise ValueError("Unsupport layout format: %s" % input_data)
return res
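# Worked examples (a sketch of the intended mapping, derived from the dictionaries above):
#   tag2layout("abcd", is_weight=False, conv_type="Conv2D")  -> "NCHW"
#   tag2layout("acdb", is_weight=False, conv_type="Conv2D")  -> "NHWC"
#   tag2layout("ABcd16a16b", is_weight=False, conv_type="Conv2D")
#       -> "NCHW16n16c" (uppercase tags introduce blocked lowercase dimensions)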
def legalize_pad_avg_pool(attrs, inputs, types):
"""Legalize pad->avg_pool2d pattern.
Fuse this pattern into one avg_pool2d with padding = (1, 1),
and count_include_pad = True"""
data = inputs[0]
new_attrs = dict(attrs)
if isinstance(data, relay.expr.Call) and data.op.name == "nn.pad":
new_attrs["padding"] = (1, 1)
new_attrs["count_include_pad"] = True
return relay.nn.avg_pool2d(data.args[0], **new_attrs)
return relay.nn.avg_pool2d(data, **attrs)
def legalize_group_conv(attrs, inputs, types):
"""Legalize group conv / conv_transpose calculation.
Alter weight layout from OIHW to GOIHW / IOHW to GIOHW"""
groups = attrs.groups
data, weight = inputs
if groups == 1:
if "Transpose" not in type(attrs).__name__:
return relay.nn.conv2d(data, weight, **attrs)
return relay.nn.conv2d_transpose(data, weight, **attrs)
OC, IC, H, W = get_shape(weight)
new_attrs = dict(attrs)
weight = relay.reshape(weight, (groups, OC // groups, IC, H, W))
if "Transpose" not in type(attrs).__name__:
new_attrs["kernel_layout"] = "GOIHW"
return relay.nn.conv2d(data, weight, **new_attrs)
new_attrs["kernel_layout"] = "GIOHW"
return relay.nn.conv2d_transpose(data, weight, **new_attrs)
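# Worked example (assumed shapes): for a grouped conv2d with groups=32 and an OIHW
# weight of shape (64, 4, 3, 3), the weight is reshaped to GOIHW as
# (32, 64 // 32, 4, 3, 3) == (32, 2, 4, 3, 3) and kernel_layout is set to "GOIHW";
# the transposed variant uses "GIOHW" instead.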
def alter_conv(attrs, inputs, tinfos, out_type):
"""The convolution's layout auto-query func for dnnl."""
data, weight = inputs
groups = str(attrs.groups)
weight_shape = ",".join([str(x) for x in get_shape(weight)])
out_shape = ",".join([str(x) for x in get_shape(out_type)])
paddings = ",".join([str(x) for x in attrs.get_int_tuple("padding")])
strides = ",".join([str(x) for x in attrs.get_int_tuple("strides")])
dilates = ",".join([str(x) for x in attrs.get_int_tuple("dilation")])
dtype = get_dtype(weight)
new_attrs = dict(attrs)
conv_type = type(attrs).__name__.split("Attrs")[0]
res = get_optimal_layout_for_conv(
attrs["data_layout"],
attrs["kernel_layout"],
weight_shape,
out_shape,
paddings,
strides,
dilates,
groups,
dtype,
)
src_df, weight_df, dst_df = res.split(",")
new_attrs["data_layout"] = tag2layout(src_df, is_weight=False, conv_type=conv_type)
new_attrs["kernel_layout"] = tag2layout(weight_df, is_weight=True, conv_type=conv_type)
new_attrs["out_layout"] = tag2layout(dst_df, is_weight=False, conv_type=conv_type)
if conv_type == "Conv1D":
return relay.nn.conv1d(data, weight, **new_attrs)
if conv_type == "Conv2D":
return relay.nn.conv2d(data, weight, **new_attrs)
return relay.nn.conv3d(data, weight, **new_attrs)
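# A hedged registration sketch (assumption): in tests this auto-query hook is usually
# attached temporarily via TempOpAttr rather than registered globally, e.g.
#
#   from tvm.relay.testing.temp_op_attr import TempOpAttr
#   with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv):
#       with tvm.transform.PassContext(opt_level=3):
#           mod = relay.transform.AlterOpLayout()(mod)  # `mod` is an assumed IRModule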
def alter_conv_transpose(attrs, inputs, tinfos, out_type):
"""The transposed convolution's layout auto-query func for dnnl."""
data, weight = inputs
weight_shape = ",".join([str(x) for x in get_shape(weight)])
out_shape = ",".join([str(x) for x in get_shape(out_type)])
paddings = ",".join([str(x) for x in attrs.get_int_tuple("padding")])
output_paddings = ",".join([str(x) for x in attrs.get_int_tuple("output_padding")])
strides = ",".join([str(x) for x in attrs.get_int_tuple("strides")])
dilates = ",".join([str(x) for x in attrs.get_int_tuple("dilation")])
groups = str(attrs.groups)
dtype = get_dtype(weight)
new_attrs = dict(attrs)
conv_type = type(attrs).__name__.split("Attrs")[0]
res = get_optimal_layout_for_conv_transpose(
attrs["data_layout"],
attrs["kernel_layout"],
weight_shape,
out_shape,
paddings,
output_paddings,
strides,
dilates,
groups,
dtype,
)
src_df, weight_df, dst_df = res.split(",")
new_attrs["data_layout"] = tag2layout(src_df, is_weight=False, conv_type=conv_type)
new_attrs["kernel_layout"] = tag2layout(weight_df, is_weight=True, conv_type=conv_type)
new_attrs["out_layout"] = tag2layout(dst_df, is_weight=False, conv_type=conv_type)
if conv_type == "Conv1DTranspose":
return relay.nn.conv1d_transpose(data, weight, **new_attrs)
if conv_type == "Conv2DTranspose":
return relay.nn.conv2d_transpose(data, weight, **new_attrs)
return relay.nn.conv3d_transpose(data, weight, **new_attrs)
class IsComputeIntensiveGraph(ExprVisitor):
"""
Visits the graph recursively and checks whether it contains compute-heavy ops such as
convolutions (including their transposed variants) and dense.
"""
def __init__(self):
ExprVisitor.__init__(self)
self.is_compute_intensive = False
def visit_call(self, call):
compute_intensive_ops = set(
[
"nn.conv1d",
"nn.conv2d",
"nn.conv2d_transpose",
"nn.conv3d",
"nn.conv3d_transpose",
"nn.dense",
"nn.layer_norm",
"nn.batch_matmul",
"nn.global_avg_pool2d",
]
)
if isinstance(call.op, tvm.tir.op.Op):
if str(call.op) in compute_intensive_ops:
self.is_compute_intensive = True
return super().visit_call(call)
def is_graph_compute_intensive(self, subgraph) -> bool:
"""
This function recursively visits the graph and checks if it's compute intensive.
"""
self.visit(subgraph)
return self.is_compute_intensive
def is_valid_subgraph(body):
"""Final check on whether the subgraph is valid and should be offloaded to DNNL."""
return IsComputeIntensiveGraph().is_graph_compute_intensive(body)
def prune_dnnl_subgraphs(mod):
"""
Removes invalid subgraphs, i.e. those that do not contain compute-intensive dnnl ops.
"""
class SubgraphRemover(ExprMutator):
"""
Reverts subgraphs in subgraphs_to_remove back to TVM instead of using an external codegen.
"""
def __init__(self, subgraphs_to_remove, mod, new_mod):
ExprMutator.__init__(self)
self.subgraphs_to_remove = subgraphs_to_remove
self.mod = mod
self.new_mod = new_mod
def visit_call(self, call):
if isinstance(call.op, GlobalVar):
name = call.op.name_hint
if name in self.subgraphs_to_remove:
# "Inline" the subgraph back into new main function.
func = self.mod[name]
var_map = {}
for arg, param in zip(call.args, func.params):
var_map[param] = super().visit(arg)
new_body = relay.bind(func.body, var_map)
return new_body
if name != "main":
args = []
for arg in call.args:
args.append(super().visit(arg))
return call.op(*args)
return super().visit_call(call)
subgraphs_to_remove = []
# If only one subgraph, do nothing.
if len(mod.get_global_vars()) <= 2:
return mod
# Remove invalid subgraphs
for subgraph in mod.get_global_vars():
name = subgraph.name_hint
if not mod[name].attrs or mod[name].attrs["Compiler"] != "dnnl":
continue
if not is_valid_subgraph(mod[name].body):
subgraphs_to_remove.append(name)
# Create new pruned module
new_mod = tvm.IRModule(mod.functions, mod.type_definitions)
new_mod["main"] = SubgraphRemover(subgraphs_to_remove, mod, new_mod).visit(mod["main"])
new_mod = transform.RemoveUnusedFunctions()(new_mod)
return new_mod
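# Usage sketch (assumption): this pruning is meant to run right after graph partitioning,
# so that trivially small "dnnl" functions are inlined back into the main function, e.g.
#
#   mod = relay.transform.PartitionGraph()(mod)
#   mod = prune_dnnl_subgraphs(mod)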
class LayerNormRewrite(DFPatternCallback):
"""
A callback to rewrite the following operators into a single layer normalization operator.
Pattern #1:
1 %4 = mean(%3, axis=[-1], keepdims=True) /* ty=Tensor[(1, 3136, 1), float32] */;
2 %5 = subtract(%3, %4) /* ty=Tensor[(1, 3136, 64), float32] */;
3 %6 = cast(%5, dtype="float32") /* ty=Tensor[(1, 3136, 64), float32] */;
4 %7 = power(%6, 2f /* ty=float32 */) /* ty=Tensor[(1, 3136, 64), float32] */;
5 %8 = mean(%7, axis=[-1], keepdims=True) /* ty=Tensor[(1, 3136, 1), float32] */;
6 %9 = add(%8, 1e-05f /* ty=float32 */) /* ty=Tensor[(1, 3136, 1), float32] */;
7 %10 = sqrt(%9) /* ty=Tensor[(1, 3136, 1), float32] */;
8 %11 = divide(%5, %10) /* ty=Tensor[(1, 3136, 64), float32] */;
9 %12 = multiply(%11, meta[relay.Constant][2] /* ty=Tensor[(64), float32] */)
/* ty=Tensor[(1, 3136, 64), float32] */;
10 %13 = add(%12, meta[relay.Constant][3] /* ty=Tensor[(64), float32] */)
/* ty=Tensor[(1, 3136, 64), float32] */;
Pattern #2:
1 %0 = mean(%input, axis=[-1], keepdims=True);
2 %1 = variance(%input, %0, axis=[-1], keepdims=True);
3 %2 = add(%1, 1e-05f /* ty=float32 */) /* ty=Tensor[(1, 49, 1), float32] */;
4 %3 = subtract(%input, %0);
5 %4 = sqrt(%2) /* ty=Tensor[(1, 49, 1), float32] */;
6 %5 = divide(%3, %4);
7 %6 = multiply(%5, meta[relay.Constant][0] /* ty=Tensor[(64), float32] */)
/* ty=Tensor[(1, 49, 64), float32] */;
8 %7 = add(%6, meta[relay.Constant][1] /* ty=Tensor[(64), float32] */)
/* ty=Tensor[(1, 49, 64), float32] */
"""
def __init__(self):
super(LayerNormRewrite, self).__init__()
self.data = wildcard()
self.gamma = wildcard()
self.beta = wildcard()
mu = is_op("mean")(self.data)
diff = is_op("subtract")(self.data, mu)
cdiff = diff | is_op("cast")(diff)
const_two = is_expr(relay.const(2)) | is_expr(relay.const(2.0))
p1 = is_op("power")(cdiff, const_two)
mp1 = is_op("mean")(p1) | is_op("variance")(self.data, mu)
eps = is_expr(relay.const(1e-5)) | is_expr(relay.const(1e-6))
added_eps = is_op("add")(mp1, eps)
deno = is_op("sqrt")(added_eps)
div_out = is_op("divide")(diff, deno)
div_out2 = diff * is_op("rsqrt")(added_eps)
weighted = is_op("multiply")(div_out | div_out2, self.gamma)
added_bias = is_op("add")(weighted, self.beta)
self.pattern = added_bias
def callback(self, pre, post, node_map):
data = node_map[self.data][0]
gamma = node_map[self.gamma][0]
beta = node_map[self.beta][0]
return relay.op.nn.layer_norm(data=data, gamma=gamma, beta=beta)
def rewrite_layer_norm(mod):
"""Rewrite the input graph to replace multiple operators with a TVM native layer normalization
operator, so that the resulting nn.layer_norm can be offloaded to the DNNL BYOC backend.
"""
mod["main"] = rewrite(LayerNormRewrite(), mod["main"])
return mod
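# A small illustration (assumption): a decomposed layer norm of the form
#   mean -> subtract -> (power / variance) -> add eps -> sqrt -> divide
#        -> multiply(gamma) -> add(beta)
# is collapsed by rewrite_layer_norm into a single relay.op.nn.layer_norm(data, gamma, beta)
# call, which can then be offloaded to DNNL as a whole.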
class DenseReshapeBiasGeluRewrite(DFPatternCallback):
"""
A callback to reorder reshape operators when the patterns are as below:
Pattern #1:
1 %62 = nn.dense(%61, meta[relay.Constant][13] /* ty=Tensor[(64, 64), float32] */,
units=None, out_dtype="float32") /* ty=Tensor[(3136, 64), float32] */;
2 %63 = reshape(%62, newshape=[1, 3136, 64]) /* ty=Tensor[(1, 3136, 64), float32] */;
3 %64 = add(meta[relay.Constant][4] /* ty=Tensor[(64), float32] */, %63)
/* ty=Tensor[(1, 3136, 64), float32] */;
Pattern #2:
1 %76 = nn.dense(%75, meta[relay.Constant][18] /* ty=Tensor[(512, 64), float32] */,
units=None, out_dtype="float32") /* ty=Tensor[(3136, 512), float32] */;
2 %77 = reshape(%76, newshape=[1, 3136, 512]) /* ty=Tensor[(1, 3136, 512), float32] */;
3 %78 = add(meta[relay.Constant][15] /* ty=Tensor[(512), float32] */, %77)
/* ty=Tensor[(1, 3136, 512), float32] */;
4 %79 = divide(%78, 1.41421f /* ty=float32 */) /* ty=Tensor[(1, 3136, 512), float32] */;
5 %80 = erf(%79) /* ty=Tensor[(1, 3136, 512), float32] */;
6 %81 = add(%80, 1f /* ty=float32 */) /* ty=Tensor[(1, 3136, 512), float32] */;
7 %82 = multiply(%78, %81) /* ty=Tensor[(1, 3136, 512), float32] */;
8 %83 = multiply(%82, 0.5f /* ty=float32 */) /* ty=Tensor[(1, 3136, 512), float32] */;
"""
def __init__(self, has_gelu=True):
super(DenseReshapeBiasGeluRewrite, self).__init__()
self.data = wildcard()
self.weight = wildcard()
self.bias = wildcard()
self.const1 = wildcard()
self.const2 = wildcard()
self.const3 = wildcard()
self.attr_map = {}
self.has_gelu = has_gelu
den = is_op("nn.dense")(self.data, self.weight)
re_den = is_op("reshape")(den)
added = is_op("add")(self.bias, re_den)
if self.has_gelu:
divisor = is_op("divide")(added, self.const1)
val_erf = is_op("erf")(divisor)
added_erf = is_op("add")(val_erf, self.const2)
mul1 = is_op("multiply")(added, added_erf)
mul2 = is_op("multiply")(mul1, self.const3)
self.pattern = mul2
else:
self.pattern = added
def get_attr(self, pre):
"""Recursively retrieve attributes from reshape operator."""
def visit_func(expr):
if isinstance(expr, _expr.Call) and expr.op == relay.op.get("reshape"):
new_attrs = {}
for k in expr.attrs.keys():
new_attrs[k] = expr.attrs[k]
self.attr_map["reshape"] = new_attrs
_analysis.post_order_visit(pre, visit_func)
def callback(self, pre, post, node_map):
self.get_attr(pre)
data = node_map[self.data][0]
weight = node_map[self.weight][0]
bias = node_map[self.bias][0]
den = relay.op.nn.dense(data, weight)
added = relay.op.add(bias, den)
if not self.has_gelu:
return relay.op.reshape(added, self.attr_map["reshape"]["newshape"])
const1 = node_map[self.const1][0]
const2 = node_map[self.const2][0]
const3 = node_map[self.const3][0]
divisor = relay.op.divide(added, const1)
val_erf = relay.op.erf(divisor)
added_erf = relay.op.add(val_erf, const2)
mul1 = relay.op.multiply(added, added_erf)
mul2 = relay.op.multiply(mul1, const3)
return relay.op.reshape(mul2, self.attr_map["reshape"]["newshape"])
def rewrite_dense_bias_gelu_reshape_last(mod):
"""Rewrite the input graph to reorder reshape operators so that
we can perform dense_bias_gelu/dense_bias fusion and then offload
the fused patterns to the DNNL BYOC backend.
"""
mod["main"] = rewrite(
[DenseReshapeBiasGeluRewrite(), DenseReshapeBiasGeluRewrite(has_gelu=False)], mod["main"]
)
return mod
class ResNetV1Rewrite(DFPatternCallback):
"""
A callback to move the downsizing (strided) operation earlier in a ResNetV1 block;
graphs matching Pattern #1 are rewritten into the form shown in Pattern #2:
Pattern #1:
%26 = nn.conv2d(%25, ty=Tensor[(64, 256, 1, 1));
%27 = add(%26, ty=Tensor[(64, 1, 1));
%28 = nn.relu(%27);
%29 = nn.conv2d(%28, ty=Tensor[(64, 64, 3, 3));
%30 = add(%29, ty=Tensor[(64, 1, 1));
%31 = nn.relu(%30);
%32 = nn.conv2d(%31, ty=Tensor[(256, 64, 1, 1));
%33 = add(%32, ty=Tensor[(256, 1, 1));
%34 = add(%33, %25);
%35 = nn.relu(%34);
%36 = nn.conv2d(%35, ty=Tensor[(128, 256, 1, 1), strides=[2, 2]);
%37 = add(%36, ty=Tensor[(128, 1, 1));
%38 = nn.relu(%37);
%39 = nn.conv2d(%38, ty=Tensor[(128, 128, 3, 3));
%40 = add(%39, ty=Tensor[(128, 1, 1)]);
%41 = nn.relu(%40);
%42 = nn.conv2d(%41, ty=Tensor[(512, 128, 1, 1));
%43 = nn.conv2d(%35, ty=Tensor[(512, 256, 1, 1), strides=[2, 2]);
%44 = add(%42, ty=Tensor[(512, 1, 1));
%45 = add(%43, ty=Tensor[(512, 1, 1));
%46 = add(%44, %45);
%47 = nn.relu(%46);
Pattern #2:
%26 = nn.conv2d(%25, ty=Tensor[(64, 256, 1, 1));
%27 = add(%26, ty=Tensor[(64, 1, 1));
%28 = nn.relu(%27);
%29 = nn.conv2d(%28, ty=Tensor[(64, 64, 3, 3), strides=[2, 2]);
%30 = add(%29, ty=Tensor[(64, 1, 1));
%31 = nn.relu(%30);
%32 = nn.conv2d(%31, ty=Tensor[(256, 64, 1, 1));
%33 = add(%32, ty=Tensor[(256, 1, 1));
%34 = nn.max_pool2d(%25, pool_size=[1, 1], strides=[2, 2], padding=[0, 0, 0, 0]);
%35 = add(%33, %34);
%36 = nn.relu(%35);
%37 = nn.conv2d(%36, ty=Tensor[(128, 256, 1, 1));
%38 = add(%37, ty=Tensor[(128, 1, 1));
%39 = nn.relu(%38);
%40 = nn.conv2d(%39, ty=Tensor[(128, 128, 3, 3));
%41 = add(%40, ty=Tensor[(128, 1, 1));
%42 = nn.relu(%41);
%43 = nn.conv2d(%42, ty=Tensor[(512, 128, 1, 1));
%44 = nn.conv2d(%36, ty=Tensor[(512, 256, 1, 1));
%45 = add(%43, ty=Tensor[(512, 1, 1));
%46 = add(%44, ty=Tensor[(512, 1, 1));
%47 = add(%45, %46);
%48 = nn.relu(%47);
"""
def __init__(self):
super(ResNetV1Rewrite, self).__init__()
self.attr_lst = []
self.data = wildcard()
self.w1, self.b1 = wildcard(), wildcard()
self.w2, self.b2 = wildcard(), wildcard()
self.w3, self.b3 = wildcard(), wildcard()
self.w4, self.b4 = wildcard(), wildcard()
self.w5, self.b5 = wildcard(), wildcard()
self.w6, self.b6 = wildcard(), wildcard()
self.w7, self.b7 = wildcard(), wildcard()
conv1 = is_op("nn.conv2d")(self.data, self.w1).has_attr({"kernel_size": [1, 1]})
conv1 = is_op("add")(conv1, self.b1)
conv1 = is_op("nn.relu")(conv1)
conv2 = is_op("nn.conv2d")(conv1, self.w2).has_attr({"kernel_size": [3, 3]})
conv2 = is_op("add")(conv2, self.b2)
conv2 = is_op("nn.relu")(conv2)
conv3 = is_op("nn.conv2d")(conv2, self.w3).has_attr({"kernel_size": [1, 1]})
conv3 = is_op("add")(conv3, self.b3)
conv3 = is_op("add")(conv3, self.data)
conv3 = is_op("nn.relu")(conv3)
left_conv4 = is_op("nn.conv2d")(conv3, self.w4).has_attr({"strides": [2, 2]})
left_conv4 = is_op("add")(left_conv4, self.b4)
left_conv4 = is_op("nn.relu")(left_conv4)
left_conv5 = is_op("nn.conv2d")(left_conv4, self.w5).has_attr({"kernel_size": [3, 3]})
left_conv5 = is_op("add")(left_conv5, self.b5)
left_conv5 = is_op("nn.relu")(left_conv5)
left_conv6 = is_op("nn.conv2d")(left_conv5, self.w6).has_attr({"kernel_size": [1, 1]})
left_conv6 = is_op("add")(left_conv6, self.b6)
right_conv7 = is_op("nn.conv2d")(conv3, self.w7).has_attr({"strides": [2, 2]})
right_conv7 = is_op("add")(right_conv7, self.b7)
out = is_op("add")(left_conv6, right_conv7)
out = is_op("nn.relu")(out)
self.pattern = out
def get_attr(self, pre):
"""Recursively retrieve attributes from reshape operator."""
def visit_func(expr):
if isinstance(expr, _expr.Call) and expr.op == relay.op.get("nn.conv2d"):
self.attr_lst.append(expr.attrs)
_analysis.post_order_visit(pre, visit_func)
def callback(self, pre, post, node_map):
self.get_attr(pre)
data = node_map[self.data][0]
w1, b1 = node_map[self.w1][0], node_map[self.b1][0]
w2, b2 = node_map[self.w2][0], node_map[self.b2][0]
w3, b3 = node_map[self.w3][0], node_map[self.b3][0]
w4, b4 = node_map[self.w4][0], node_map[self.b4][0]
w5, b5 = node_map[self.w5][0], node_map[self.b5][0]
w6, b6 = node_map[self.w6][0], node_map[self.b6][0]
w7, b7 = node_map[self.w7][0], node_map[self.b7][0]
new_attrs = self.attr_lst[-7]
conv1 = relay.op.nn.conv2d(data, w1, **new_attrs)
conv1 = relay.op.add(conv1, b1)
conv1 = relay.op.nn.relu(conv1)
new_attrs = dict(self.attr_lst[-6])
new_attrs["strides"] = [2, 2]
conv2 = relay.op.nn.conv2d(conv1, w2, **new_attrs)
conv2 = relay.op.add(conv2, b2)
conv2 = relay.op.nn.relu(conv2)
new_attrs = self.attr_lst[-5]
conv3 = relay.op.nn.conv2d(conv2, w3, **new_attrs)
conv3 = relay.op.add(conv3, b3)
max_pool = relay.op.nn.max_pool2d(
data, pool_size=(1, 1), strides=(2, 2), layout=new_attrs["data_layout"]
)
conv3 = relay.op.add(conv3, max_pool)
conv3 = relay.op.nn.relu(conv3)
new_attrs = dict(self.attr_lst[-4])
new_attrs["strides"] = [1, 1]
left_conv4 = relay.op.nn.conv2d(conv3, w4, **new_attrs)
left_conv4 = relay.op.add(left_conv4, b4)
left_conv4 = relay.op.nn.relu(left_conv4)
new_attrs = self.attr_lst[-3]
left_conv5 = relay.op.nn.conv2d(left_conv4, w5, **new_attrs)
left_conv5 = relay.op.add(left_conv5, b5)
left_conv5 = relay.op.nn.relu(left_conv5)
new_attrs = self.attr_lst[-2]
left_conv6 = relay.op.nn.conv2d(left_conv5, w6, **new_attrs)
left_conv6 = relay.op.add(left_conv6, b6)
new_attrs = dict(self.attr_lst[-1])
new_attrs["strides"] = [1, 1]
right_conv7 = relay.op.nn.conv2d(conv3, w7, **new_attrs)
right_conv7 = relay.op.add(right_conv7, b7)
out = relay.op.add(left_conv6, right_conv7)
out = relay.op.nn.relu(out)
self.attr_lst = []
return out
def rewrite_resnetv1(mod):
"""Rewrite the the ResNetV1 downsize block to reduce the computation complexity."""
mod["main"] = rewrite(ResNetV1Rewrite(), mod["main"])
return mod
class LegalizeQnnOpForDnnl(DFPatternCallback):
"""Legalize QNN based patterns to match DNNL
original pattern:
OP = qnn.dense | qnn.conv2d
%1 = OP<int>(SRC, WGH) - OP<int>(src_zp, WGH) // qnn.conv2d
%2 = %1 + orig_bias // bias
%2 = (%1 - rq_in_zp) * rq_in_scl / rq_out_scl + rq_out_zp // qnn.requantize
%3 = act(%2) // activation == clip
%4 = ((%3 - sum_lh_zp) * sum_lh_scl + (SRC2 - sum_rh_zp) * sum_rh_scl) // qnn.add
/ sum_out_scl + sum_out_zp
transform to DNNL compatible:
%1 = OP<int>(SRC, WGH)
%2 = cast(%1, dtype="float")
%2 = (%1 + bias) * o_scl
%3 = act(%2) * act_scl
%4 = %3 + SRC2 * sum_scl
%5 = %4 + dst_zp
%6 = cast(%5, dtype="float")
where:
o_scl = rq_in_scl / rq_out_scl
act_scl = sum_lhs_scl / sum_out_scl
sum_scl = sum_rhs_scl / sum_out_scl
bias = orig_bias - OP(src_zp, WGH) - rq_in_zp + rq_out_zp * rq_out_scl / rq_in_scl
dst_zp = sum_out_zp - sum_lhs_zp * sum_lhs_scl / sum_out_scl -
sum_rhs_zp * sum_rhs_scl / sum_out_scl
"""
def __init__(self):
super(LegalizeQnnOpForDnnl, self).__init__()
self.src = wildcard()
self.wgh = wildcard()
self.bias = wildcard()
self.sum_src = wildcard()
self.src_scl = is_constant()
self.src_zp = is_constant()
self.wgh_scl = is_constant()
self.wgh_zp = is_expr(const(0))
self.rq_in_scl = is_constant()
self.rq_in_zp = is_constant()
self.rq_out_scl = is_constant()
self.rq_out_zp = is_constant()
self.sum_lhs_scl = is_constant()
self.sum_lhs_zp = is_constant()
self.sum_rhs_scl = is_constant()
self.sum_rhs_zp = is_constant()
self.sum_out_scl = is_constant()
self.sum_out_zp = is_constant()
self.root = (is_op("qnn.conv2d") | is_op("qnn.dense"))(
self.src, self.wgh, self.src_zp, self.wgh_zp, self.src_scl, self.wgh_scl
)
pat = is_op("add")(self.root, self.bias) | self.root # optional bias
pat = is_op("qnn.requantize")(
pat, self.rq_in_scl, self.rq_in_zp, self.rq_out_scl, self.rq_out_zp
)
pat = is_op("clip")(pat)
cast = is_op("cast")(pat)
pat = is_op("qnn.add")(
cast,
self.sum_src,
self.sum_lhs_scl,
self.sum_lhs_zp,
self.sum_rhs_scl,
self.sum_rhs_zp,
self.sum_out_scl,
self.sum_out_zp,
)
pat = is_op("clip")(pat)
self.pattern = pat | cast
def callback(self, pre, post, node_map):
root = node_map[self.root][0]
src = node_map[self.src][0]
wgh = node_map[self.wgh][0]
bias = node_map.get(self.bias, default=[relay.const(0, dtype="int32")])[0]
src_zp = node_map[self.src_zp][0]
rq_in_scl = node_map[self.rq_in_scl][0]
rq_in_zp = node_map[self.rq_in_zp][0]
rq_out_scl = node_map[self.rq_out_scl][0]
rq_out_zp = node_map[self.rq_out_zp][0]
final_dtype = node_map[self.pattern][0].checked_type.dtype
if root.op == relay.op.get("qnn.conv2d"):
dst_layout = root.attrs.out_layout
dst_layout = root.attrs.data_layout if dst_layout == "" else dst_layout
wgh_layout = root.attrs.kernel_layout
else:
# qnn.dense has no layout attributes. Assume that it is plain
dst_layout = "NC"
wgh_layout = "OI"
# TODO(@apeskov): dst_layout may be blocked
bias_rank = len(dst_layout) - dst_layout.index("C")
sum_src = node_map[self.sum_src][0] if self.sum_src in node_map else None
# Default values if qnn.sum is not present
sum_lhs_scl = node_map[self.sum_lhs_scl][0] if sum_src else relay.const(1, dtype="float32")
sum_lhs_zp = node_map[self.sum_lhs_zp][0] if sum_src else relay.const(0, dtype="int32")
sum_rhs_scl = node_map[self.sum_rhs_scl][0] if sum_src else relay.const(0, dtype="float32")
sum_rhs_zp = node_map[self.sum_rhs_zp][0] if sum_src else relay.const(0, dtype="int32")
sum_out_scl = node_map[self.sum_out_scl][0] if sum_src else relay.const(1, dtype="float32")
sum_out_zp = node_map[self.sum_out_zp][0] if sum_src else relay.const(0, dtype="int32")
def cast_fp(op):
return relay.op.cast(op, dtype="float32")
# recalculate some factors
o_scl = rq_in_scl / rq_out_scl
act_scl = sum_lhs_scl / sum_out_scl
sum_scl = sum_rhs_scl / sum_out_scl
dst_zp = (
cast_fp(sum_out_zp)
- cast_fp(sum_lhs_zp) * sum_lhs_scl / sum_out_scl
- cast_fp(sum_rhs_zp) * sum_rhs_scl / sum_out_scl
)
bias = self.squeeze_bias(bias, dst_layout)
bias = (
cast_fp(bias)
- cast_fp(self.fake_op(src_zp, wgh, wgh_layout))
- cast_fp(rq_in_zp)
+ cast_fp(rq_out_zp) * rq_out_scl / rq_in_scl
)
bias = self.broadcast_to_rank(bias, bias_rank)
zero_zp = relay.const(0, dtype="int32")
one_scl = relay.const(1.0, dtype="float32")
# construct new graph with proper post op ordering
gr = tvm.relay.Call(
root.op,
[src, wgh, zero_zp, zero_zp, one_scl, one_scl],
root.attrs,
root.type_args,
root.span,
)
gr = relay.op.cast(gr, dtype="float32")
gr = gr + bias
gr = gr * o_scl
gr = relay.op.clip(gr, 0, 255) * act_scl
gr = gr + sum_scl * cast_fp(sum_src) if sum_src else gr
gr = gr + dst_zp
gr = relay.op.cast(gr, dtype=final_dtype)
return gr
@staticmethod
def fake_op(zp, wgh, layout):
"""Fake operator implementation for zp broadcast input"""
# Conv: reduce kernel {OC, IC, KH, KW} -> {OC} in case of group that is still correct
# Dense: reduce kernel {OC, IC} -> {OC}
wgh_int = relay.op.cast(wgh, dtype="int32")
reduced_kernel = relay.op.sum(
wgh_int, axis=[layout.index("O")], keepdims=False, exclude=True
)
return zp * reduced_kernel
@staticmethod
def squeeze_bias(bias, layout):
shape = transform.InferTypeLocal(bias).concrete_shape
c_position = layout.index("C") - len(layout) + len(shape)
squeeze_idxs = [i for i in range(len(shape)) if i != c_position]
return relay.op.squeeze(bias, squeeze_idxs)
@staticmethod
def broadcast_to_rank(op, rank):
"""Scalar or 1D tensor are supported"""
shape = transform.InferTypeLocal(op).concrete_shape
if len(shape) == 0:
return op
if len(shape) == 1:
return relay.op.expand_dims(op, 1, rank - 1)
raise ValueError("Unexpected bias rank to broadcast. Only 0 and 1 are supported.")
def legalize_qnn_for_dnnl(mod):
"""Transform qnn primitives to DNNL compatible form. Eliminate source zero point and apply
strict sequence of post ops."""
mod["main"] = rewrite(LegalizeQnnOpForDnnl(), mod["main"])
seq = tvm.transform.Sequential(
[
transform.InferType(),
# transform.SimplifyInference(), # TODO: this pass decomposes nn.layer_norm
# transform.FoldScaleAxis(), # TODO: fails inside TVM in case of grouped convolutions.
transform.FoldConstant(),
]
)
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
return mod
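# A hedged ordering sketch (assumption): the rewrites defined in this module are meant to
# run on the whole module before the DNNL pattern matching and partitioning passes, e.g.
#
#   mod = rewrite_layer_norm(mod)
#   mod = rewrite_dense_bias_gelu_reshape_last(mod)
#   mod = rewrite_resnetv1(mod)
#   mod = legalize_qnn_for_dnnl(mod)  # only relevant for quantized (QNN) graphs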
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/ethosn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Arm(R) Ethos(TM)-N NPU supported operators."""
from enum import Enum
from distutils.version import LooseVersion
import tvm.ir
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from ...dataflow_pattern import is_constant, is_op, wildcard
from . import _ethosn
from .register import register_pattern_table
class Available(Enum):
UNAVAILABLE = 0
SW_ONLY = 1
SW_AND_HW = 2
def __bool__(self):
return self != Available.UNAVAILABLE
def ethosn_available():
"""Return whether Ethos-N software and hardware support is available"""
if not tvm.get_global_func("relay.ethos-n.query", True):
print("skip because Ethos-N module is not available")
return Available.UNAVAILABLE
hw = tvm.get_global_func("relay.ethos-n.query")()
return Available.SW_AND_HW if hw else Available.SW_ONLY
def ethosn_api_version() -> str:
"""
Returns the semantic version of the driver stack API that is
being used.
Returns
-------
str
Semantic version string (e.g. 3.0.1).
"""
return tvm.get_global_func("relay.ethos-n.api.version")()
def ConvertEquivalents() -> tvm.ir.IRModule: # pylint: disable=invalid-name
"""Converts operations into a numerically equivalent form
that can be understood by the NPU codegen.
Returns
-------
Pass
The module pass.
"""
return _ethosn.ConvertEquivalents()
def InlineNonComputeIntensivePartitions() -> tvm.ir.IRModule: # pylint: disable=invalid-name
"""This pass checks whether functions partitioned for the NPU are considered
non-compute intensive. If they are, they will be unpartitioned and passed on to
other backends to consider.
A partitioned function is currently considered non-compute intensive if it contains
no multiply accumulate operations.
Returns
-------
Pass
The module pass.
"""
return _ethosn.InlineNonComputeIntensivePartitions()
def is_inline_non_compute_intensive_partitions_enabled() -> bool:
"""
Determine whether to inline non-compute-intensive partitions.
Returns
-------
True if inlining should happen, False if not.
"""
compiler_attrs = tvm.get_global_func("relay.ext.ethos-n.get_compiler_attrs")()
if not compiler_attrs:
return False
return compiler_attrs.inline_non_compute_intensive_partitions
def partition_for_ethosn(mod, params=None, **opts):
"""Partition the graph greedily offloading supported
operators to Arm Ethos-N NPU.
Parameters
----------
mod : Module
The module to run passes on.
params : Optional[Dict[str, NDArray]]
Constant input parameters.
Returns
-------
ret : Module
The annotated and partitioned module.
"""
api_version = ethosn_api_version()
supported_api_versions = ["3.1.0"]
if all(api_version != LooseVersion(exp_ver) for exp_ver in supported_api_versions):
raise ValueError(
f"Driver stack version {api_version} is unsupported. "
f"Please use version in {supported_api_versions}."
)
if params:
mod["main"] = bind_params_by_name(mod["main"], params)
passes = [
transform.InferType(),
transform.MergeComposite(pattern_table()),
transform.AnnotateTarget("ethos-n"),
transform.MergeCompilerRegions(),
transform.PartitionGraph(),
ConvertEquivalents(),
]
if is_inline_non_compute_intensive_partitions_enabled():
passes.append(InlineNonComputeIntensivePartitions())
return tvm.transform.Sequential(passes)(mod)
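# Usage sketch (assumption): partition a quantized module for the NPU and then build it
# with the host target; unsupported operators remain on the host side.
#
#   mod = partition_for_ethosn(mod, params)
#   with tvm.transform.PassContext(opt_level=3):
#       lib = relay.build(mod, target="llvm", params=params)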
@register_pattern_table("ethos-n")
def pattern_table():
"""Get the Ethos-N compiler pattern table."""
def qnn_conv_pattern():
pattern = is_op("nn.pad")(wildcard(), wildcard()) | wildcard()
pattern = is_op("qnn.conv2d")(
pattern, is_constant(), is_constant(), is_constant(), is_constant(), is_constant()
)
pattern = is_op("nn.bias_add")(pattern, is_constant())
pattern = is_op("qnn.requantize")(
pattern, is_constant(), is_constant(), is_constant(), is_constant()
)
return pattern
def qnn_fc_pattern():
pattern = is_op("qnn.dense")(
wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant()
)
pattern = is_op("nn.bias_add")(pattern, is_constant())
pattern = is_op("qnn.requantize")(
pattern, is_constant(), is_constant(), is_constant(), is_constant()
)
return pattern
def qnn_avg_pool2d_pattern():
pattern = is_op("cast")(wildcard())
pattern = is_op("nn.avg_pool2d")(pattern)
pattern = is_op("cast")(pattern)
return pattern
def qnn_sigmoid_pattern():
pattern = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
pattern = is_op("sigmoid")(pattern)
pattern = is_op("qnn.quantize")(pattern, is_constant(), is_constant())
return pattern
def qnn_mean_pattern():
pattern = is_op("cast")(wildcard())
pattern = is_op("mean")(pattern)
pattern = is_op("qnn.requantize")(
pattern, is_constant(), is_constant(), is_constant(), is_constant()
)
return pattern
def qnn_tanh_pattern():
pattern = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
pattern = is_op("tanh")(pattern)
pattern = is_op("qnn.quantize")(pattern, is_constant(), is_constant())
return pattern
def qnn_leaky_relu_pattern():
pattern = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
pattern = is_op("nn.leaky_relu")(pattern)
pattern = is_op("qnn.quantize")(pattern, is_constant(), is_constant())
return pattern
def qnn_requantize_pattern():
pattern = is_op("qnn.requantize")(
wildcard(), is_constant(), is_constant(), is_constant(), is_constant()
)
return pattern
def qnn_resize_pattern():
pattern = is_op("image.resize2d")(wildcard()).has_attr({"method": "nearest_neighbor"})
pattern = is_op("qnn.requantize")(
pattern, is_constant(), is_constant(), is_constant(), is_constant()
)
return pattern
def qnn_mul_pattern():
"""
Multiply is supported when one input is a constant of shape [1, ..., C],
where C matches the number of channels of the other input.
"""
mul_op = is_op("qnn.mul")
gen_mul_inputs = lambda x, y: mul_op(
x,
y,
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
)
input_is_left = gen_mul_inputs(wildcard(), is_constant())
input_is_right = gen_mul_inputs(is_constant(), wildcard())
return input_is_left | input_is_right
def qnn_add_pattern(has_constant_input=False):
add_op = is_op("qnn.add")
gen_add_inputs = lambda x, y: add_op(
x,
y,
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
)
if has_constant_input:
input_is_left = gen_add_inputs(wildcard(), is_constant())
input_is_right = gen_add_inputs(is_constant(), wildcard())
return input_is_left | input_is_right
else:
return gen_add_inputs(wildcard(), wildcard())
def qnn_conv2d_transpose_pattern():
pattern = is_op("qnn.conv2d_transpose")(
wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant()
).has_attr({"data_layout": "NHWC"})
pattern = pattern.optional(lambda x: is_op("nn.bias_add")(x, is_constant()))
pattern = is_op("qnn.requantize")(
pattern, is_constant(), is_constant(), is_constant(), is_constant()
)
return pattern
def check_conv2d(extract):
"""Check if a conv2d is supported by Ethos-N."""
if not ethosn_available():
return False
return _ethosn.conv2d(extract)
def check_fc(extract):
"""Check if a fully connected is supported by Ethos-N."""
if not ethosn_available():
return False
return _ethosn.fc(extract)
def check_avg_pool2d(extract):
"""Check if a avg pool2d is supported by Ethos-N."""
if not ethosn_available():
return False
return _ethosn.avg_pool2d(extract)
def check_mean(extract):
"""Check if mean is supported by Ethos-N."""
if not ethosn_available():
return False
return _ethosn.mean(extract)
def check_conv2d_transpose(extract):
"""Check if conv2d_transpose is supported by Ethos-N."""
if not ethosn_available():
return False
return _ethosn.conv2d_transpose(extract)
def check_sigmoid(extract):
"""Check if a sigmoid is supported by Ethos-N."""
if not ethosn_available():
return False
return _ethosn.sigmoid(extract)
def check_tanh(extract):
"""Check if tanh is supported by Ethos-N."""
if not ethosn_available():
return False
return _ethosn.tanh(extract)
def check_leaky_relu(extract):
"""Check if Leaky ReLU is supported."""
if not ethosn_available():
return False
return _ethosn.leaky_relu(extract)
def check_mul_to_reinterpret_quantize(extract):
"""Check if Mul is supported by converting to reinterpret quantize"""
if not ethosn_available():
return False
converted_extract = _ethosn.ConvertQnnMultiplyToReinterpretQuantize(extract)
if converted_extract:
return _ethosn.reinterpret_quantize(converted_extract)
return False
def check_mul_to_depthwise(extract):
"""Check if Mul is supported by converting to a depthwise operation."""
if not ethosn_available():
return False
converted_extract = _ethosn.ConvertQnnMultiplyToDepthwise(extract)
if converted_extract:
return _ethosn.conv2d(converted_extract)
return False
def check_requantize(extract):
"""Check if requantize is supported."""
if not ethosn_available():
return False
return _ethosn.requantize(extract)
def check_resize(extract):
"""Check if resize (nearest neighbor) is supported."""
if not ethosn_available():
return False
return _ethosn.resize(extract)
def check_add(extract):
"""Check if an addition is supported by Ethos-N."""
if not ethosn_available():
return False
return _ethosn.addition(extract)
def check_add_to_reinterpret_quantize(extract):
"""Check if addition can be converted to a reinterpret quantize operation."""
if not ethosn_available():
return False
converted_extract = _ethosn.ConvertQnnAddToReinterpretQuantize(extract)
if converted_extract:
return _ethosn.reinterpret_quantize(converted_extract)
return False
def check_add_to_depthwise(extract):
"""Check if addition can be converted to a depthwise operation."""
if not ethosn_available():
return False
converted_extract = _ethosn.ConvertQnnAddToDepthwise(extract)
if converted_extract:
return _ethosn.conv2d(converted_extract)
return False
return [
(
"ethos-n.qnn_mul_to_reinterpret_quantize",
qnn_mul_pattern(),
check_mul_to_reinterpret_quantize,
),
("ethos-n.qnn_mul_to_depthwise", qnn_mul_pattern(), check_mul_to_depthwise),
(
"ethos-n.qnn_add_to_reinterpret_quantize",
qnn_add_pattern(True),
check_add_to_reinterpret_quantize,
),
("ethos-n.qnn_add_to_depthwise", qnn_add_pattern(True), check_add_to_depthwise),
("ethos-n.qnn_add", qnn_add_pattern(), check_add),
("ethos-n.qnn_conv2d", qnn_conv_pattern(), check_conv2d),
("ethos-n.qnn_conv2d_transpose", qnn_conv2d_transpose_pattern(), check_conv2d_transpose),
("ethos-n.qnn_avg_pool2d", qnn_avg_pool2d_pattern(), check_avg_pool2d),
("ethos-n.qnn_sigmoid", qnn_sigmoid_pattern(), check_sigmoid),
("ethos-n.qnn_fc", qnn_fc_pattern(), check_fc),
("ethos-n.qnn_mean", qnn_mean_pattern(), check_mean),
("ethos-n.qnn_tanh", qnn_tanh_pattern(), check_tanh),
("ethos-n.qnn_leaky_relu", qnn_leaky_relu_pattern(), check_leaky_relu),
("ethos-n.qnn_resize", qnn_resize_pattern(), check_resize),
("ethos-n.qnn_requantize", qnn_requantize_pattern(), check_requantize),
]
@tvm.ir.register_op_attr("nn.max_pool2d", "target.ethos-n")
def max_pool2d(expr):
"""Check if a max pool2d is supported by Ethos-N."""
if not ethosn_available():
return False
return _ethosn.max_pool2d(expr)
@tvm.ir.register_op_attr("reshape", "target.ethos-n")
def reshape(expr):
"""Check if a reshape is supported by Ethos-N."""
if not ethosn_available():
return False
return _ethosn.reshape(expr)
@tvm.ir.register_op_attr("qnn.concatenate", "target.ethos-n")
def qnn_concatenate(expr):
"""Check if a concatenate is supported by Ethos-N."""
if not ethosn_available():
return False
if not _ethosn.concatenate(expr):
return False
# Support library has some unenforced restrictions on qnn params
args = expr.args
min_range = 1e9
max_range = -1e9
qnn_params = []
for i in range(len(args[1].fields)):
scale = args[1].fields[i].data.numpy()
zero_point = args[2].fields[i].data.numpy()
min_range = min(-1 * zero_point * scale, min_range)
max_range = max((255 - zero_point) * scale, max_range)
qnn_params.append((scale, zero_point))
scale = (max_range - min_range) / 255
zero_point = int(-min_range / scale)
if (scale, zero_point) in qnn_params:
return True
return False
@tvm.ir.register_op_attr("split", "target.ethos-n")
def split(expr):
"""Check if a split is supported by Ethos-N."""
if not ethosn_available():
return False
if ethosn_api_version() == LooseVersion("3.0.1"):
return False
if not _ethosn.split(expr):
return False
return True
@tvm.ir.register_op_attr("nn.depth_to_space", "target.ethos-n")
def depth_to_space(expr):
"""Check if a depth_to_space is supported by Ethos-N."""
if not ethosn_available():
return False
if not _ethosn.depth_to_space(expr):
return False
return True
@tvm.ir.register_op_attr("clip", "target.ethos-n")
def clip(expr):
"""Check if a clip is supported by Ethos-N."""
if not ethosn_available():
return False
if not _ethosn.relu(expr):
return False
return True
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/ethosu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=ungrouped-imports, import-outside-toplevel
"""Arm(R) Ethos(TM)-U NPU supported operators."""
import functools
from typing import Dict, List, Tuple, Callable, Optional
import numpy as np # type: ignore
import tvm # type: ignore
from tvm import relay
from tvm.relay.expr import Constant, Call # type: ignore
from tvm.relay.op.contrib.register import register_pattern_table # type: ignore
from tvm.relay.dataflow_pattern import wildcard, is_op, is_constant, is_tuple # type: ignore
from tvm.relay.build_module import bind_params_by_name # type: ignore
try:
# As ethos-u-vela package is an optional TVM dependency, we want to lazy load it
# and check whether it is installed or not.
#
# In order to show the appropriate error messages when we try to invoke code that
# rely on imports from ethos-u-vela, we protect them with the decorator @requires_vela
# implemented below.
from ethosu.vela import api as vapi # type: ignore
except ImportError:
vapi = None
def requires_vela(func):
"""Decorator to check whether we have the required dependency ethos-u-vela
installed as a python package"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not vapi:
raise ImportError(
"The 'ethos-u-vela' python package is required for the Arm(R) Ethos(TM)-U NPU "
"backend. Please install the dependency using your Python package manager."
) from None
return func(*args, **kwargs)
return wrapper
class TensorParams:
"""
This class will parse a tvm Expr along with quantization scale
and zero point to populate parameters that are required
for the creation of tensors in Vela.
"""
@requires_vela
def __init__(self, tensor, layout=None, scale=None, zero_point=None):
self.tensor = tensor
if isinstance(tensor, Constant):
self.values = tensor.data.asnumpy()
else:
self.values = None
self.dtype = tensor.checked_type.dtype
self.shape = [int(i) for i in tensor.checked_type.shape]
self.layout = layout
if scale is not None and zero_point is not None:
self.q_params = vapi.NpuQuantization(
scale.data.asnumpy().astype("float32"), zero_point.data.asnumpy().astype(self.dtype)
)
else:
# put default values
self.q_params = vapi.NpuQuantization(1.0, 0)
def check_strides(strides: List[int], stride_range=None) -> bool:
"""This function checks whether strides are within the limits supported by the NPU"""
if stride_range is None:
stride_range = (1, 3)
smin, smax = stride_range
if not smax >= strides[0] >= smin:
return False
if not smax >= strides[1] >= smin:
return False
return True
def check_valid_dtypes(tensor_params: List[TensorParams], supported_dtypes: List[type]) -> bool:
"""This function checks whether dtypes are supported by the NPU"""
for tep in tensor_params:
# Check for dtypes
if np.dtype(tep.dtype) not in supported_dtypes:
return False
# Check for shape sizes
if any(dimlen > 65536 for dimlen in tep.shape):
return False
return True
def check_weights(weights: TensorParams, dilation: List[int]):
"""This function checks whether weight tensor is compatible with the NPU"""
from tvm.relay.backend.contrib.ethosu.util import get_dim_value
dilated_height_range = (1, 64)
dilated_hxw_range = (1, 64 * 64)
weights_limit = 127 * 65536
dilated_width = (weights.shape[get_dim_value(weights.layout, "W")] - 1) * dilation[0] + 1
dilated_height = (weights.shape[get_dim_value(weights.layout, "H")] - 1) * dilation[1] + 1
dh_min, dh_max = dilated_height_range
if not dh_min <= dilated_height <= dh_max:
return False
dilated_hxw = dilated_height * dilated_width
dhxw_min, dhxw_max = dilated_hxw_range
if not dhxw_min <= dilated_hxw <= dhxw_max:
return False
# A saturation upper bound check for accumulators
weights.values = weights.values - weights.q_params.zero_point
axis = (
get_dim_value(weights.layout, "H"),
get_dim_value(weights.layout, "W"),
get_dim_value(weights.layout, "I"),
)
sum_weights = np.amax(np.sum(np.absolute(weights.values), axis=axis))
return sum_weights <= weights_limit
def check_bias(bias: TensorParams):
"""This function checks whether the bias values fit in 40 bits"""
if bias and bias.dtype == np.dtype("int64"):
valid = all(len(bin(bias_value)[2:]) <= 40 for bias_value in bias.values)
return valid
return True
def check_batch_size(ifm: TensorParams):
"""This function checks for the number of batches vela currently supports"""
return ifm.shape[0] == 1
def check_dilation(dilation: List[int], dilation_range=None):
"""This function checks whether dilation is within the limits supported by the NPU"""
if dilation_range is None:
dilation_range = (1, 2)
dmin, dmax = dilation_range
if not dmin <= dilation[0] <= dmax:
return False
if not dmin <= dilation[1] <= dmax:
return False
return True
def check_padding(padding: List[int], bounds: List[int]):
"""This function checks whether padding is within the limits supported by the NPU"""
if len(padding) != 4 or len(bounds) != 4:
return False
top, left, bottom, right = padding
topb, leftb, bottomb, rightb = bounds
return not (top > topb or left > leftb or bottom > bottomb or right > rightb)
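# Worked example (assumed values): with padding = [top, left, bottom, right] = [1, 1, 2, 2]
# and bounds = [31, 31, 32, 32], every entry is within its bound, so the check passes;
# a top padding of 40 would exceed 31 and the function would return False.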
def check_pool_shape(pool_shape: tvm.ir.container.Array) -> bool:
if len(pool_shape) != 2:
return False
if pool_shape[1] > 256:
return False
if pool_shape[0] * pool_shape[1] > 256 * 256:
return False
return True
def check_dimensions(tensor: TensorParams):
"""This function checks that the tensor has no more than 4 dimensions"""
return len(tensor.shape) <= 4
class QnnConv2DParams:
"""
This class will parse a Call to an ethos-u.qnn_conv2d composite function
and extract quantization information of all the associated tensors.
"""
composite_name = "ethos-u.qnn_conv2d"
# The NPU only supports padding up to the numbers as follows
padding_bounds = [31, 31, 32, 32]
activation_map = {"clip": "CLIP"}
@requires_vela
def __init__(self, func_body: tvm.relay.Function):
from tvm.relay.backend.contrib.ethosu.util import QConv2DArgs # type: ignore
from tvm.relay.backend.contrib.ethosu.util import BiasAddArgs
from tvm.relay.backend.contrib.ethosu.util import RequantArgs
activation = None
separate_padding = None
if str(func_body.op) in self.activation_map.keys():
activation = func_body
requantize_op = activation.args[0]
else:
requantize_op = func_body
bias_add = requantize_op.args[0]
qnn_conv2d = bias_add.args[0]
if isinstance(qnn_conv2d.args[0], relay.Call) and str(qnn_conv2d.args[0].op) == "nn.pad":
separate_padding = qnn_conv2d.args[0]
data_layout = qnn_conv2d.attrs.data_layout
self.kernel_layout = qnn_conv2d.attrs.kernel_layout
# We consider the weights & biases as params, as they should be Constants
self.weights = TensorParams(
qnn_conv2d.args[QConv2DArgs.WEIGHTS.value],
self.kernel_layout,
qnn_conv2d.args[QConv2DArgs.WEIGHTS_SCALE.value],
qnn_conv2d.args[QConv2DArgs.WEIGHTS_ZERO_POINT.value],
)
self.biases = TensorParams(
bias_add.args[BiasAddArgs.BIASES.value],
data_layout,
requantize_op.args[RequantArgs.IFM_SCALE.value],
requantize_op.args[RequantArgs.IFM_ZERO_POINT.value],
)
ifm_tensor = (
separate_padding.args[0] if separate_padding else qnn_conv2d.args[QConv2DArgs.IFM.value]
)
self.ifm = TensorParams(
ifm_tensor,
data_layout,
qnn_conv2d.args[QConv2DArgs.IFM_SCALE.value],
qnn_conv2d.args[QConv2DArgs.IFM_ZERO_POINT.value],
)
self.ofm = TensorParams(
func_body,
data_layout,
requantize_op.args[RequantArgs.OFM_SCALE.value],
requantize_op.args[RequantArgs.OFM_ZERO_POINT.value],
)
attrs = qnn_conv2d.attrs
pad_value = int(qnn_conv2d.args[QConv2DArgs.IFM_ZERO_POINT.value].data.asnumpy())
self.padding = self.extract_padding(attrs.padding, separate_padding, pad_value)
self.strides = attrs.strides
self.dilation = attrs.dilation
self.activation = activation
self.channels = attrs.channels
# If groups is equal to the number of channels, it's a depthwise_conv2d
self.groups = attrs.groups
self.is_depthwise = False
channels_axis = {"HWIO": 3, "HWOI": 2}
if self.groups == self.weights.shape[channels_axis[self.kernel_layout]]:
self.is_depthwise = True
@staticmethod
def extract_padding(
operator_padding: Tuple[int, int, int, int],
separate_padding: relay.Call,
pad_value: int,
) -> Optional[Tuple[int, int, int, int]]:
"""
Convolution operations can sometimes have padding represented as a separate
padding operation before the convolution operation itself. Here we can check
whether these representations can be combined into a single padding attribute
as part of the NPU convolution itself. If the padding specified by the separate
nn.pad operation is not supported, None will be returned. This will cause the
nn.pad to be offloaded separately.
"""
if separate_padding is None:
return operator_padding
if pad_value != int(separate_padding.args[1].data.asnumpy()):
return None
pad_width = separate_padding.attrs["pad_width"]
if len(pad_width) != 4:
return None
if list(pad_width[0]) != [0, 0] or list(pad_width[3]) != [0, 0]:
return None
top, left, bottom, right = operator_padding
return [
top + pad_width[1][0],
left + pad_width[2][0],
bottom + pad_width[1][1],
right + pad_width[2][1],
]
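# Worked example (assumed values): operator padding (1, 1, 1, 1) combined with a preceding
# nn.pad whose pad_width is [[0, 0], [1, 1], [2, 2], [0, 0]] (NHWC) yields the merged
# padding [1 + 1, 1 + 2, 1 + 1, 1 + 2] == [2, 3, 2, 3], provided the pad value matches the
# IFM zero point; otherwise None is returned and the nn.pad stays separate.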
def is_valid(self) -> bool:
"""
This function checks whether QnnConv2D has compatible attributes with the NPU
"""
tensor_params = [self.weights, self.ifm, self.ofm]
if not check_valid_dtypes(tensor_params, supported_dtypes=[np.uint8, np.int8]):
return False
if not check_weights(self.weights, self.dilation):
return False
if not check_bias(self.biases):
return False
if not check_strides(self.strides):
return False
if not check_batch_size(self.ifm):
return False
if not check_dilation(self.dilation):
return False
if not self.padding or not check_padding(self.padding, self.padding_bounds):
return False
legal_groups = [1, self.ofm.shape[3]]
if self.groups not in legal_groups:
return False
# A depthwise conv2d should be matched as QnnDepthwiseConv2DParams, not QnnConv2DParams
return not self.is_depthwise
class QnnConv2DTransposeParams:
"""
This class will parse a Call to an ethos-u.qnn_conv2d_transpose composite
function and extract quantization information of all the associated tensors.
"""
composite_name = "ethos-u.qnn_conv2d_transpose"
# The NPU only supports padding up to the numbers as follows
padding_bounds = [31, 31, 32, 32]
@requires_vela
def __init__(self, func_body: tvm.relay.Function):
from tvm.relay.backend.contrib.ethosu.util import QConv2DTransposeArgs # type: ignore
from tvm.relay.backend.contrib.ethosu.util import BiasAddArgs
from tvm.relay.backend.contrib.ethosu.util import RequantArgs
requantize = func_body
call = func_body.args[0]
if str(call.op) == "nn.bias_add":
bias_add = call
call = call.args[0]
else:
bias_add = None
qnn_conv2d_transpose = call
data_layout = qnn_conv2d_transpose.attrs.data_layout
self.kernel_layout = qnn_conv2d_transpose.attrs.kernel_layout
self.weights = TensorParams(
qnn_conv2d_transpose.args[QConv2DTransposeArgs.WEIGHTS.value],
self.kernel_layout,
qnn_conv2d_transpose.args[QConv2DTransposeArgs.WEIGHTS_SCALE.value],
qnn_conv2d_transpose.args[QConv2DTransposeArgs.WEIGHTS_ZERO_POINT.value],
)
self.biases = (
TensorParams(
bias_add.args[BiasAddArgs.BIASES.value],
data_layout,
requantize.args[RequantArgs.IFM_SCALE.value],
requantize.args[RequantArgs.IFM_ZERO_POINT.value],
)
if bias_add
else None
)
self.ifm = TensorParams(
qnn_conv2d_transpose.args[QConv2DTransposeArgs.IFM.value],
data_layout,
qnn_conv2d_transpose.args[QConv2DTransposeArgs.IFM_SCALE.value],
qnn_conv2d_transpose.args[QConv2DTransposeArgs.IFM_ZERO_POINT.value],
)
self.ofm = TensorParams(
func_body,
data_layout,
requantize.args[RequantArgs.OFM_SCALE.value],
requantize.args[RequantArgs.OFM_ZERO_POINT.value],
)
attrs = qnn_conv2d_transpose.attrs
self.strides = attrs.strides
self.dilation = attrs.dilation
self.padding = attrs.padding
self.channels = attrs.channels
self.groups = attrs.groups
self.output_padding = attrs.output_padding
kernel_size_map = {
"IOHW": self.weights.shape[2:4],
}
self.kernel_shape = kernel_size_map[str(self.weights.layout)]
# Different padding is used in the legalization from conv2d_transpose
# to conv2d, so we calculate it here to check that the new size fits
# within the bounds of the NPU before offloading.
pad_top = int(self.kernel_shape[0]) - 1 - int(self.padding[0])
pad_left = int(self.kernel_shape[1]) - 1 - int(self.padding[1])
pad_bottom = int(self.kernel_shape[0]) - 1 - int(self.padding[2])
pad_right = int(self.kernel_shape[1]) - 1 - int(self.padding[3])
if self.strides == [2, 2]:
pad_bottom -= 1
pad_right -= 1
self.legalize_padding = [pad_top, pad_left, pad_bottom, pad_right]
def is_valid(self) -> bool:
"""
This function checks whether QnnConv2DTranspose has compatible attributes with the NPU
"""
def check_compatible_output_size(ifm_shape, ofm_shape, padding, strides, kernel_shape):
is_valid_padding = padding == [0, 0, 0, 0]
if is_valid_padding:
expected_height = ifm_shape[1] * strides[0] + (kernel_shape[0] - strides[0])
expected_width = ifm_shape[2] * strides[1] + (kernel_shape[1] - strides[1])
else:
expected_height = ifm_shape[1] * strides[0]
expected_width = ifm_shape[2] * strides[1]
return ofm_shape[1] == expected_height and ofm_shape[2] == expected_width
tensor_params = [self.weights, self.ifm, self.ofm]
if not check_valid_dtypes(tensor_params, supported_dtypes=[np.int8]):
return False
if not check_weights(self.weights, self.dilation):
return False
if self.biases and not check_bias(self.biases):
return False
if not check_strides(self.strides, stride_range=(2, 2)):
return False
if not check_batch_size(self.ifm):
return False
if not check_dilation(self.dilation, dilation_range=(1, 1)):
return False
if not check_compatible_output_size(
self.ifm.shape,
self.ofm.shape,
[int(x) for x in self.padding],
self.strides,
self.kernel_shape,
):
return False
if not check_padding(self.legalize_padding, self.padding_bounds):
return False
if self.kernel_shape[0] - 2 - int(self.padding[2]) < 0:
return False
if self.kernel_shape[1] - 2 - int(self.padding[3]) < 0:
return False
if self.groups != 1:
return False
if list(self.output_padding) != [0, 0]:
return False
return True
class QnnDepthwiseConv2DParams(QnnConv2DParams):
"""
This class will parse a call to an ethos-u.depthwise_conv2d composite function
and extract the parameter information.
"""
composite_name = "ethos-u.depthwise_conv2d"
# The hardware only supports padding up to the numbers as follows
padding_bounds = [31, 31, 32, 32]
def __init__(self, func_body: tvm.relay.expr.Call):
QnnConv2DParams.__init__(self, func_body)
def is_valid(self):
"""
Checks whether QnnDepthwiseConv2D + activation function has compatible attributes with HW
"""
tensor_params = [self.weights, self.ifm, self.ofm]
if not check_valid_dtypes(tensor_params, supported_dtypes=[np.uint8, np.int8]):
return False
if not check_weights(self.weights, self.dilation):
return False
if not check_bias(self.biases):
return False
if not check_strides(self.strides):
return False
if not check_batch_size(self.ifm):
return False
if not check_dilation(self.dilation):
return False
if not self.padding or not check_padding(self.padding, self.padding_bounds):
return False
if self.weights.layout != "HWOI":
return False
# only depth multiplier of size 1 is supported
if self.weights.shape[3] != 1:
return False
if not self.is_depthwise:
return False
return True
def qnn_conv2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
"""
This function creates the pattern for qnn.conv2d with optional fused RELU activation.
"""
optional_pad = is_op("nn.pad")(wildcard(), is_constant())
qnn_conv2d = is_op("qnn.conv2d")(
optional_pad | wildcard(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
).has_attr({"kernel_layout": "HWIO"})
bias_add = is_op("nn.bias_add")(qnn_conv2d, is_constant())
req = is_op("qnn.requantize")(
bias_add, is_constant(), is_constant(), is_constant(), is_constant()
)
clip_or_req = req.optional(is_op("clip"))
return clip_or_req
def qnn_depthwise_conv2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
"""
This function creates the pattern for depthwise qnn.conv2d with optional fused RELU activation.
"""
optional_pad = is_op("nn.pad")(wildcard(), is_constant())
qnn_conv2d = is_op("qnn.conv2d")(
optional_pad | wildcard(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
).has_attr({"kernel_layout": "HWOI"})
bias_add = is_op("nn.bias_add")(qnn_conv2d, is_constant())
req = is_op("qnn.requantize")(
bias_add, is_constant(), is_constant(), is_constant(), is_constant()
)
clip_or_req = req.optional(is_op("clip"))
return clip_or_req
def qnn_conv2d_transpose_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
"""
This function creates the pattern for qnn.conv2d_transpose.
"""
qnn_conv2d_transpose = is_op("qnn.conv2d_transpose")(
wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant()
).has_attr({"kernel_layout": "IOHW"})
optional_bias_add = (
is_op("nn.bias_add")(qnn_conv2d_transpose, is_constant()) | qnn_conv2d_transpose
)
req = is_op("qnn.requantize")(
optional_bias_add, is_constant(), is_constant(), is_constant(), is_constant()
)
return req
class MaxPool2DParams:
"""
This class will parse a call to an ethos-u.maxpool2d composite function
and extract the parameter information.
"""
composite_name = "ethos-u.maxpool2d"
# The hardware only supports padding up to the numbers as follows
padding_bounds = [127, 127, 128, 128]
def __init__(self, func_body: Call):
clip = None
if str(func_body.op) == "clip":
clip = func_body
pool_op = clip.args[0]
else:
pool_op = func_body
attrs = pool_op.attrs
self.ifm = TensorParams(pool_op.args[0], attrs.layout)
self.ofm = TensorParams(pool_op, attrs.layout)
self.pool_shape = attrs.pool_size
self.strides = attrs.strides
self.padding = attrs.padding
self.activation = clip
self.pooling_type = "MAX"
def is_valid(self):
"""
This function checks whether MaxPool2D has compatible attributes with the NPU
"""
tensor_params = [self.ifm, self.ofm]
if not check_valid_dtypes(tensor_params, supported_dtypes=[np.uint8, np.int8]):
return False
if self.ifm.dtype != self.ofm.dtype:
return False
if not check_strides(self.strides):
return False
if not check_batch_size(self.ifm):
return False
if not check_padding(self.padding, self.padding_bounds):
return False
if not check_pool_shape(self.pool_shape):
return False
return True
def qnn_maxpool2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
"""
This function creates the pattern for nn.max_pool2d with optional fused RELU activation.
"""
pattern = is_op("nn.max_pool2d")(wildcard())
pattern = pattern.optional(is_op("clip"))
return pattern
class AvgPool2DParams:
"""
This class will parse a call to an ethos-u.avgpool2d composite function
and extract the parameter information.
"""
composite_name = "ethos-u.avgpool2d"
# The hardware only supports padding up to the numbers as follows
padding_bounds = [3, 3, 4, 4]
def __init__(self, func_body: Call):
clip = None
if str(func_body.op) == "clip":
clip = func_body
cast2 = clip.args[0]
else:
cast2 = func_body
avgpool = cast2.args[0]
cast1 = avgpool.args[0]
attrs = avgpool.attrs
self.ifm = TensorParams(cast1.args[0], attrs.layout)
self.ofm = TensorParams(cast2, attrs.layout)
self.pool_shape = attrs.pool_size
self.strides = attrs.strides
self.padding = attrs.padding
self.count_include_pad = attrs.count_include_pad
self.activation = clip
self.pooling_type = "AVG"
def is_valid(self):
"""
This function checks whether AvgPool2D has compatible attributes with the NPU
"""
tensor_params = [self.ifm, self.ofm]
if not check_valid_dtypes(tensor_params, supported_dtypes=[np.uint8, np.int8]):
return False
if self.ifm.dtype != self.ofm.dtype:
return False
if not check_strides(self.strides):
return False
if not check_batch_size(self.ifm):
return False
if self.count_include_pad:
return False
if not check_padding(self.padding, self.padding_bounds):
return False
if not check_pool_shape(self.pool_shape):
return False
# Average pool with padding only supports 1 <= pool_shape <= 8
if list(self.padding) != [0, 0, 0, 0] and (
self.pool_shape[0] > 8 or self.pool_shape[1] > 8
):
return False
return True
def qnn_avgpool2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
"""
This function creates the pattern for nn.avg_pool2d with optional fused RELU activation.
"""
pattern = is_op("cast")(wildcard())
pattern = is_op("nn.avg_pool2d")(pattern)
pattern = is_op("cast")(pattern)
pattern = pattern.optional(is_op("clip"))
return pattern
class BinaryElementwiseParams:
"""
This class will parse a call to an ethosu.binary_elementwise composite function
and extract the parameter information.
"""
def __init__(self, func_body: Call, operator_type: str, is_quantized_operation: bool):
from tvm.relay.backend.contrib.ethosu.util import BinaryElementwiseArgs
from tvm.relay.backend.contrib.ethosu.util import RequantArgs
current_call = func_body
clip = None
requantize = None
if is_quantized_operation:
if str(current_call.op) == "clip":
clip = current_call
current_call = clip.args[0]
else:
if str(current_call.op) == "qnn.requantize":
requantize = current_call
clip = current_call.args[0]
current_call = clip.args[0]
binary_op = current_call
layout = "NHWC"
if is_quantized_operation:
self.ifm = TensorParams(
binary_op.args[BinaryElementwiseArgs.IFM.value],
layout,
binary_op.args[BinaryElementwiseArgs.IFM_SCALE.value],
binary_op.args[BinaryElementwiseArgs.IFM_ZERO_POINT.value],
)
self.ifm2 = TensorParams(
binary_op.args[BinaryElementwiseArgs.IFM2.value],
layout,
binary_op.args[BinaryElementwiseArgs.IFM2_SCALE.value],
binary_op.args[BinaryElementwiseArgs.IFM2_ZERO_POINT.value],
)
self.ofm = TensorParams(
binary_op,
layout,
binary_op.args[BinaryElementwiseArgs.OFM_SCALE.value],
binary_op.args[BinaryElementwiseArgs.OFM_ZERO_POINT.value],
)
else:
self.ifm = TensorParams(
binary_op.args[BinaryElementwiseArgs.IFM.value],
layout,
requantize.args[RequantArgs.IFM_SCALE.value] if requantize else None,
requantize.args[RequantArgs.IFM_ZERO_POINT.value] if requantize else None,
)
self.ifm2 = TensorParams(
binary_op.args[BinaryElementwiseArgs.IFM2.value],
layout,
requantize.args[RequantArgs.IFM_SCALE.value] if requantize else None,
requantize.args[RequantArgs.IFM_ZERO_POINT.value] if requantize else None,
)
self.ofm = TensorParams(
func_body,
layout,
requantize.args[RequantArgs.OFM_SCALE.value] if requantize else None,
requantize.args[RequantArgs.OFM_ZERO_POINT.value] if requantize else None,
)
self.activation = clip
self.operator_type = operator_type
def can_broadcast(ifm, ifm2):
if len(ifm.shape) < len(ifm2.shape):
return False
for m, n in zip(ifm.shape[::-1], ifm2.shape[::-1]):
if m != n and m == 1:
return False
return True
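# Broadcast example (illustrative only): ifm (1, 4, 4, 8) with ifm2 (1, 1, 1, 8) is
# accepted as-is, whereas ifm (1, 1, 1, 8) with ifm2 (1, 4, 4, 8) only passes with the
# operands swapped, which is why reversed_operands is recorded below.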
if can_broadcast(self.ifm, self.ifm2):
self.reversed_operands = False
self.valid_broadcast = True
elif can_broadcast(self.ifm2, self.ifm):
self.reversed_operands = True
self.ifm, self.ifm2 = self.ifm2, self.ifm
self.valid_broadcast = True
else:
self.valid_broadcast = False
def is_valid(self):
"""
This function checks whether BinaryElementwise has compatible attributes with the NPU
"""
if np.dtype(self.ofm) == np.int32 and self.activation is not None:
return False
# The identity operator currently requires the OFM dtype to not be int32
if np.dtype(self.ofm) == np.int32 and len(self.ofm.shape) < 4:
return False
if len(self.ifm.shape) > 4 or len(self.ifm2.shape) > 4:
return False
if len(self.ifm.shape) == 4 and self.ifm.shape[0] != 1:
return False
if len(self.ifm2.shape) == 4 and self.ifm2.shape[0] != 1:
return False
if not self.valid_broadcast:
return False
return True
class AddParams(BinaryElementwiseParams):
"""
This class will parse a call to an ethosu.binary_elementwise Add composite function
and extract the parameter information.
"""
composite_name = "ethos-u.add"
def __init__(self, func_body: Call):
BinaryElementwiseParams.__init__(self, func_body, "ADD", True)
def is_valid(self):
"""
This function checks whether Add has compatible attributes with the NPU
"""
if not super().is_valid():
return False
if not check_valid_dtypes(
[self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.uint8, np.int8, np.int32]
):
return False
return True
def qnn_add_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
"""
This function creates the pattern for qnn.add with optional fused RELU activation.
"""
pattern = is_op("qnn.add")(
wildcard(),
wildcard(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
)
pattern = pattern.optional(is_op("clip"))
return pattern
class SubParams(BinaryElementwiseParams):
"""
This class will parse a call to an ethosu.binary_elementwise Sub composite function
and extract the parameter information.
"""
composite_name = "ethos-u.sub"
def __init__(self, func_body: Call):
BinaryElementwiseParams.__init__(self, func_body, "SUB", True)
def is_valid(self):
"""
This function checks whether Sub has compatible attributes with the NPU
"""
if not super().is_valid():
return False
if not check_valid_dtypes(
[self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.uint8, np.int8, np.int32]
):
return False
return True
def qnn_subtract_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
"""
This function creates the pattern for qnn.subtract with optional fused RELU activation.
"""
pattern = is_op("qnn.subtract")(
wildcard(),
wildcard(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
)
pattern = pattern.optional(is_op("clip"))
return pattern
class MulParams(BinaryElementwiseParams):
"""
This class will parse a call to an ethosu.binary_elementwise Mul composite function
and extract the parameter information.
"""
composite_name = "ethos-u.mul"
def __init__(self, func_body: Call):
BinaryElementwiseParams.__init__(self, func_body, "MUL", True)
def is_valid(self):
"""
This function checks whether Mul has compatible attributes with the NPU
"""
if not super().is_valid():
return False
if not check_valid_dtypes(
[self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.uint8, np.int8, np.int32]
):
return False
return True
def qnn_mul_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
"""
This function creates the pattern for qnn.mul with optional fused RELU activation.
"""
pattern = is_op("qnn.mul")(
wildcard(),
wildcard(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
)
pattern = pattern.optional(is_op("clip"))
return pattern
class MinParams(BinaryElementwiseParams):
"""
This class will parse a call to an ethosu.binary_elementwise Min composite function
and extract the parameter information.
"""
composite_name = "ethos-u.min"
def __init__(self, func_body: Call):
BinaryElementwiseParams.__init__(self, func_body, "MIN", False)
def is_valid(self):
"""
This function checks whether Min has compatible attributes with the NPU
"""
if not super().is_valid():
return False
if self.ifm.dtype != self.ifm2.dtype:
return False
if not check_valid_dtypes(
[self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.uint8, np.int8]
):
return False
return True
def minimum_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
"""
This function creates the pattern for minimum with optional fused RELU activation.
"""
minimum = is_op("minimum")(wildcard(), wildcard())
optional_min_clip = is_op("clip")(minimum)
optional_min_clip = is_op("qnn.requantize")(
optional_min_clip, is_constant(), is_constant(), is_constant(), is_constant()
)
return minimum | optional_min_clip
class MaxParams(BinaryElementwiseParams):
"""
This class will parse a call to an ethosu.binary_elementwise Max composite function
and extract the parameter information.
"""
composite_name = "ethos-u.max"
def __init__(self, func_body: Call):
BinaryElementwiseParams.__init__(self, func_body, "MAX", False)
def is_valid(self):
"""
This function checks whether Max has compatible attributes with the NPU
"""
if not super().is_valid():
return False
if self.ifm.dtype != self.ifm2.dtype:
return False
if not check_valid_dtypes(
[self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.uint8, np.int8]
):
return False
return True
def maximum_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
"""
This function creates the pattern for maximum with optional fused RELU activation.
"""
maximum = is_op("maximum")(wildcard(), wildcard())
optional_max_clip = is_op("clip")(maximum)
optional_max_clip = is_op("qnn.requantize")(
optional_max_clip, is_constant(), is_constant(), is_constant(), is_constant()
)
return maximum | optional_max_clip
class ShlParams(BinaryElementwiseParams):
"""
This class will parse a call to an ethosu.binary_elementwise Shl composite function
and extract the parameter information.
"""
composite_name = "ethos-u.shl"
def __init__(self, func_body: Call):
BinaryElementwiseParams.__init__(self, func_body, "SHL", False)
def is_valid(self):
"""
This function checks whether Shl has compatible attributes with the NPU
"""
if not super().is_valid():
return False
if not check_valid_dtypes([self.ifm, self.ifm2, self.ofm], supported_dtypes=[np.int32]):
return False
return True
def shl_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
"""
This function creates the pattern for left_shift with optional fused RELU activation.
"""
pattern = is_op("left_shift")(wildcard(), wildcard())
pattern = pattern.optional(is_op("clip"))
return pattern
class ReshapeParams:
"""
This class will parse a call to an ethosu.reshape composite function
and extract the parameter information.
"""
composite_name = "ethos-u.reshape"
def __init__(self, func_body: Call):
self.new_shape = func_body.attrs.newshape
self.ifm = TensorParams(func_body.args[0])
self.ofm = TensorParams(func_body)
def is_valid(self):
"""
This function checks whether reshape has compatible attributes with the NPU
"""
if not check_dimensions(self.ifm) or not check_dimensions(self.ofm):
return False
if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8]):
return False
return True
def reshape_pattern():
"""Create pattern for reshape"""
pattern = is_op("reshape")(wildcard())
return pattern
class StridedSliceParams:
"""
This class will parse a call to an ethosu.strided_slice composite function
and extract the parameter information.
"""
composite_name = "ethos-u.strided_slice"
def __init__(self, func_body: Call):
self.ifm = TensorParams(func_body.args[0])
self.ofm = TensorParams(func_body)
attrs = func_body.attrs
# The indices where we begin the slice
self.begin = attrs.begin
# The indices where we end the slice
self.end = attrs.end
self.strides = attrs.strides
self.axes = attrs.axes
self.slice_mode = attrs.slice_mode
def is_valid(self):
"""
This function checks whether strided_slice has compatible attributes with the NPU
"""
if not check_dimensions(self.ifm) or not check_dimensions(self.ofm):
return False
if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8]):
return False
if len(self.begin) != len(self.end):
return False
for begin_idx, end_idx in zip(self.begin, self.end):
if begin_idx > end_idx:
return False
# Only strides of 1 are supported
if self.strides:
if not all([i == 1 for i in self.strides]):
return False
return True
def strided_slice_pattern():
"""Create pattern for strided_slice"""
pattern = is_op("strided_slice")(wildcard())
return pattern
class AbsParams:
"""
This class will parse a call to an ethosu.unary_elementwise Abs composite function
and extract the parameter information.
"""
composite_name = "ethos-u.abs"
def __init__(self, func_body: Call):
from tvm.relay.backend.contrib.ethosu.util import QuantizeArgs
from tvm.relay.backend.contrib.ethosu.util import DequantizeArgs
quantize = func_body
abs_op = quantize.args[0]
dequantize = abs_op.args[0]
layout = "NHWC"
self.ifm = TensorParams(
dequantize.args[DequantizeArgs.IFM.value],
layout,
dequantize.args[DequantizeArgs.IFM_SCALE.value],
dequantize.args[DequantizeArgs.IFM_ZERO_POINT.value],
)
self.ofm = TensorParams(
quantize,
layout,
quantize.args[QuantizeArgs.OFM_SCALE.value],
quantize.args[QuantizeArgs.OFM_ZERO_POINT.value],
)
self.operator_type = "ABS"
self.activation = None
def is_valid(self):
"""Checks whether Abs has compatible attributes with HW"""
tensor_params = [self.ifm, self.ofm]
if not check_valid_dtypes(tensor_params, supported_dtypes=[np.int8, np.uint8]):
return False
if self.ifm.dtype != self.ofm.dtype:
return False
if not check_dimensions(self.ifm):
return False
if len(self.ifm.shape) == 4 and self.ifm.shape[0] != 1:
return False
if self.ifm.shape != self.ofm.shape:
return False
return True
def abs_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
"""Create pattern for abs"""
pattern = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
pattern = is_op("abs")(pattern)
pattern = is_op("qnn.quantize")(pattern, is_constant(), is_constant())
return pattern
class LutActivationParams:
"""
A parent class for LUT based activation functions that extracts the input and
output tensors and checks whether they are valid.
"""
def __init__(self, func_body: Call):
from tvm.relay.backend.contrib.ethosu.util import QuantizeArgs
from tvm.relay.backend.contrib.ethosu.util import DequantizeArgs
layout = "NHWC"
quantize = func_body
activation = quantize.args[0]
dequantize = activation.args[0]
in_var = dequantize.args[0]
self.ifm = TensorParams(
in_var,
layout=layout,
scale=dequantize.args[DequantizeArgs.IFM_SCALE.value],
zero_point=dequantize.args[DequantizeArgs.IFM_ZERO_POINT.value],
)
self.ofm = TensorParams(
quantize,
layout=layout,
scale=quantize.args[QuantizeArgs.OFM_SCALE.value],
zero_point=quantize.args[QuantizeArgs.OFM_ZERO_POINT.value],
)
def is_valid(self):
"""
This function checks whether activation has compatible attributes with the NPU
"""
if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8]):
return False
return True
class TanhParams(LutActivationParams):
composite_name = "ethos-u.tanh"
def tanh_pattern():
"""Create pattern for tanh"""
dequant = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
tanh = is_op("tanh")(dequant)
quant = is_op("qnn.quantize")(tanh, is_constant(), is_constant())
return quant
class SigmoidParams(LutActivationParams):
"""
This class will parse a call to an ethos-u.sigmoid composite function
and extract the parameter information.
"""
composite_name = "ethos-u.sigmoid"
def sigmoid_pattern():
"""Create pattern for sigmoid"""
dequant = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
sigmoid = is_op("sigmoid")(dequant)
quant = is_op("qnn.quantize")(sigmoid, is_constant(), is_constant())
return quant
class LeakyReLUParams(LutActivationParams):
"""
This class will parse a call to an ethos-u.leaky_relu composite function
and extract the parameter information.
"""
composite_name = "ethos-u.leaky_relu"
def __init__(self, func_body: Call):
super().__init__(func_body)
self.alpha = func_body.args[0].attrs.alpha
def leaky_relu_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
"""
This function creates the pattern for leaky relu.
"""
dequantize = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
leaky_relu = is_op("nn.leaky_relu")(dequantize)
return is_op("qnn.quantize")(leaky_relu, is_constant(), is_constant())
class MeanParams:
"""
This class will parse a call to an ethosu.mean composite function
and extract the parameter information.
"""
composite_name = "ethos-u.mean"
def __init__(self, func_body: Call):
from tvm.relay.backend.contrib.ethosu.util import RequantArgs
requantize = func_body
mean_op = requantize.args[0]
attrs = mean_op.attrs
cast = mean_op.args[0]
layout = "NHWC"
self.ifm = TensorParams(
cast.args[0],
layout,
requantize.args[RequantArgs.IFM_SCALE.value],
requantize.args[RequantArgs.IFM_ZERO_POINT.value],
)
self.ofm = TensorParams(
requantize,
layout,
requantize.args[RequantArgs.OFM_SCALE.value],
requantize.args[RequantArgs.OFM_ZERO_POINT.value],
)
ifm_shape = self.ifm.shape
self.height = ifm_shape[0] if len(ifm_shape) in (2, 3) else ifm_shape[1]
self.width = ifm_shape[1] if len(ifm_shape) in (2, 3) else ifm_shape[2]
self.keepdims = attrs.keepdims
self.axis = list(sorted(attrs.axis))
if attrs.exclude:
self.axis = [i for i in range(len(self.ifm.shape)) if i not in self.axis]
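# Example of the exclude handling above (illustrative): for a 4-D NHWC input,
# attrs.axis == [0, 3] with exclude=True becomes self.axis == [1, 2], i.e. a mean
# over the spatial dimensions.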
def is_valid(self) -> bool:
"""
Checks whether Mean has compatible attributes with HW.
"""
def check_axis(num_dims, axis):
if num_dims in (2, 3):
return axis in ([0], [1], [0, 1])
return axis in ([1], [2], [1, 2])
tensor_params = [self.ifm, self.ofm]
if not check_valid_dtypes(tensor_params, supported_dtypes=[np.int8]):
return False
if self.ifm.dtype != self.ofm.dtype:
return False
if not len(self.ifm.shape) in [2, 3, 4]:
return False
if not check_axis(len(self.ifm.shape), self.axis):
return False
# MEAN has further restrictions on the input size, depending on legalization method.
input_size = self.height * self.width
if input_size > 65536:
return False
if (
self.ifm.q_params.scale_f32 != self.ofm.q_params.scale_f32
or self.ifm.q_params.zero_point != self.ofm.q_params.zero_point
) and input_size > 4096:
return False
if self.axis == [1, 2] and self.keepdims and self.ifm.dtype == "int8" and input_size > 256:
return False
# Large kernel height reshape only when axis is [1, 2]
if self.axis != [1, 2] and self.height > 64:
return False
return True
def mean_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
"""
This function creates the pattern for mean.
"""
pattern = is_op("cast")(wildcard())
pattern = is_op("mean")(pattern)
pattern = is_op("qnn.requantize")(
pattern, is_constant(), is_constant(), is_constant(), is_constant()
)
return pattern
class ConcatParams:
"""
This class will parse a call to an ethos-u.concat composite function
and extract the parameter information.
"""
composite_name = "ethos-u.concat"
def __init__(self, func_body):
self.concat = func_body
self.is_qnn_variant = self.concat.op.name == "qnn.concatenate"
self.input_tensors = [TensorParams(tensor) for tensor in list(func_body.args[0])]
self.axis = func_body.attrs.axis
if self.is_qnn_variant:
self.input_scales = [s.data.asnumpy() for s in list(func_body.args[1])]
self.input_zero_points = [zp.data.asnumpy() for zp in list(func_body.args[2])]
def is_valid(self):
"""Checks whether Concatenate has compatible attributes with the hardware"""
if not check_valid_dtypes(self.input_tensors, supported_dtypes=[np.int8]):
return False
# Check that the scales and zero points of input tensors are the same
if self.is_qnn_variant and not all(self.input_scales == self.input_scales[0]):
return False
if self.is_qnn_variant and not all(self.input_zero_points == self.input_zero_points[0]):
return False
input_dim = len(self.input_tensors[0].shape)
for tensor in self.input_tensors:
if len(tensor.shape) != input_dim:
return False
if self.axis is None:
return False
if self.axis < 0:
return False
if self.axis >= input_dim:
return False
output_shape = self.concat.checked_type.shape
if len(output_shape) != input_dim:
return False
if len(output_shape) > 3 and output_shape[0] != 1:
return False
return True
def concat_pattern():
"""Create pattern for concat"""
tensors = is_tuple(None)
scales = is_tuple(None)
zero_points = is_tuple(None)
qnn_concat = is_op("qnn.concatenate")(
tensors, scales, zero_points, is_constant(), is_constant()
)
concat = is_op("concatenate")(tensors)
return concat | qnn_concat
class SplitParams:
"""
This class will parse a call to an ethos-u.split composite function
and extract the parameter information.
"""
composite_name = "ethos-u.split"
def __init__(self, func_body):
self.split = func_body
self.input = TensorParams(func_body.args[0])
self.axis = func_body.attrs.axis
self.indices_or_sections = self.convert_indices_or_sections(
func_body.attrs.indices_or_sections
)
def convert_indices_or_sections(self, indices_or_sections):
# split_v
if isinstance(indices_or_sections, tvm.ir.container.Array):
values = [i.value for i in indices_or_sections]
# split
else:
values = indices_or_sections.value
return values
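# Illustrative only: relay.split(x, indices_or_sections=3) reaches this helper as an
# IntImm and is converted to the plain integer 3, while indices_or_sections=[2, 5]
# arrives as an Array and is converted to the plain list [2, 5].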
def is_valid(self):
"""Checks whether split has compatible attributes with the hardware"""
if not check_valid_dtypes([self.input], supported_dtypes=[np.int8]):
return False
return True
def split_pattern():
"Create the pattern for split"
split = is_op("split")(wildcard())
return split
class RequantizeParams:
"""
This class will parse a call to an ethos-u.requantize composite function
and extract the parameter information.
"""
composite_name = "ethos-u.requantize"
def __init__(self, func_body: Call):
from tvm.relay.backend.contrib.ethosu.util import RequantArgs
layout = "NHWC"
in_var = func_body.args[0]
requantize = func_body
self.ifm = TensorParams(
in_var,
layout=layout,
scale=requantize.args[RequantArgs.IFM_SCALE.value],
zero_point=requantize.args[RequantArgs.IFM_ZERO_POINT.value],
)
self.ofm = TensorParams(
requantize,
layout=layout,
scale=requantize.args[RequantArgs.OFM_SCALE.value],
zero_point=requantize.args[RequantArgs.OFM_ZERO_POINT.value],
)
attrs = requantize.attrs
self.out_dtype = attrs.out_dtype
def is_valid(self) -> bool:
"""
Checks whether qnn.requantize has compatible attributes with HW.
"""
tensor_params = [self.ifm, self.ofm]
if not check_valid_dtypes(tensor_params, supported_dtypes=[np.int8]):
return False
if not check_dimensions(self.ifm) or not check_dimensions(self.ofm):
return False
if self.out_dtype and self.out_dtype != "int8":
return False
return True
def requantize_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
"""
This function creates the pattern for qnn.requantize.
"""
return is_op("qnn.requantize")(
wildcard(), is_constant(), is_constant(), is_constant(), is_constant()
)
class Resize2dParams:
"""
This class will parse a call to an ethos-u.resize2d composite function
and extract the parameter information.
"""
composite_name = "ethos-u.resize2d"
def __init__(self, func_body: Call):
layout = "NHWC"
resize_2d = func_body
in_var = func_body.args[0]
if (
isinstance(resize_2d, tvm.relay.expr.Call)
and isinstance(resize_2d.op, tvm.ir.Op)
and resize_2d.op.name == "qnn.quantize"
):
resize_2d = resize_2d.args[0]
in_var = in_var.args[0].args[0]
out_var = func_body
self.ifm = TensorParams(in_var, layout=layout)
self.ofm = TensorParams(out_var, layout=layout)
attrs = resize_2d.attrs
self.size = attrs.size
self.method = attrs.method
self.roi = attrs.roi
self.coordinate_transformation_mode = attrs.coordinate_transformation_mode
self.rounding_method = attrs.rounding_method
self.out_dtype = attrs.out_dtype
def is_valid(self) -> bool:
"""
Checks whether image.resize2d has compatible attributes with HW.
"""
def check_compatible_size(mode, method, upscale_size, ifm_size):
"""Checking the provided upscale_size is compatible with the NPU. The NPU only
supports upsampling when the upsampling size is 2 * input_size, or when there is
no upsampling to be done, so check that this is the case. In the special case of
resize_bilinear with align_corners=True, the NPU only supports an upsampling
size of 2 * input_size - 1."""
delta = 1 if mode == "align_corners" and method == "linear" else 0
upscale_size = np.array(upscale_size)
ifm_size = np.array(ifm_size)
ifm_upscaled = ifm_size * 2 - delta
return (ifm_upscaled == upscale_size).all() or (ifm_size == upscale_size).all()
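# Worked example (not from the source): an 8x8 IFM may be resized to 16x16 with
# "asymmetric" coordinates, to 15x15 with "align_corners" + "linear", or left at 8x8;
# any other target size fails the check above.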
tensor_params = [self.ifm, self.ofm]
if not check_valid_dtypes(tensor_params, supported_dtypes=[np.int8]):
return False
if len(self.ifm.shape) != 4 or len(self.ofm.shape) != 4:
return False
if list(float(x) for x in self.roi) != [0.0] * 4:
return False
if self.method not in ("nearest_neighbor", "linear"):
return False
if self.coordinate_transformation_mode not in ("asymmetric", "align_corners"):
return False
if not check_compatible_size(
self.coordinate_transformation_mode,
self.method,
self.size,
self.ifm.shape[1:3],
):
return False
if self.rounding_method != "":
return False
if self.out_dtype and self.out_dtype != "int8":
return False
return True
def resize2d_pattern() -> tvm.relay.dataflow_pattern.DFPattern:
"""
This function creates the pattern for image.resize2d.
"""
dequant = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
resize_2d = is_op("image.resize2d")(dequant).has_attr({"method": "linear"})
quant = is_op("qnn.quantize")(resize_2d, is_constant(), is_constant())
return quant | is_op("image.resize2d")(wildcard()).has_attr({"method": "nearest_neighbor"})
class ExpandDimsParams:
"""
This class will parse a call to an ethos-u.expand_dims composite function
and extract the parameter information.
"""
composite_name = "ethos-u.expand_dims"
def __init__(self, func_body):
self.expand_dims = func_body
self.input = TensorParams(func_body.args[0])
self.output = TensorParams(func_body)
def is_valid(self):
"""Checks whether expand_dims has compatible attributes with the hardware."""
if not check_dimensions(self.input) or not check_dimensions(self.output):
return False
if not check_valid_dtypes([self.input, self.output], supported_dtypes=[np.int8]):
return False
return True
def expand_dims_pattern():
"""Create the pattern for expand_dims."""
return is_op("expand_dims")(wildcard())
class SqueezeParams:
"""
This class will parse a call to an ethos-u.squeeze composite function
and extract the parameter information.
"""
composite_name = "ethos-u.squeeze"
def __init__(self, func_body):
self.squeeze = func_body
self.input = TensorParams(func_body.args[0])
self.output = TensorParams(func_body)
def is_valid(self):
"""Checks whether squeeze has compatible attributes with the hardware."""
if not check_dimensions(self.output):
return False
if not check_valid_dtypes([self.input, self.output], supported_dtypes=[np.int8]):
return False
return True
def squeeze_pattern():
"""Create the pattern for squeeze."""
return is_op("squeeze")(wildcard())
class FullyConnectedParams:
"""
This class will parse a call to an ethos-u.fully_connected composite
function and extract the parameter information.
"""
composite_name = "ethos-u.fully_connected"
@requires_vela
def __init__(self, func_body):
from tvm.relay.backend.contrib.ethosu.util import QDenseArgs # type: ignore
from tvm.relay.backend.contrib.ethosu.util import BiasAddArgs
from tvm.relay.backend.contrib.ethosu.util import RequantArgs
self.activation = None
if str(func_body.op) == "clip":
self.activation = func_body
requantize_op = self.activation.args[0]
else:
requantize_op = func_body
call = requantize_op.args[0]
if str(requantize_op.args[0].op) == "nn.bias_add":
bias_add = call
qnn_dense = call.args[0]
else:
bias_add = None
qnn_dense = call
# weights & biases are params as they should be constant
self.weights = TensorParams(
qnn_dense.args[QDenseArgs.WEIGHTS.value],
None,
qnn_dense.args[QDenseArgs.WEIGHTS_SCALE.value],
qnn_dense.args[QDenseArgs.WEIGHTS_ZERO_POINT.value],
)
self.biases = (
TensorParams(
bias_add.args[BiasAddArgs.BIASES.value],
None,
requantize_op.args[RequantArgs.IFM_SCALE.value],
requantize_op.args[RequantArgs.IFM_ZERO_POINT.value],
)
if bias_add
else None
)
self.ifm = TensorParams(
qnn_dense.args[QDenseArgs.IFM.value],
None,
qnn_dense.args[QDenseArgs.IFM_SCALE.value],
qnn_dense.args[QDenseArgs.IFM_ZERO_POINT.value],
)
self.ofm = TensorParams(
func_body,
None,
requantize_op.args[RequantArgs.OFM_SCALE.value],
requantize_op.args[RequantArgs.OFM_ZERO_POINT.value],
)
def is_valid(self) -> bool:
"""
Checks whether Fully Connected has compatible attributes with HW
"""
def check_weights_fc(weights):
"""Checks whether weight tensor is compatible with HW"""
weights_limit = 127 * 65536
# A saturation upper bound check for accumulators
weights.values = weights.values - weights.q_params.zero_point
axis = 1
sum_weights = np.amax(np.sum(np.absolute(weights.values), axis=axis))
if not sum_weights <= weights_limit:
return False
return True
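# In other words (an interpretation of the bound above, not text from the source): the
# per-output-channel sum of absolute, zero-point-corrected weight values must stay
# within 127 * 2**16, so that the accumulators cannot saturate.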
if not check_valid_dtypes([self.ifm, self.ofm], supported_dtypes=[np.int8]):
return False
if not check_weights_fc(self.weights):
return False
if not check_bias(self.biases):
return False
if not check_batch_size(self.ifm):
return False
# Check input shape
if not len(self.ifm.shape) == 2:
return False
# Check output shape
if not len(self.ofm.shape) == 2:
return False
return True
def qnn_fc_pattern():
dense = is_op("qnn.dense")(
wildcard(), is_constant(), is_constant(), is_constant(), is_constant(), is_constant()
)
optional_bias_add = is_op("nn.bias_add")(dense, is_constant())
req = is_op("qnn.requantize")(
dense | optional_bias_add, is_constant(), is_constant(), is_constant(), is_constant()
)
optional_clip = req.optional(is_op("clip"))
return optional_clip
class HardSwishParams:
"""
This class will parse a call to an ethos-u.hard_swish composite function
and extract the parameter information.
"""
composite_name = "ethos-u.hard_swish"
def __init__(self, func_body):
from tvm.relay.backend.contrib.ethosu.util import QuantizeArgs
from tvm.relay.backend.contrib.ethosu.util import DequantizeArgs
quantize = func_body
divide = quantize.args[0]
multiply = divide.args[0]
clip = multiply.args[1]
add = clip.args[0]
dequantize = add.args[0]
self.ifm = TensorParams(
dequantize.args[0],
scale=dequantize.args[DequantizeArgs.IFM_SCALE.value],
zero_point=dequantize.args[DequantizeArgs.IFM_ZERO_POINT.value],
)
self.ofm = TensorParams(
quantize,
scale=quantize.args[QuantizeArgs.OFM_SCALE.value],
zero_point=quantize.args[QuantizeArgs.OFM_ZERO_POINT.value],
)
def is_valid(self):
tensor_params = [self.ifm, self.ofm]
if not check_valid_dtypes(tensor_params, supported_dtypes=[np.int8]):
return False
return True
def hard_swish_pattern():
"""Create the pattern for hard swish."""
dequantize = is_op("qnn.dequantize")(wildcard(), is_constant(), is_constant())
add = is_op("add")(dequantize, is_constant())
clip = is_op("clip")(add)
multiply = is_op("multiply")(dequantize, clip)
divide = is_op("divide")(multiply, is_constant())
quantize = is_op("qnn.quantize")(divide, is_constant(), is_constant())
return quantize
@register_pattern_table("ethos-u")
def pattern_table() -> List[Tuple[str, tvm.relay.dataflow_pattern.DFPattern, Callable]]:
return [
(
QnnConv2DParams.composite_name,
qnn_conv2d_pattern(),
lambda pat: QnnConv2DParams(pat).is_valid(),
),
(
QnnDepthwiseConv2DParams.composite_name,
qnn_depthwise_conv2d_pattern(),
lambda pat: QnnDepthwiseConv2DParams(pat).is_valid(),
),
(
QnnConv2DTransposeParams.composite_name,
qnn_conv2d_transpose_pattern(),
lambda pat: QnnConv2DTransposeParams(pat).is_valid(),
),
(
FullyConnectedParams.composite_name,
qnn_fc_pattern(),
lambda pat: FullyConnectedParams(pat).is_valid(),
),
(
MaxPool2DParams.composite_name,
qnn_maxpool2d_pattern(),
lambda pat: MaxPool2DParams(pat).is_valid(),
),
(
AvgPool2DParams.composite_name,
qnn_avgpool2d_pattern(),
lambda pat: AvgPool2DParams(pat).is_valid(),
),
(
AddParams.composite_name,
qnn_add_pattern(),
lambda pat: AddParams(pat).is_valid(),
),
(
SubParams.composite_name,
qnn_subtract_pattern(),
lambda pat: SubParams(pat).is_valid(),
),
(
MulParams.composite_name,
qnn_mul_pattern(),
lambda pat: MulParams(pat).is_valid(),
),
(
MinParams.composite_name,
minimum_pattern(),
lambda pat: MinParams(pat).is_valid(),
),
(
MaxParams.composite_name,
maximum_pattern(),
lambda pat: MaxParams(pat).is_valid(),
),
(
ShlParams.composite_name,
shl_pattern(),
lambda pat: ShlParams(pat).is_valid(),
),
(
ReshapeParams.composite_name,
reshape_pattern(),
lambda pat: ReshapeParams(pat).is_valid(),
),
(
StridedSliceParams.composite_name,
strided_slice_pattern(),
lambda pat: StridedSliceParams(pat).is_valid(),
),
(
AbsParams.composite_name,
abs_pattern(),
lambda pat: AbsParams(pat).is_valid(),
),
(TanhParams.composite_name, tanh_pattern(), lambda pat: TanhParams(pat).is_valid()),
(
MeanParams.composite_name,
mean_pattern(),
lambda pat: MeanParams(pat).is_valid(),
),
(
LeakyReLUParams.composite_name,
leaky_relu_pattern(),
lambda pat: LeakyReLUParams(pat).is_valid(),
),
(ConcatParams.composite_name, concat_pattern(), lambda pat: ConcatParams(pat).is_valid()),
(
SigmoidParams.composite_name,
sigmoid_pattern(),
lambda pat: SigmoidParams(pat).is_valid(),
),
(
SplitParams.composite_name,
split_pattern(),
lambda pat: SplitParams(pat).is_valid(),
),
(
RequantizeParams.composite_name,
requantize_pattern(),
lambda pat: RequantizeParams(pat).is_valid(),
),
(
Resize2dParams.composite_name,
resize2d_pattern(),
lambda pat: Resize2dParams(pat).is_valid(),
),
(
ExpandDimsParams.composite_name,
expand_dims_pattern(),
lambda pat: ExpandDimsParams(pat).is_valid(),
),
(
SqueezeParams.composite_name,
squeeze_pattern(),
lambda pat: SqueezeParams(pat).is_valid(),
),
(
HardSwishParams.composite_name,
hard_swish_pattern(),
lambda pat: HardSwishParams(pat).is_valid(),
),
]
# pylint: disable=unused-argument
@requires_vela
def partition_for_ethosu(
mod: tvm.ir.IRModule,
params: Optional[Dict[str, tvm.runtime.NDArray]] = None,
mod_name: str = "default",
**opts,
):
"""This helper function partition the relay graph as produced by the
relay frontend for a given model into external functions
to be presented to the codegen.
Parameters
----------
mod : tvm.ir.IRModule
The IRModule that gets generated from a relay frontend
params : Optional[Dict[str, tvm.runtime.NDArray]]
Constant input parameters.
mod_name: str, optional
The module name
Returns
-------
mod : IRModule
The partitioned IRModule with external global functions
"""
from tvm.relay.backend.contrib.ethosu import preprocess
if params:
mod["main"] = bind_params_by_name(mod["main"], params)
pattern = relay.op.contrib.get_pattern_table("ethos-u")
mod = relay.transform.InferType()(mod)
mod = relay.transform.MergeComposite(pattern)(mod)
mod = relay.transform.AnnotateTarget("ethos-u")(mod)
mod = relay.transform.MergeCompilerRegions()(mod)
mod = relay.transform.InferType()(mod)
mod = relay.transform.PartitionGraph(mod_name)(mod)
mod = relay.transform.InferType()(mod)
mod = preprocess.preprocess_ext_io()(mod)
return mod
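# A minimal usage sketch (the names "relay_mod" and "relay_params" are illustrative,
# assumed to come from a frontend import elsewhere):
#
#     ethosu_mod = partition_for_ethosu(relay_mod, relay_params)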
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/libtorch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, no-else-return, E1102
"""Torch codegen operators"""
from tvm import relay
from tvm.relay.op.annotation import compiler_begin, compiler_end
def torchop(script_fn, *params):
"""Insert an Operation executed in the PyTorch JIT
The operation includes backend annotation
Currently, only tensors are supported. The shape inference
assumes that input shapes (and not values) determine output shapes."""
return compiler_end(
relay.op._make.torchop(
[compiler_begin(p, "torch") for p in params], script_fn.save_to_buffer()
),
"torch",
)
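# Hedged usage sketch (identifiers are illustrative and not part of this module):
#
#     scripted = torch.jit.script(my_fn)         # a TorchScript function
#     out = torchop(scripted, relay_x, relay_y)  # wrap it as an annotated Relay call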
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/register.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Register utilities for external codegen."""
_PATTERN_TABLES = {}
def register_pattern_table(compiler, table=None):
"""Register a pattern table for an external compiler.
Pattern tables are used to create composite functions.
See the MergeComposite pass.
Parameters
----------
compiler : str
The name of compiler
table : function, optional
A function that returns the pattern table
Returns
-------
fregister : function
The register function, when `table` is not provided directly.
"""
def _register(t):
"""internal register function"""
_PATTERN_TABLES[compiler] = t()
return t
return _register(table) if table is not None else _register
def get_pattern_table(compiler):
"""Get the pattern table associated with a compiler (if it's registered)."""
return _PATTERN_TABLES[compiler] if compiler in _PATTERN_TABLES else None
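# A minimal sketch of how a backend is expected to use this registry (the pattern
# helpers below are hypothetical placeholders):
#
#     @register_pattern_table("my_codegen")
#     def my_patterns():
#         return [("my_codegen.conv2d", my_conv2d_pattern(), my_conv2d_predicate)]
#
#     table = get_pattern_table("my_codegen")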
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/tachikoma.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Tachikoma library supported operators.
From TVM's perspective, Tachikoma is an extension of DNNL. The code below
is adapted from that of DNNL:
There are two ways of registering a function for an op to indicate if it is
supported by Tachikoma.
- The first and simplest way is to use the helper `_register_external_op_helper` so that
users only need to provide the operator name and a boolean value to indicate if
it is supported. For example:
.. code-block:: python
add = _register_external_op_helper("add")
add = _register_external_op_helper("add", True)
add = _register_external_op_helper("add", False)
- The other way is to implement the function by themselves to
check the attributes of the op and decide if it should be offloaded to Tachikoma.
"""
import logging
from functools import reduce
import tvm.ir
from tvm.ir import Op
from tvm import relay
from tvm.relay import transform
from tvm.relay.expr import GlobalVar
from tvm.relay.expr_functor import ExprMutator, ExprVisitor
from tvm.relay.expr import const
from tvm.relay.analysis import analysis as _analysis
from tvm.relay import expr as _expr
from tvm.relay.expr import Call, TupleGetItem
from ... import _ffi_api
from ...dataflow_pattern import wildcard, is_op, is_constant, is_expr, rewrite, DFPatternCallback
from .register import register_pattern_table
logger = logging.getLogger("Tachikoma")
supported_post_elts = ["nn.relu", "tanh", "sigmoid", "clip", "gelu", "swish", "mish", None]
def _register_external_op_helper(op_name, supported=True):
"""The helper function to indicate that a given operator can be supported
by Tachikoma.
Parameters
----------
op_name : str
The name of the operator that will be registered.
Returns
-------
f : callable
A function that returns whether the operator is supported by Tachikoma.
"""
@tvm.ir.register_op_attr(op_name, "target.tachikoma")
def _func_wrapper(expr):
args = expr.args
if any([x.checked_type.dtype == "int64" for x in args]):
logger.info("Tachikoma does not support int64.")
return False
# Tachikoma does not support pooling with ceil_mode = True.
if "pool" in op_name:
attrs = dict(get_attrs(expr))
if "ceil_mode" in attrs.keys() and attrs["ceil_mode"]:
return False
return supported
return _func_wrapper
_register_external_op_helper("nn.batch_norm")
_register_external_op_helper("nn.conv1d")
_register_external_op_helper("nn.conv2d")
_register_external_op_helper("nn.conv3d")
_register_external_op_helper("nn.conv2d_transpose")
_register_external_op_helper("nn.conv3d_transpose")
_register_external_op_helper("nn.dense")
_register_external_op_helper("nn.max_pool2d")
_register_external_op_helper("nn.avg_pool2d")
_register_external_op_helper("nn.global_avg_pool2d")
_register_external_op_helper("nn.max_pool3d")
_register_external_op_helper("nn.avg_pool3d")
_register_external_op_helper("abs")
_register_external_op_helper("clip")
_register_external_op_helper("exp")
_register_external_op_helper("log")
_register_external_op_helper("sqrt")
_register_external_op_helper("round")
_register_external_op_helper("nn.relu")
_register_external_op_helper("nn.leaky_relu")
_register_external_op_helper("tanh")
_register_external_op_helper("sigmoid")
_register_external_op_helper("nn.softmax")
_register_external_op_helper("add")
_register_external_op_helper("multiply")
_register_external_op_helper("nn.layer_norm")
_register_external_op_helper("nn.batch_matmul")
def append_eltwise_ops(op, eltwise):
"""Append element-wise post-ops to conv / conv_transpose / dense
Parameters
----------
op : str
The op name to be attached with element-wise post-op.
eltwise : str
The attached elementwise post-op name.
Returns
-------
pattern : CallPattern
Call node sequence.
"""
if eltwise == "gelu":
const1 = wildcard()
const2 = wildcard()
const3 = wildcard()
div = is_op("divide")(op, const1)
erf_val = is_op("erf")(div)
added_erf_val = is_op("add")(erf_val, const2)
mul_val = is_op("multiply")(op, added_erf_val)
op = is_op("multiply")(mul_val, const3)
elif eltwise == "swish":
sig_out = is_op("sigmoid")(op)
op = is_op("multiply")(op, sig_out)
elif eltwise == "mish":
const1 = wildcard()
exp = is_op("exp")(op)
add = is_op("add")(exp, const1)
log = is_op("log")(add)
tanh = is_op("tanh")(log)
op = is_op("multiply")(op, tanh)
elif eltwise:
op = is_op(eltwise)(op)
return op
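# Note (an interpretation, not documented in the source): the "gelu" branch above matches
# the decomposed form x * (erf(x / c1) + c2) * c3, where the wildcarded constants are
# typically sqrt(2), 1.0 and 0.5 when GeLU has been lowered by the frontend.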
def make_conv_pattern(conv_name, with_bias=True, with_eltwise=None):
"""Create patterns related to conv and conv_transpose.
Parameters
----------
with_bias : bool
Whether attach `bias_add` to `conv / conv_transpose`.
with_eltwise : str
The attached elementwise post-op name.
Returns
-------
conv_out : CallPattern
Call node sequence.
"""
if with_eltwise not in supported_post_elts:
raise ValueError("Unsupported eltwise post-op: %s" % with_eltwise)
data = wildcard()
weight = wildcard()
bias = wildcard()
conv = is_op(conv_name)(data, weight)
if with_bias:
conv_out = is_op("add")(conv, bias)
else:
conv_out = conv
return append_eltwise_ops(conv_out, with_eltwise)
def make_conv_bias_sum_relu_pattern(conv_type, has_relu=True):
"""Create patterns with sum op.
Parameters
----------
conv_type : str
Should be nn.conv1d / nn.conv2d / nn.conv3d.
has_relu : bool
Whether attach relu.
Returns
-------
out : CallPattern
Call node sequence.
"""
data1 = wildcard()
weight = wildcard()
bias = wildcard()
data2 = wildcard()
out = is_op(conv_type)(data1, weight)
out = is_op("add")(out, bias)
out = is_op("add")(out, data2)
if has_relu:
out = is_op("nn.relu")(out)
return out
def get_op_name(expr):
"""Get the operator name from an expression."""
if isinstance(expr, Op):
return expr.name
if isinstance(expr, Call):
return get_op_name(expr.op)
if isinstance(expr, TupleGetItem):
return get_op_name(expr.tuple_value)
if isinstance(expr, relay.Tuple):
return get_op_name(expr.fields[0])
return ""
def get_args(expr):
"""Get the arguments from an expression."""
if isinstance(expr, Call):
return expr.args
if isinstance(expr, TupleGetItem):
return get_args(expr.tuple_value)
if isinstance(expr, relay.Tuple):
return [arg for args in map(get_args, expr.fields) for arg in args]
return []
def get_attrs(expr):
"""Get the attributes from an expression."""
if isinstance(expr, Call):
return expr.attrs
if isinstance(expr, TupleGetItem):
return get_attrs(expr.tuple_value)
return {}
def make_sum_pattren_predicate(checker):
"""Check whether the conv_bias_add_sum pattern is as expected."""
def predicate(expr):
if get_op_name(expr) == "nn.relu":
expr = expr.args[0]
for e, op_name in zip([expr, expr.args[0]], ["sum", "bias_add"]):
args = get_args(e)
attrs = get_attrs(e.args[0])
if not checker(attrs, args, op_name):
return False
return True
return predicate
def make_bias_add_pattren_predicate(checker):
"""Check whether the conv_bias pattern is as expected."""
def predicate(expr):
if get_op_name(expr) == "nn.relu":
expr = expr.args[0]
if get_op_name(expr) == "add":
args = get_args(expr)
attrs = get_attrs(expr.args[0])
if not checker(attrs, args, "bias_add"):
return False
return True
return predicate
def add_checker(attrs, args, op_name):
"""Check if add is aligned with elementwise_add and bias_add."""
if op_name == "sum":
if not isinstance(args[0].op, tvm.ir.op.Op):
return False
if args[0].op.name != "add":
return False
if tuple(get_shape(args[0])) != tuple(get_shape(args[1])):
return False
if op_name == "bias_add":
if not isinstance(args[0].op, tvm.ir.op.Op):
return False
if args[0].op.name != "nn.conv2d":
return False
channel = dict(attrs)["channels"]
const_shape = get_shape(args[1])
if channel != reduce(lambda x, y: x * y, const_shape):
return False
return True
def make_dense_pattern(with_bias=True, with_eltwise=None):
"""Create patterns related to nn.dense.
Parameters
----------
with_bias : bool
Whether attach `bias_add` to `nn.dense`.
with_eltwise : str
The attached elementwise post-op name.
Returns
-------
dense_out : CallPattern
Call node sequence.
"""
if with_eltwise not in supported_post_elts:
raise ValueError("Unsupported eltwise post-op: %s" % with_eltwise)
data = wildcard()
weight = wildcard()
bias = wildcard()
dense = is_op("nn.dense")(data, weight)
if with_bias:
dense_out = is_op("add")(dense, bias)
else:
dense_out = dense
return append_eltwise_ops(dense_out, with_eltwise)
def make_tachikoma_pattern(op_name, with_bias, with_eltwise):
"""Create tachikoma patterns.
Parameters
----------
op_name : str
The first call node's op name.
with_bias : bool
Whether attach `bias_add` to `nn.dense`.
with_eltwise : str
The attached elementwise post-op name.
Returns
-------
pattern : Tuple(pattern_name, CallPattern)
Created pattern name, along with its CallPattern.
"""
pat_name = op_name.replace("nn", "tachikoma")
if "_transpose" in op_name:
pat_name = "tachikoma.deconv" + op_name.split("_")[0][-2::]
pat_name += "_bias" if with_bias else ""
pat_name += ("_" + with_eltwise.split(".")[-1]) if with_eltwise else ""
if "conv" in op_name:
tachikoma_pattern = (
pat_name,
make_conv_pattern(op_name, with_bias, with_eltwise),
make_bias_add_pattren_predicate(add_checker),
)
elif op_name == "nn.dense":
tachikoma_pattern = (pat_name, make_dense_pattern(with_bias, with_eltwise))
else:
logger.warning(
"Currently, only conv1d, conv2d, conv2d_transpose, conv3d_transpose, "
"dense op are supported, but got %s.",
op_name,
)
tachikoma_pattern = ()
return tachikoma_pattern
def make_qnn_conv2d_pattern():
"""Make qnn.conv2d based pattern supported by Tachikoma
Returns
-------
pattern : Tuple(pattern_name, CallPattern)
Created pattern name, along with its CallPattern.
"""
data = wildcard()
weight = is_constant()
bias = is_constant()
o_scl = is_constant()
dst_zp = is_constant()
act_scl = is_constant()
sum_scl = is_constant()
sum_src = wildcard()
zero_zp = is_expr(const(0, dtype="int32"))
pat = is_op("qnn.conv2d")(data, weight, zero_zp, zero_zp, is_constant(), is_constant())
pat = is_op("cast")(pat)
pat = is_op("add")(pat, bias) | pat # optional bias
pat = is_op("multiply")(pat, o_scl)
pat = is_op("clip")(pat) # TBD, not only clip
pat = is_op("multiply")(pat, act_scl) | pat # optional multiply. Ex: act_scl == 1
pat = is_op("add")(pat, sum_scl * is_op("cast")(sum_src)) | pat # optional sum
pat = is_op("add")(pat, dst_zp) | pat # optional dst_zp, can be dst_zp == 0
pat = is_op("cast")(pat)
return "tachikoma.qnn.conv2d", pat
def make_qnn_dense_pattern():
"""Make qnn.dense based pattern supported by Tachikoma
Returns
-------
pattern : Tuple(pattern_name, CallPattern)
Created pattern name, along with its CallPattern.
"""
data = wildcard()
weight = is_constant()
bias = is_constant()
o_scl = is_constant()
dst_zp = is_constant()
act_scl = is_constant()
sum_scl = is_constant()
sum_src = wildcard()
zero_zp = is_expr(const(0, dtype="int32"))
pat = is_op("qnn.dense")(data, weight, zero_zp, zero_zp, is_constant(), is_constant())
pat = is_op("cast")(pat)
pat = is_op("add")(pat, bias) | pat # optional bias
pat = is_op("multiply")(pat, o_scl)
pat = is_op("clip")(pat) # TBD, not only clip
pat = is_op("multiply")(pat, act_scl) | pat # optional multiply. ex act_scl == 1
pat = is_op("add")(pat, sum_scl * is_op("cast")(sum_src)) | pat # optional sum
pat = is_op("add")(pat, dst_zp) | pat # optional dst_zp, can be dst_zp == 0
pat = is_op("cast")(pat)
return "tachikoma.qnn.dense", pat
@register_pattern_table("tachikoma")
def pattern_table():
"""Create tachikoma patterns.
Returns
-------
tachikoma_patterns : List[tachikoma_pattern]
Created patterns.
"""
tachikoma_patterns = list()
tachikoma_patterns.append(make_qnn_conv2d_pattern())
tachikoma_patterns.append(make_qnn_dense_pattern())
tachikoma_patterns.append(
(
"tachikoma.conv2d_bias_sum_relu",
make_conv_bias_sum_relu_pattern("nn.conv2d"),
make_sum_pattren_predicate(add_checker),
)
)
tachikoma_patterns.append(
(
"tachikoma.conv2d_bias_sum",
make_conv_bias_sum_relu_pattern("nn.conv2d", False),
make_sum_pattren_predicate(add_checker),
)
)
elt_list = ["nn.relu", "tanh", "sigmoid", "clip", "gelu", "swish", "mish", None]
for with_bias in [True, False]:
for elt in elt_list:
if not with_bias and not elt:
continue
for conv_name in [
"nn.conv1d",
"nn.conv2d",
"nn.conv3d",
"nn.conv2d_transpose",
"nn.conv3d_transpose",
]:
tachikoma_patterns.append(make_tachikoma_pattern(conv_name, with_bias, elt))
tachikoma_patterns.append(make_tachikoma_pattern("nn.dense", with_bias, elt))
return tachikoma_patterns
def get_optimal_layout_for_conv(
data_layout, kernel_layout, weight_shape, out_shape, paddings, strides, dilates, groups, dtype
):
"""Get the optimal layout of tachikoma, given shape of conv2d.
Parameters
----------
data_layout, kernel_layout, weight_shape, out_shape, paddings, strides, dilates, groups, dtype
: str
Input arguments describing the convolution.
Returns
-------
layouts : str
A comma-separated string of the chosen data, weight and output layout tags.
"""
return _ffi_api.get_optimal_layout_for_conv(
data_layout,
kernel_layout,
weight_shape,
out_shape,
paddings,
strides,
dilates,
groups,
dtype,
)
def get_optimal_layout_for_conv_transpose(
data_layout,
kernel_layout,
weight_shape,
out_shape,
paddings,
output_paddings,
strides,
dilates,
groups,
dtype,
):
"""Get the optimal layout of tachikoma, given shape of tranposed conv2d.
Parameters
----------
data_layout, kernel_layout, weight_shape, out_shape, paddings, output_paddings, strides, dilates, groups, dtype
: str
Input arguments describing the transposed convolution.
Returns
-------
layouts : str
A comma-separated string of the chosen data, weight and output layout tags.
"""
return _ffi_api.get_optimal_layout_for_conv_transpose(
data_layout,
kernel_layout,
weight_shape,
out_shape,
paddings,
output_paddings,
strides,
dilates,
groups,
dtype,
)
def get_shape(tensor):
"""Get tensor's shape."""
if isinstance(tensor, relay.expr.Var):
return tensor.type_annotation.concrete_shape
if isinstance(tensor, relay.expr.Constant):
return tensor.data.shape
if isinstance(tensor, tvm.ir.tensor_type.TensorType):
return tensor.concrete_shape
if isinstance(tensor, tvm.ir.container.Array):
return tensor[-1].shape
if isinstance(tensor, relay.expr.Call):
if tensor.op.name == "multiply":
return tensor.type_args[0].shape
return tensor.checked_type.shape
raise TypeError("Unsupport data type: %s" % type(tensor))
def get_dtype(tensor):
"""Get tensor's dtype."""
if isinstance(tensor, relay.expr.Var):
return tensor.type_annotation.dtype
if isinstance(tensor, relay.expr.Constant):
return tensor.data.dtype
if isinstance(tensor, tvm.ir.tensor_type.TensorType):
return tensor.dtype
if isinstance(tensor, tvm.ir.container.Array):
return tensor[-1].dtype
if isinstance(tensor, relay.expr.Call):
if tensor.op.name == "multiply":
return tensor.type_args[0].dtype
return tensor.checked_type.dtype
raise TypeError("Unsupported data type: %s" % type(tensor))
def tag2layout(input_data, is_weight=False, conv_type="Conv1D"):
"""Transfer layout, denoted with `a, b, c, d, e`,
into valid layout (NCHW / OIHW) of TVM."""
if "Conv1D" in conv_type:
data_dic = {"a": "N", "b": "C", "c": "W"}
weight_dic = {"a": "O", "b": "I", "c": "W", "d": "G"}
elif "Conv2D" in conv_type:
data_dic = {"a": "N", "b": "C", "c": "H", "d": "W"}
weight_dic = {"a": "O", "b": "I", "c": "H", "d": "W"}
if "e" in input_data:
weight_dic = {"a": "G", "b": "O", "c": "I", "d": "H", "e": "W"}
elif "Conv3D" in conv_type:
data_dic = {"a": "N", "b": "C", "c": "D", "d": "H", "e": "W"}
weight_dic = {"a": "O", "b": "I", "c": "D", "d": "H", "e": "W", "f": "G"}
dic = weight_dic if is_weight else data_dic
res = ""
for i in input_data:
if i.isupper():
i = i.lower()
res += dic[i]
dic[i] = dic[i].lower()
elif i.islower():
res += dic[i]
elif i.isdigit():
res += i
else:
raise ValueError("Unsupported layout format: %s" % input_data)
return res
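# Illustrative example for tag2layout above (traced from the mapping tables; not an
# exhaustive specification of DNNL-style format tags): a blocked data tag such as
# "aBcd8b" for a Conv2D maps letter-by-letter to N, C, H, W, and the trailing "8b"
# becomes the "8c" inner-block suffix:
#
#     tag2layout("aBcd8b", is_weight=False, conv_type="Conv2D")  # -> "NCHW8c"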
def legalize_pad_avg_pool(attrs, inputs, types):
"""Legalize pad->avg_pool2d pattern.
Fuse this pattern into one avg_pool2d with padding = (1, 1),
and count_include_pad = True"""
data = inputs[0]
new_attrs = dict(attrs)
if isinstance(data, relay.expr.Call) and data.op.name == "nn.pad":
new_attrs["padding"] = (1, 1)
new_attrs["count_include_pad"] = True
return relay.nn.avg_pool2d(data.args[0], **new_attrs)
return relay.nn.avg_pool2d(data, **attrs)
def legalize_group_conv(attrs, inputs, types):
"""Legalize group conv / conv_transpose calculation.
Alter weight layout from OIHW to GOIHW / IOHW to GIOHW"""
groups = attrs.groups
data, weight = inputs
if groups == 1:
if "Transpose" not in type(attrs).__name__:
return relay.nn.conv2d(data, weight, **attrs)
return relay.nn.conv2d_transpose(data, weight, **attrs)
OC, IC, H, W = get_shape(weight)
new_attrs = dict(attrs)
weight = relay.reshape(weight, (groups, OC // groups, IC, H, W))
if "Transpose" not in type(attrs).__name__:
new_attrs["kernel_layout"] = "GOIHW"
return relay.nn.conv2d(data, weight, **new_attrs)
new_attrs["kernel_layout"] = "GIOHW"
return relay.nn.conv2d_transpose(data, weight, **new_attrs)
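# Worked example for legalize_group_conv above (illustrative shapes): with groups=4
# and an OIHW weight of shape (64, 16, 3, 3), the weight is reshaped to GOIHW as
# (4, 64 // 4, 16, 3, 3) == (4, 16, 16, 3, 3) before the grouped conv2d call.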
def alter_conv(attrs, inputs, tinfos, out_type):
"""The convolution's layout auto-query func for tachikoma."""
data, weight = inputs
groups = str(attrs.groups)
weight_shape = ",".join([str(x) for x in get_shape(weight)])
out_shape = ",".join([str(x) for x in get_shape(out_type)])
paddings = ",".join([str(x) for x in attrs.get_int_tuple("padding")])
strides = ",".join([str(x) for x in attrs.get_int_tuple("strides")])
dilates = ",".join([str(x) for x in attrs.get_int_tuple("dilation")])
dtype = get_dtype(weight)
new_attrs = dict(attrs)
conv_type = type(attrs).__name__.split("Attrs")[0]
res = get_optimal_layout_for_conv(
attrs["data_layout"],
attrs["kernel_layout"],
weight_shape,
out_shape,
paddings,
strides,
dilates,
groups,
dtype,
)
src_df, weight_df, dst_df = res.split(",")
new_attrs["data_layout"] = tag2layout(src_df, is_weight=False, conv_type=conv_type)
new_attrs["kernel_layout"] = tag2layout(weight_df, is_weight=True, conv_type=conv_type)
new_attrs["out_layout"] = tag2layout(dst_df, is_weight=False, conv_type=conv_type)
if conv_type == "Conv1D":
return relay.nn.conv1d(data, weight, **new_attrs)
if conv_type == "Conv2D":
return relay.nn.conv2d(data, weight, **new_attrs)
return relay.nn.conv3d(data, weight, **new_attrs)
def alter_conv_transpose(attrs, inputs, tinfos, out_type):
"""The transposed convolution's layout auto-query func for tachikoma."""
data, weight = inputs
weight_shape = ",".join([str(x) for x in get_shape(weight)])
out_shape = ",".join([str(x) for x in get_shape(out_type)])
paddings = ",".join([str(x) for x in attrs.get_int_tuple("padding")])
output_paddings = ",".join([str(x) for x in attrs.get_int_tuple("output_padding")])
strides = ",".join([str(x) for x in attrs.get_int_tuple("strides")])
dilates = ",".join([str(x) for x in attrs.get_int_tuple("dilation")])
groups = str(attrs.groups)
dtype = get_dtype(weight)
new_attrs = dict(attrs)
conv_type = type(attrs).__name__.split("Attrs")[0]
res = get_optimal_layout_for_conv_transpose(
attrs["data_layout"],
attrs["kernel_layout"],
weight_shape,
out_shape,
paddings,
output_paddings,
strides,
dilates,
groups,
dtype,
)
src_df, weight_df, dst_df = res.split(",")
new_attrs["data_layout"] = tag2layout(src_df, is_weight=False, conv_type=conv_type)
new_attrs["kernel_layout"] = tag2layout(weight_df, is_weight=True, conv_type=conv_type)
new_attrs["out_layout"] = tag2layout(dst_df, is_weight=False, conv_type=conv_type)
if conv_type == "Conv1DTranspose":
return relay.nn.conv1d_transpose(data, weight, **new_attrs)
if conv_type == "Conv2DTranspose":
return relay.nn.conv2d_transpose(data, weight, **new_attrs)
return relay.nn.conv3d_transpose(data, weight, **new_attrs)
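# Sketch of how the alter_conv / alter_conv_transpose callbacks above are typically
# installed (illustrative; the layout auto-query functions are registered only while
# the alter-layout pass runs, e.g. via TempOpAttr, rather than globally):
#
#     from tvm.relay.testing.temp_op_attr import TempOpAttr
#
#     with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv), TempOpAttr(
#         "nn.conv2d_transpose", "FTVMAlterOpLayout", alter_conv_transpose
#     ):
#         seq = tvm.transform.Sequential(
#             [tvm.relay.transform.InferType(), tvm.relay.transform.AlterOpLayout()]
#         )
#         mod = seq(mod)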
class IsComputeIntensiveGraph(ExprVisitor):
"""
Visits the graph recursively and checks whether it contains compute-heavy ops such as
convolutions, their transposes, and dense.
"""
def __init__(self):
ExprVisitor.__init__(self)
self.is_compute_intensive = False
def visit_call(self, call):
compute_intensive_ops = set(
[
"nn.conv1d",
"nn.conv2d",
"nn.conv2d_transpose",
"nn.conv3d",
"nn.conv3d_transpose",
"nn.dense",
"nn.layer_norm",
"nn.batch_matmul",
"nn.global_avg_pool2d",
]
)
if isinstance(call.op, tvm.tir.op.Op):
if str(call.op) in compute_intensive_ops:
self.is_compute_intensive = True
return super().visit_call(call)
def is_graph_compute_intensive(self, subgraph) -> bool:
"""
This function recursively visits the graph and checks if it is compute intensive.
"""
self.visit(subgraph)
return self.is_compute_intensive
def is_valid_subgraph(body):
"""Final check on whether the subgraph is valid and should be offloaded to Tachikoma."""
return IsComputeIntensiveGraph().is_graph_compute_intensive(body)
def prune_tachikoma_subgraphs(mod):
"""
Removes invalid subgraphs, i.e. those that do not contain compute-intensive tachikoma ops.
"""
class SubgraphRemover(ExprMutator):
"""
Reverts subgraphs in subgraphs_to_remove back to TVM instead of using an external codegen.
"""
def __init__(self, subgraphs_to_remove, mod, new_mod):
ExprMutator.__init__(self)
self.subgraphs_to_remove = subgraphs_to_remove
self.mod = mod
self.new_mod = new_mod
def visit_call(self, call):
if isinstance(call.op, GlobalVar):
name = call.op.name_hint
if name in self.subgraphs_to_remove:
# "Inline" the subgraph back into new main function.
func = self.mod[name]
var_map = {}
for arg, param in zip(call.args, func.params):
var_map[param] = super().visit(arg)
new_body = relay.bind(func.body, var_map)
return new_body
if name != "main":
args = []
for arg in call.args:
args.append(super().visit(arg))
return call.op(*args)
return super().visit_call(call)
subgraphs_to_remove = []
# If only one subgraph, do nothing.
if len(mod.get_global_vars()) <= 2:
return mod
# Remove invalid subgraphs
for subgraph in mod.get_global_vars():
name = subgraph.name_hint
if not mod[name].attrs or mod[name].attrs["Compiler"] != "tachikoma":
continue
if not is_valid_subgraph(mod[name].body):
subgraphs_to_remove.append(name)
# Create new pruned module
new_mod = tvm.IRModule(mod.functions, mod.type_definitions)
new_mod["main"] = SubgraphRemover(subgraphs_to_remove, mod, new_mod).visit(mod["main"])
new_mod = transform.RemoveUnusedFunctions()(new_mod)
return new_mod
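# Typical call site for prune_tachikoma_subgraphs above (illustrative sketch):
# pruning is applied right after graph partitioning so that trivial "tachikoma"
# subgraphs fall back to TVM:
#
#     mod = tvm.relay.transform.PartitionGraph()(mod)
#     mod = prune_tachikoma_subgraphs(mod)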
class LayerNormRewrite(DFPatternCallback):
"""
A callback to rewrite the following operators into a single layer normalization operator.
Pattern #1:
1 %4 = mean(%3, axis=[-1], keepdims=True) /* ty=Tensor[(1, 3136, 1), float32] */;
2 %5 = subtract(%3, %4) /* ty=Tensor[(1, 3136, 64), float32] */;
3 %6 = cast(%5, dtype="float32") /* ty=Tensor[(1, 3136, 64), float32] */;
4 %7 = power(%6, 2f /* ty=float32 */) /* ty=Tensor[(1, 3136, 64), float32] */;
5 %8 = mean(%7, axis=[-1], keepdims=True) /* ty=Tensor[(1, 3136, 1), float32] */;
6 %9 = add(%8, 1e-05f /* ty=float32 */) /* ty=Tensor[(1, 3136, 1), float32] */;
7 %10 = sqrt(%9) /* ty=Tensor[(1, 3136, 1), float32] */;
8 %11 = divide(%5, %10) /* ty=Tensor[(1, 3136, 64), float32] */;
9 %12 = multiply(%11, meta[relay.Constant][2] /* ty=Tensor[(64), float32] */)
/* ty=Tensor[(1, 3136, 64), float32] */;
10 %13 = add(%12, meta[relay.Constant][3] /* ty=Tensor[(64), float32] */)
/* ty=Tensor[(1, 3136, 64), float32] */;
Pattern #2:
1 %0 = mean(%input, axis=[-1], keepdims=True);
2 %1 = variance(%input, %0, axis=[-1], keepdims=True);
3 %2 = add(%1, 1e-05f /* ty=float32 */) /* ty=Tensor[(1, 49, 1), float32] */;
4 %3 = subtract(%input, %0);
5 %4 = sqrt(%2) /* ty=Tensor[(1, 49, 1), float32] */;
6 %5 = divide(%3, %4);
7 %6 = multiply(%5, meta[relay.Constant][0] /* ty=Tensor[(64), float32] */)
/* ty=Tensor[(1, 49, 64), float32] */;
8 %7 = add(%6, meta[relay.Constant][1] /* ty=Tensor[(64), float32] */)
/* ty=Tensor[(1, 49, 64), float32] */
"""
def __init__(self):
super(LayerNormRewrite, self).__init__()
self.data = wildcard()
self.gamma = wildcard()
self.beta = wildcard()
mu = is_op("mean")(self.data)
diff = is_op("subtract")(self.data, mu)
cdiff = diff | is_op("cast")(diff)
const_two = is_expr(relay.const(2)) | is_expr(relay.const(2.0))
p1 = is_op("power")(cdiff, const_two)
mp1 = is_op("mean")(p1) | is_op("variance")(self.data, mu)
eps = is_expr(relay.const(1e-5)) | is_expr(relay.const(1e-6))
added_eps = is_op("add")(mp1, eps)
deno = is_op("sqrt")(added_eps)
div_out = is_op("divide")(diff, deno)
div_out2 = diff * is_op("rsqrt")(added_eps)
weighted = is_op("multiply")(div_out | div_out2, self.gamma)
added_bias = is_op("add")(weighted, self.beta)
self.pattern = added_bias
def callback(self, pre, post, node_map):
data = node_map[self.data][0]
gamma = node_map[self.gamma][0]
beta = node_map[self.beta][0]
return relay.op.nn.layer_norm(data=data, gamma=gamma, beta=beta)
def rewrite_layer_norm(mod):
"""Rewrite the input graph to replace multiple operators with a TVM native layer normalization
operator so that we can offload them to tachikoma layer normalization byoc part.
"""
mod["main"] = rewrite(LayerNormRewrite(), mod["main"])
return mod
class DenseReshapeBiasGeluRewrite(DFPatternCallback):
"""
A callback to reorder reshape operators when the patterns are as below:
Pattern #1:
1 %62 = nn.dense(%61, meta[relay.Constant][13] /* ty=Tensor[(64, 64), float32] */,
units=None, out_dtype="float32") /* ty=Tensor[(3136, 64), float32] */;
2 %63 = reshape(%62, newshape=[1, 3136, 64]) /* ty=Tensor[(1, 3136, 64), float32] */;
3 %64 = add(meta[relay.Constant][4] /* ty=Tensor[(64), float32] */, %63)
/* ty=Tensor[(1, 3136, 64), float32] */;
Pattern #2:
1 %76 = nn.dense(%75, meta[relay.Constant][18] /* ty=Tensor[(512, 64), float32] */,
units=None, out_dtype="float32") /* ty=Tensor[(3136, 512), float32] */;
2 %77 = reshape(%76, newshape=[1, 3136, 512]) /* ty=Tensor[(1, 3136, 512), float32] */;
3 %78 = add(meta[relay.Constant][15] /* ty=Tensor[(512), float32] */, %77)
/* ty=Tensor[(1, 3136, 512), float32] */;
4 %79 = divide(%78, 1.41421f /* ty=float32 */) /* ty=Tensor[(1, 3136, 512), float32] */;
5 %80 = erf(%79) /* ty=Tensor[(1, 3136, 512), float32] */;
6 %81 = add(%80, 1f /* ty=float32 */) /* ty=Tensor[(1, 3136, 512), float32] */;
7 %82 = multiply(%78, %81) /* ty=Tensor[(1, 3136, 512), float32] */;
8 %83 = multiply(%82, 0.5f /* ty=float32 */) /* ty=Tensor[(1, 3136, 512), float32] */;
"""
def __init__(self, has_gelu=True):
super(DenseReshapeBiasGeluRewrite, self).__init__()
self.data = wildcard()
self.weight = wildcard()
self.bias = wildcard()
self.const1 = wildcard()
self.const2 = wildcard()
self.const3 = wildcard()
self.attr_map = {}
self.has_gelu = has_gelu
den = is_op("nn.dense")(self.data, self.weight)
re_den = is_op("reshape")(den)
added = is_op("add")(self.bias, re_den)
if self.has_gelu:
divisor = is_op("divide")(added, self.const1)
val_erf = is_op("erf")(divisor)
added_erf = is_op("add")(val_erf, self.const2)
mul1 = is_op("multiply")(added, added_erf)
mul2 = is_op("multiply")(mul1, self.const3)
self.pattern = mul2
else:
self.pattern = added
def get_attr(self, pre):
"""Recursively retrieve attributes from reshape operator."""
def visit_func(expr):
if isinstance(expr, _expr.Call) and expr.op == relay.op.get("reshape"):
new_attrs = {}
for k in expr.attrs.keys():
new_attrs[k] = expr.attrs[k]
self.attr_map["reshape"] = new_attrs
_analysis.post_order_visit(pre, visit_func)
def callback(self, pre, post, node_map):
self.get_attr(pre)
data = node_map[self.data][0]
weight = node_map[self.weight][0]
bias = node_map[self.bias][0]
den = relay.op.nn.dense(data, weight)
added = relay.op.add(bias, den)
if not self.has_gelu:
return relay.op.reshape(added, self.attr_map["reshape"]["newshape"])
const1 = node_map[self.const1][0]
const2 = node_map[self.const2][0]
const3 = node_map[self.const3][0]
divisor = relay.op.divide(added, const1)
val_erf = relay.op.erf(divisor)
added_erf = relay.op.add(val_erf, const2)
mul1 = relay.op.multiply(added, added_erf)
mul2 = relay.op.multiply(mul1, const3)
return relay.op.reshape(mul2, self.attr_map["reshape"]["newshape"])
def rewrite_dense_bias_gelu_reshape_last(mod):
"""Rewrite the input graph to reorder reshape operators so that
we can perform dense_bias_gelu/dense_bias fusion and then offload
them to byoc part.
"""
mod["main"] = rewrite(
[DenseReshapeBiasGeluRewrite(), DenseReshapeBiasGeluRewrite(has_gelu=False)], mod["main"]
)
return mod
class ResNetV1Rewrite(DFPatternCallback):
"""
A callback that moves the downsizing (strided) operation earlier in a ResNetV1 block.
A graph matching Pattern #1 is rewritten into Pattern #2:
Pattern #1:
%26 = nn.conv2d(%25, ty=Tensor[(64, 256, 1, 1));
%27 = add(%26, ty=Tensor[(64, 1, 1));
%28 = nn.relu(%27);
%29 = nn.conv2d(%28, ty=Tensor[(64, 64, 3, 3));
%30 = add(%29, ty=Tensor[(64, 1, 1));
%31 = nn.relu(%30);
%32 = nn.conv2d(%31, ty=Tensor[(256, 64, 1, 1));
%33 = add(%32, ty=Tensor[(256, 1, 1));
%34 = add(%33, %25);
%35 = nn.relu(%34);
%36 = nn.conv2d(%35, ty=Tensor[(128, 256, 1, 1), strides=[2, 2]);
%37 = add(%36, ty=Tensor[(128, 1, 1));
%38 = nn.relu(%37);
%39 = nn.conv2d(%38, ty=Tensor[(128, 128, 3, 3));
%40 = add(%39, ty=Tensor[(128, 1, 1)]);
%41 = nn.relu(%40);
%42 = nn.conv2d(%41, ty=Tensor[(512, 128, 1, 1));
%43 = nn.conv2d(%35, ty=Tensor[(512, 256, 1, 1), strides=[2, 2]);
%44 = add(%42, ty=Tensor[(512, 1, 1));
%45 = add(%43, ty=Tensor[(512, 1, 1));
%46 = add(%44, %45);
%47 = nn.relu(%46);
Pattern #2:
%26 = nn.conv2d(%25, ty=Tensor[(64, 256, 1, 1));
%27 = add(%26, ty=Tensor[(64, 1, 1));
%28 = nn.relu(%27);
%29 = nn.conv2d(%28, ty=Tensor[(64, 64, 3, 3), strides=[2, 2]);
%30 = add(%29, ty=Tensor[(64, 1, 1));
%31 = nn.relu(%30);
%32 = nn.conv2d(%31, ty=Tensor[(256, 64, 1, 1));
%33 = add(%32, ty=Tensor[(256, 1, 1));
%34 = nn.max_pool2d(%25, pool_size=[1, 1], strides=[2, 2], padding=[0, 0, 0, 0]);
%35 = add(%33, %34);
%36 = nn.relu(%35);
%37 = nn.conv2d(%36, ty=Tensor[(128, 256, 1, 1));
%38 = add(%37, ty=Tensor[(128, 1, 1));
%39 = nn.relu(%38);
%40 = nn.conv2d(%39, ty=Tensor[(128, 128, 3, 3));
%41 = add(%40, ty=Tensor[(128, 1, 1));
%42 = nn.relu(%41);
%43 = nn.conv2d(%42, ty=Tensor[(512, 128, 1, 1));
%44 = nn.conv2d(%36, ty=Tensor[(512, 256, 1, 1));
%45 = add(%43, ty=Tensor[(512, 1, 1));
%46 = add(%44, ty=Tensor[(512, 1, 1));
%47 = add(%45, %46);
%48 = nn.relu(%47);
"""
def __init__(self):
super(ResNetV1Rewrite, self).__init__()
self.attr_lst = []
self.data = wildcard()
self.w1, self.b1 = wildcard(), wildcard()
self.w2, self.b2 = wildcard(), wildcard()
self.w3, self.b3 = wildcard(), wildcard()
self.w4, self.b4 = wildcard(), wildcard()
self.w5, self.b5 = wildcard(), wildcard()
self.w6, self.b6 = wildcard(), wildcard()
self.w7, self.b7 = wildcard(), wildcard()
conv1 = is_op("nn.conv2d")(self.data, self.w1).has_attr({"kernel_size": [1, 1]})
conv1 = is_op("add")(conv1, self.b1)
conv1 = is_op("nn.relu")(conv1)
conv2 = is_op("nn.conv2d")(conv1, self.w2).has_attr({"kernel_size": [3, 3]})
conv2 = is_op("add")(conv2, self.b2)
conv2 = is_op("nn.relu")(conv2)
conv3 = is_op("nn.conv2d")(conv2, self.w3).has_attr({"kernel_size": [1, 1]})
conv3 = is_op("add")(conv3, self.b3)
conv3 = is_op("add")(conv3, self.data)
conv3 = is_op("nn.relu")(conv3)
left_conv4 = is_op("nn.conv2d")(conv3, self.w4).has_attr({"strides": [2, 2]})
left_conv4 = is_op("add")(left_conv4, self.b4)
left_conv4 = is_op("nn.relu")(left_conv4)
left_conv5 = is_op("nn.conv2d")(left_conv4, self.w5).has_attr({"kernel_size": [3, 3]})
left_conv5 = is_op("add")(left_conv5, self.b5)
left_conv5 = is_op("nn.relu")(left_conv5)
left_conv6 = is_op("nn.conv2d")(left_conv5, self.w6).has_attr({"kernel_size": [1, 1]})
left_conv6 = is_op("add")(left_conv6, self.b6)
right_conv7 = is_op("nn.conv2d")(conv3, self.w7).has_attr({"strides": [2, 2]})
right_conv7 = is_op("add")(right_conv7, self.b7)
out = is_op("add")(left_conv6, right_conv7)
out = is_op("nn.relu")(out)
self.pattern = out
def get_attr(self, pre):
"""Recursively retrieve attributes from reshape operator."""
def visit_func(expr):
if isinstance(expr, _expr.Call) and expr.op == relay.op.get("nn.conv2d"):
self.attr_lst.append(expr.attrs)
_analysis.post_order_visit(pre, visit_func)
def callback(self, pre, post, node_map):
self.get_attr(pre)
data = node_map[self.data][0]
w1, b1 = node_map[self.w1][0], node_map[self.b1][0]
w2, b2 = node_map[self.w2][0], node_map[self.b2][0]
w3, b3 = node_map[self.w3][0], node_map[self.b3][0]
w4, b4 = node_map[self.w4][0], node_map[self.b4][0]
w5, b5 = node_map[self.w5][0], node_map[self.b5][0]
w6, b6 = node_map[self.w6][0], node_map[self.b6][0]
w7, b7 = node_map[self.w7][0], node_map[self.b7][0]
new_attrs = self.attr_lst[-7]
conv1 = relay.op.nn.conv2d(data, w1, **new_attrs)
conv1 = relay.op.add(conv1, b1)
conv1 = relay.op.nn.relu(conv1)
new_attrs = dict(self.attr_lst[-6])
new_attrs["strides"] = [2, 2]
conv2 = relay.op.nn.conv2d(conv1, w2, **new_attrs)
conv2 = relay.op.add(conv2, b2)
conv2 = relay.op.nn.relu(conv2)
new_attrs = self.attr_lst[-5]
conv3 = relay.op.nn.conv2d(conv2, w3, **new_attrs)
conv3 = relay.op.add(conv3, b3)
max_pool = relay.op.nn.max_pool2d(
data, pool_size=(1, 1), strides=(2, 2), layout=new_attrs["data_layout"]
)
conv3 = relay.op.add(conv3, max_pool)
conv3 = relay.op.nn.relu(conv3)
new_attrs = dict(self.attr_lst[-4])
new_attrs["strides"] = [1, 1]
left_conv4 = relay.op.nn.conv2d(conv3, w4, **new_attrs)
left_conv4 = relay.op.add(left_conv4, b4)
left_conv4 = relay.op.nn.relu(left_conv4)
new_attrs = self.attr_lst[-3]
left_conv5 = relay.op.nn.conv2d(left_conv4, w5, **new_attrs)
left_conv5 = relay.op.add(left_conv5, b5)
left_conv5 = relay.op.nn.relu(left_conv5)
new_attrs = self.attr_lst[-2]
left_conv6 = relay.op.nn.conv2d(left_conv5, w6, **new_attrs)
left_conv6 = relay.op.add(left_conv6, b6)
new_attrs = dict(self.attr_lst[-1])
new_attrs["strides"] = [1, 1]
right_conv7 = relay.op.nn.conv2d(conv3, w7, **new_attrs)
right_conv7 = relay.op.add(right_conv7, b7)
out = relay.op.add(left_conv6, right_conv7)
out = relay.op.nn.relu(out)
self.attr_lst = []
return out
def rewrite_resnetv1(mod):
"""Rewrite the the ResNetV1 downsize block to reduce the computation complexity."""
mod["main"] = rewrite(ResNetV1Rewrite(), mod["main"])
return mod
class LegalizeQnnOpForTachikoma(DFPatternCallback):
"""Legalize QNN based patterns to match Tachikoma
original pattern:
OP = qnn.dense | qnn.conv2d
%1 = OP<int>(SRC, WGH) - OP<int>(src_zp, WGH) // qnn.conv2d
%2 = %1 + orig_bias // bias
%2 = (%1 - rq_in_zp) * rq_in_scl / rq_out_scl + rq_out_zp // qnn.requantize
%3 = act(%2) // activation == clip
%4 = ((%3 - sum_lhs_zp) * sum_lhs_scl + (SRC2 - sum_rhs_zp) * sum_rhs_scl) // qnn.add
/ sum_out_scl + sum_out_zp
transformed into the Tachikoma compatible form:
%1 = OP<int>(SRC, WGH)
%2 = cast(%1, dtype="float")
%3 = (%2 + bias) * o_scl
%4 = act(%3) * act_scl
%5 = %4 + SRC2 * sum_scl
%6 = %5 + dst_zp
%7 = cast(%6, dtype=<dst dtype>)
where:
o_scl = rq_in_scl / rq_out_scl
act_scl = sum_lhs_scl / sum_out_scl
sum_scl = sum_rhs_scl / sum_out_scl
bias = orig_bias - OP(src_zp, WGH) - rq_in_zp + rq_out_zp * rq_out_scl / rq_in_scl
dst_zp = sum_out_zp - sum_lhs_zp * sum_lhs_scl / sum_out_scl -
sum_rhs_zp * sum_rhs_scl / sum_out_scl
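Note: the bias rewrite follows from factoring the requantize affine map,
(%1 - rq_in_zp) * rq_in_scl / rq_out_scl + rq_out_zp
== (%1 - rq_in_zp + rq_out_zp * rq_out_scl / rq_in_scl) * o_scl,
which is why the two requantize zero-point terms are folded into `bias`
and only a single multiply by o_scl remains in the post-op chain.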
"""
def __init__(self):
super(LegalizeQnnOpForTachikoma, self).__init__()
self.src = wildcard()
self.wgh = wildcard()
self.bias = wildcard()
self.sum_src = wildcard()
self.src_scl = is_constant()
self.src_zp = is_constant()
self.wgh_scl = is_constant()
self.wgh_zp = is_expr(const(0))
self.rq_in_scl = is_constant()
self.rq_in_zp = is_constant()
self.rq_out_scl = is_constant()
self.rq_out_zp = is_constant()
self.sum_lhs_scl = is_constant()
self.sum_lhs_zp = is_constant()
self.sum_rhs_scl = is_constant()
self.sum_rhs_zp = is_constant()
self.sum_out_scl = is_constant()
self.sum_out_zp = is_constant()
self.root = (is_op("qnn.conv2d") | is_op("qnn.dense"))(
self.src, self.wgh, self.src_zp, self.wgh_zp, self.src_scl, self.wgh_scl
)
pat = is_op("add")(self.root, self.bias) | self.root # optional bias
pat = is_op("qnn.requantize")(
pat, self.rq_in_scl, self.rq_in_zp, self.rq_out_scl, self.rq_out_zp
)
pat = is_op("clip")(pat)
cast = is_op("cast")(pat)
pat = is_op("qnn.add")(
cast,
self.sum_src,
self.sum_lhs_scl,
self.sum_lhs_zp,
self.sum_rhs_scl,
self.sum_rhs_zp,
self.sum_out_scl,
self.sum_out_zp,
)
pat = is_op("clip")(pat)
self.pattern = pat | cast
def callback(self, pre, post, node_map):
root = node_map[self.root][0]
src = node_map[self.src][0]
wgh = node_map[self.wgh][0]
bias = node_map.get(self.bias, default=[relay.const(0, dtype="int32")])[0]
src_zp = node_map[self.src_zp][0]
rq_in_scl = node_map[self.rq_in_scl][0]
rq_in_zp = node_map[self.rq_in_zp][0]
rq_out_scl = node_map[self.rq_out_scl][0]
rq_out_zp = node_map[self.rq_out_zp][0]
final_dtype = node_map[self.pattern][0].checked_type.dtype
if root.op == relay.op.get("qnn.conv2d"):
dst_layout = root.attrs.out_layout
dst_layout = root.attrs.data_layout if dst_layout == "" else dst_layout
wgh_layout = root.attrs.kernel_layout
else:
# qnn.dense has no layout attributes. Assume that it is plain.
dst_layout = "NC"
wgh_layout = "OI"
# TODO(@liaopeiyuan): dst_layout may be blocked
bias_rank = len(dst_layout) - dst_layout.index("C")
sum_src = node_map[self.sum_src][0] if self.sum_src in node_map else None
# Default values if the optional qnn.add (sum post-op) is not present
sum_lhs_scl = node_map[self.sum_lhs_scl][0] if sum_src else relay.const(1, dtype="float32")
sum_lhs_zp = node_map[self.sum_lhs_zp][0] if sum_src else relay.const(0, dtype="int32")
sum_rhs_scl = node_map[self.sum_rhs_scl][0] if sum_src else relay.const(0, dtype="float32")
sum_rhs_zp = node_map[self.sum_rhs_zp][0] if sum_src else relay.const(0, dtype="int32")
sum_out_scl = node_map[self.sum_out_scl][0] if sum_src else relay.const(1, dtype="float32")
sum_out_zp = node_map[self.sum_out_zp][0] if sum_src else relay.const(0, dtype="int32")
def cast_fp(op):
return relay.op.cast(op, dtype="float32")
def cast_to_constant(fn):
res = relay.create_executor(
kind="vm", mod=tvm.IRModule.from_expr(fn)
).evaluate()()
return relay.Constant(res)
# recalculate some factors
o_scl = rq_in_scl / rq_out_scl
act_scl = sum_lhs_scl / sum_out_scl
sum_scl = sum_rhs_scl / sum_out_scl
dst_zp = (
cast_fp(sum_out_zp)
- cast_fp(sum_lhs_zp) * sum_lhs_scl / sum_out_scl
- cast_fp(sum_rhs_zp) * sum_rhs_scl / sum_out_scl
)
bias = self.squeeze_bias(bias, dst_layout)
bias = (
cast_fp(bias)
- cast_fp(self.fake_op(src_zp, wgh, wgh_layout))
- cast_fp(rq_in_zp)
+ cast_fp(rq_out_zp) * rq_out_scl / rq_in_scl
)
bias = self.broadcast_to_rank(bias, bias_rank)
o_scl = self.broadcast_to_rank(o_scl, bias_rank)
act_scl = self.broadcast_to_rank(act_scl, bias_rank)
sum_scl = self.broadcast_to_rank(sum_scl, bias_rank)
zero_zp = relay.const(0, dtype="int32")
one_scl = relay.const(1.0, dtype="float32")
# construct new graph with proper post op ordering
gr = tvm.relay.Call(
root.op,
[src, wgh, zero_zp, zero_zp, one_scl, one_scl],
root.attrs,
root.type_args,
root.span,
)
gr = relay.op.cast(gr, dtype="float32")
gr = gr + bias
gr = gr * o_scl
gr = relay.op.clip(gr, 0, 255) * act_scl
gr = gr + sum_scl * cast_fp(sum_src) if sum_src else gr
gr = gr + dst_zp
gr = relay.op.cast(gr, dtype=final_dtype)
return gr
@staticmethod
def fake_op(zp, wgh, layout):
"""Fake operator implementation for zp broadcast input"""
# Conv: reduce kernel {OC, IC, KH, KW} -> {OC} in case of group that is still correct
# Dense: reduce kernel {OC, IC} -> {OC}
wgh_int = relay.op.cast(wgh, dtype="int32")
reduced_kernel = relay.op.sum(
wgh_int, axis=[layout.index("O")], keepdims=False, exclude=True
)
return zp * reduced_kernel
@staticmethod
def squeeze_bias(bias, layout):
shape = transform.InferTypeLocal(bias).concrete_shape
c_position = layout.index("C") - len(layout) + len(shape)
squeeze_idxs = [i for i in range(len(shape)) if i != c_position]
return relay.op.squeeze(bias, squeeze_idxs)
@staticmethod
def broadcast_to_rank(op, rank):
"""Scalar or 1D tensor are supported"""
shape = transform.InferTypeLocal(op).concrete_shape
if len(shape) == 0:
return op
if len(shape) == 1:
return relay.op.expand_dims(op, 1, rank - 1)
raise ValueError("Unexpected bias rank to broadcast. Only 0 and 1 are supported.")
def legalize_qnn_for_tachikoma(mod):
"""Transform qnn primitives to Tachikoma compatible form. Eliminate source zero point and apply
strict sequence of post ops."""
mod["main"] = rewrite(LegalizeQnnOpForTachikoma(), mod["main"])
seq = tvm.transform.Sequential(
[
transform.InferType(),
# transform.SimplifyInference(), # TODO: this pass decompose nn.layer_norm
# transform.FoldScaleAxis(), # TODO: fail inside TVM in case of grouped convolutions.
transform.FoldConstant(),
]
)
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
return mod | https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/te_target.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Support a Relay partitioning target using Tensor Expressions."""
from typing import Callable, List, Dict
import tvm
import tvm.ir
from tvm import relay
from tvm import te
_LowerFunc = Callable[[relay.Call, List[te.Tensor]], te.Tensor]
_LOWER_MAP: Dict[str, _LowerFunc] = {}
def lower_composite(comp_name: str) -> Callable[[_LowerFunc], _LowerFunc]:
"""Register a lowering function for a given composite function name."""
def _register(f: _LowerFunc) -> _LowerFunc:
_LOWER_MAP[comp_name] = f
return f
return _register
def relay_to_runtime(target: tvm.target.Target) -> Callable[[relay.Function], tvm.runtime.Module]:
"""Create a Relay to runtime module lowering function using Tensor Expressions for lowering."""
def _relay_to_runtime(partition: relay.Function) -> tvm.runtime.Module:
"""Compile Relay functions to a runtime module using Tensor Expressions."""
assert isinstance(partition, relay.Function)
assert isinstance(partition.body, relay.Call)
assert isinstance(partition.body.op, relay.Function)
global_name = str(partition.attrs.global_symbol)
comp_func = partition.body.op
comp_name = comp_func.attrs["Composite"]
assert comp_name in _LOWER_MAP
assert isinstance(comp_func.body, relay.Call)
op = comp_func.body
inputs = []
for i, param in enumerate(comp_func.params):
inputs.append(
te.placeholder(
param.checked_type.shape,
name=f"input_{i}",
dtype=param.checked_type.dtype,
)
)
output = _LOWER_MAP[comp_name](op, inputs)
prim_func = te.create_prim_func(inputs + [output])
return tvm.build(prim_func, target=target, name=global_name)
return _relay_to_runtime
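# Illustrative sketch of registering a lowering function (the composite name
# "example.add" and the compute body below are hypothetical, chosen only to show
# the expected _LowerFunc signature):
#
#     @lower_composite("example.add")
#     def _lower_add(op: relay.Call, inputs: List[te.Tensor]) -> te.Tensor:
#         return te.compute(
#             inputs[0].shape, lambda *i: inputs[0](*i) + inputs[1](*i)
#         )
#
# The callable returned by relay_to_runtime(tvm.target.Target("llvm")) can then
# serve as the external compilation function for the matching BYOC target
# (registration details depend on the target).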
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/tensorrt.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, logging-format-interpolation
"""TensorRT supported operators."""
import logging
from typing import Tuple, List, Dict, Union, Optional, Any, Callable
import numpy as np # type: ignore
import tvm
from tvm import relay
from tvm.ir import Op
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.dataflow_pattern import is_op, wildcard, is_constant, is_tuple, is_tuple_get_item
from tvm.relay.expr import Call, Constant, TupleGetItem
from tvm.relay.expr_functor import ExprMutator, ExprVisitor
from tvm.relay.op.contrib.register import register_pattern_table
logger = logging.getLogger("TensorRT")
def is_tensorrt_compiler_enabled() -> bool:
return tvm.get_global_func("relay.ext.tensorrt.is_runtime_enabled", True) is not None
def is_tensorrt_runtime_enabled() -> bool:
"""Check if the TensorRT graph executor is present.
Returns
-------
ret: bool
True if present, False if not.
"""
check_enabled = tvm.get_global_func("relay.ext.tensorrt.is_runtime_enabled", True)
if check_enabled:
return check_enabled()
return False
def get_tensorrt_target() -> tvm.target.Target:
"""Returns the current Target, which must be of kind "tensorrt"."""
target = tvm.target.Target.current()
if target is None or target.kind.name != "tensorrt":
# Create the default target.
return tvm.target.Target("tensorrt")
return target
def get_tensorrt_version() -> Tuple[int, int, int]:
"""Returns the version of TensorRT to assume during compilation.
In order of preference this is taken from:
- The current "tensorrt" target's "tensorrt_version" attribute string.
- The version linked to the TVM runtime.
- (6, 0, 1)
Returns
-------
ret: Tuple[int, int, int]
TensorRT version as a tuple of (major, minor, patch).
"""
# cf logic in tensorrt/codegen.cc::SaveGlobalAttributes
# First check for version in target.
target = get_tensorrt_target()
version = target.attrs["tensorrt_version"]
if len(version) == 3:
return int(version[0]), int(version[1]), int(version[2])
assert len(version) == 0
# Next, ask runtime for its version.
if is_tensorrt_runtime_enabled():
get_version = tvm.get_global_func("relay.ext.tensorrt.get_version")
version = get_version()
assert len(version) == 3
return int(version[0]), int(version[1]), int(version[2])
# Finally, use default.
logger.warning(
"TVM was not built against TensorRT and no version was provided in the 'tensorrt' target."
"Defaulting to 6.0.1."
)
return (6, 0, 1)
def get_tensorrt_use_implicit_batch_mode() -> bool:
"""Returns the "use_implicit_batch" attribute of the current "tensorrt" target."""
target = get_tensorrt_target()
return target.attrs["use_implicit_batch"]
def get_tensorrt_remove_no_mac_subgraphs() -> bool:
"""Returns the "remove_no_mac_subgraphs" attribute of the current "tensorrt" target."""
target = get_tensorrt_target()
return target.attrs["remove_no_mac_subgraphs"]
def get_tensorrt_use_fp16() -> bool:
"""Returns the "use_fp16" attribute of the current "tensorrt" target."""
target = get_tensorrt_target()
return target.attrs["use_fp16"]
def partition_for_tensorrt(
mod: tvm.IRModule,
params: Optional[Dict[str, tvm.nd.NDArray]] = None,
# CAUTION: Can't use default Target("tensorrt") here since the target kind is only available
# if is_tensorrt_compiler_enabled() == True.
target: Optional[tvm.target.Target] = None,
) -> tvm.IRModule:
"""Partition all functions in mod to greedily offload supported operators to TensorRT.
Parameters
----------
mod : tvm.IRModule
The module to partition.
target : tvm.target.Target
A target of kind "tensorrt" describing additional partitioning and compilation options.
params : Optional[Dict[str, tvm.nd.NDArray]]
Constant input parameters.
Returns
-------
partitioned_mod : tvm.IRModule
The partitioned module.
"""
assert is_tensorrt_compiler_enabled(), "Can only partition for TensorRT if it is enabled"
if params:
mod["main"] = bind_params_by_name(mod["main"], params)
if target is None:
# Use a default target. The get_tensorrt_target() function will similarly create an
# equivalent default target when compilation continues after partitioning.
target = tvm.target.Target("tensorrt")
seq = tvm.transform.Sequential(
[
transform.InferType(),
RemoveDropoutPass(),
transform.RemoveUnusedFunctions(),
transform.ConvertLayout(
{
"nn.conv1d": ["NCW", "default"],
"nn.conv2d": ["NCHW", "default"],
"nn.conv3d": ["NCDHW", "default"],
"nn.conv2d_transpose": ["NCHW", "default"],
}
),
transform.FoldConstant(),
transform.MergeComposite(pattern_table()),
transform.AnnotateTarget("tensorrt"),
transform.MergeCompilerRegions(),
transform.PartitionGraph(),
transform.InferType(),
]
)
with target:
mod = seq(mod)
mod = prune_tensorrt_subgraphs(mod)
return mod
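# End-to-end usage sketch for partition_for_tensorrt above (illustrative; assumes
# `mod` and `params` come from a frontend importer and that the TensorRT codegen is
# enabled in this build):
#
#     mod = partition_for_tensorrt(mod, params=params)
#     with tvm.transform.PassContext(opt_level=3):
#         lib = relay.build(mod, target="cuda", params=params)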
def is_supported_trt_type(typ: Union[tvm.ir.TensorType, tvm.ir.TupleType], op_name: str) -> bool:
"""Check whether a type is supported by TensorRT."""
supported_dtypes = ["float32"]
if get_tensorrt_use_fp16():
supported_dtypes.append("float16")
if isinstance(typ, tvm.ir.TensorType):
if typ.dtype not in supported_dtypes:
logger.info(f"{op_name}: Only {supported_dtypes} tensor dtypes are supported.")
return False
dims = typ.shape
if get_tensorrt_use_implicit_batch_mode():
# The first dimension can be Any.
dims = dims[1:]
for dim in dims:
if isinstance(dim, tvm.tir.expr.Any):
logger.info(f"{op_name}: Only statically known tensor shapes are supported.")
return False
elif isinstance(typ, tvm.ir.TupleType):
for field_type in typ.fields:
if not is_supported_trt_type(field_type, op_name):
return False
else:
logger.info(f"{op_name}: Type {typ} is not supported.")
return False
return True
def get_op_name(expr: relay.expr.Expr) -> str:
"""Get the operator name from an expression."""
if isinstance(expr, Op):
return expr.name
if isinstance(expr, Call):
return get_op_name(expr.op)
if isinstance(expr, TupleGetItem):
return get_op_name(expr.tuple_value)
if isinstance(expr, relay.Tuple):
return get_op_name(expr.fields[0])
return ""
def get_args(expr: relay.expr.Expr) -> List[relay.expr.Expr]:
"""Get the arguments from an expression."""
if isinstance(expr, Call):
return expr.args
if isinstance(expr, TupleGetItem):
return get_args(expr.tuple_value)
if isinstance(expr, relay.Tuple):
return [arg for args in map(get_args, expr.fields) for arg in args]
return []
def get_attrs(expr: relay.expr.Expr) -> Any:
"""Get the attributes from an expression."""
if isinstance(expr, Call):
return expr.attrs
if isinstance(expr, TupleGetItem):
return get_attrs(expr.tuple_value)
return {}
CheckFunc = Callable[[Any, List[relay.expr.Expr], str], bool]
def make_predicate(checker: CheckFunc) -> Callable[[relay.expr.Expr], bool]:
"""Returns the pattern predicate which performs the standard checks, then invokes the
more primitive checker."""
def predicate(expr: relay.expr.Expr) -> bool:
op_name = get_op_name(expr)
attrs = get_attrs(expr)
args = get_args(expr)
if not all([is_supported_trt_type(arg.checked_type, op_name) for arg in args]):
return False
if not checker(attrs, args, op_name):
return False
logger.info(f"{op_name}: Predicate passes")
return True
return predicate
standard_predicate = make_predicate(lambda attrs, args, op_name: True)
def make_trt_version_checker(version: Tuple[int, int, int]) -> CheckFunc:
"""Helper for ops which require a minimum TRT version"""
def checker(attrs: Any, args: List[relay.expr.Expr], op_name: str) -> bool:
if get_tensorrt_version() < version:
logger.info(
f"{op_name}: requires TensorRT version {'.'.join(map(str, version))} or higher."
)
return False
return True
return checker
def make_and_checker(*checkers: CheckFunc) -> CheckFunc:
def checker(attrs: Any, args: List[relay.expr.Expr], op_name: str) -> bool:
return all([c(attrs, args, op_name) for c in checkers])
return checker
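# Illustrative composition (this is exactly how the pattern table below combines
# checkers): a predicate that requires both a minimum TensorRT version and an
# op-specific check can be built as
#
#     make_predicate(make_and_checker(make_trt_version_checker((6, 0, 1)), conv3d_checker))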
def multiply_checker(attrs: Any, args: List[relay.expr.Expr], op_name: str) -> bool:
"""Helper for multiply operations."""
shapes = [
[int(x) if not isinstance(x, tvm.tir.expr.Any) else -1 for x in arg.checked_type.shape]
for arg in args
]
# TODO(mbs): Follow up
# Batched multiply operations don't work in implicit batch mode. The following shapes
# have been excluded because they occur in PT MaskRCNN model. The long term solution is
# to switch to explicit batch mode after performance regressions are solved.
if all([list(map(int, shape)) in [[300, 64, 7, 7], [300, 1, 1, 1]] for shape in shapes]):
logger.info(f"{op_name}: Excluding since problematic in implicit batch mode")
return False
return True
def reduce_checker(attrs: Any, args: List[relay.expr.Expr], op_name: str) -> bool:
"""Helper for reduce operations."""
if get_tensorrt_use_implicit_batch_mode() and (not attrs.axis or len(attrs.axis) == 0):
logger.info(f"{op_name}: cannot reduce to scalar.")
return False
if attrs.exclude:
logger.info(f"{op_name}: exclude not supported.")
return False
if get_tensorrt_use_implicit_batch_mode() and any([x == 0 for x in map(int, attrs.axis)]):
logger.info(f"{op_name}: can't modify batch dimension.")
return False
return True
def add_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if add is supported by TensorRT."""
shapes = [
[int(x) if not isinstance(x, tvm.tir.expr.Any) else -1 for x in arg.checked_type.shape]
for arg in args
]
# Scalars require explicit batch mode.
if get_tensorrt_use_implicit_batch_mode() and any([len(shape) < 1 for shape in shapes]):
logger.info(f"{op_name}: Scalars not supported in implicit batch mode")
return False
if (
not get_tensorrt_use_implicit_batch_mode()
and (isinstance(args[0], Constant) or isinstance(args[1], Constant))
and len(shapes[0]) > 0
and len(shapes[1]) > 0
and shapes[0][0] == shapes[1][0]
and shapes[0][0] != 1
and (len(shapes[0]) > 3 or len(shapes[1]) > 3)
):
logger.info(f"{op_name}: bug in TRT with adding batched constants.")
return False
return True
def batch_norm_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if nn.batch_norm is supported by TensorRT."""
if len(args[0].checked_type.shape) == 5 and get_tensorrt_version() < (6, 0, 1):
logger.info(f"{op_name}: TensorRT 6.0.1 or higher is required for rank 5 inputs.")
return False
if len(args[0].checked_type.shape) > 5:
logger.info(f"{op_name}: Input rank must be 5 or less.")
return False
if int(attrs.axis) not in (1, 3):
logger.info(f"{op_name}: axis is {int(attrs.axis)} but must be 1 or 3.")
return False
return True
def softmax_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if nn.softmax is supported by TensorRT."""
if get_tensorrt_use_implicit_batch_mode() and int(attrs.axis) == 0:
logger.info(f"{op_name}: can't modify batch dimension.")
return False
return True
def conv1d_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if nn.conv1d is supported by TensorRT."""
if not isinstance(args[1], Constant):
logger.info(f"{op_name}: kernel argument must be constant.")
return False
if attrs.data_layout != "NCW":
logger.info(f"{op_name}: data_layout is {attrs.data_layout} but must be NCW.")
return False
if attrs.kernel_layout != "OIW":
logger.info(f"{op_name}: kernel_layout is {attrs.kernel_layout} but must be OIW.")
return False
return True
def conv2d_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if nn.conv2d is supported by TensorRT."""
assert len(args) == 2
if not isinstance(args[1], Constant):
logger.info(f"{op_name}: kernel argument must be constant.")
return False
if attrs.data_layout != "NCHW":
logger.info(f"{op_name}: data_layout is {attrs.data_layout} but must be NCHW.")
return False
if attrs.kernel_layout != "OIHW":
logger.info(f"{op_name}: kernel_layout is {attrs.kernel_layout} but must be OIHW.")
return False
if attrs.out_layout and attrs.out_layout != "NCHW":
logger.info(f"{op_name}: out_layout is {attrs.out_layout} but must be NCHW.")
return False
return True
def dense_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if dense is supported by TensorRT."""
if not isinstance(args[1], Constant):
logger.info(f"{op_name}: weight must be constant")
return False
input_rank = len(args[0].checked_type.shape)
weight_rank = len(args[1].checked_type.shape)
if input_rank not in (2, 3, 4):
logger.info(f"{op_name}: input has rank {input_rank} but must be 2, 3 or 4.")
return False
if weight_rank != 2:
logger.info(f"{op_name}: weight has rank {weight_rank} but must be 2.")
return False
return True
def batch_matmul_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if dense is supported by TensorRT."""
if get_tensorrt_use_implicit_batch_mode() and len(args[0].checked_type.shape) != len(
args[1].checked_type.shape
):
logger.info(f"{op_name}: requires use_implict_batch=False.")
return False
return True
def layer_norm_checker(attrs: Any, args: List[relay.expr.Expr], op_name: str) -> bool:
"""Check if dense is supported by TensorRT."""
if get_tensorrt_use_implicit_batch_mode() and int(attrs.axis) == 0:
logger.info(f"{op_name}: requires use_implict_batch=False.")
return False
return True
def bias_add_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if nn.bias_add is supported by TensorRT."""
input_rank = len(args[0].checked_type.shape)
if input_rank not in (2, 3, 4):
logger.info(f"{op_name}: input rank is {input_rank} but must be 2, 3 or 4.")
return False
return True
def max_pool_2d_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if nn.max_pool2d is supported by TensorRT."""
if attrs.layout != "NCHW":
logger.info(f"{op_name}: layout is {attrs.layout} but must be NCHW.")
return False
if attrs.ceil_mode and get_tensorrt_version() < (5, 1, 5):
logger.info(f"{op_name}: ceil_mode=True requires TensorRT 5.1.5 or greater.")
return False
return True
def avg_pool_2d_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if nn.avg_pool2d is supported by TensorRT."""
if attrs.layout != "NCHW":
logger.info(f"{op_name}: layout is {attrs.layout} but must be NCHW.")
return False
if (
attrs.count_include_pad
and len(attrs.padding) == 4
and (
int(attrs.padding[0]) != int(attrs.padding[2])
or int(attrs.padding[1]) != int(attrs.padding[3])
)
):
logger.info(
f"{op_name}: inclusive-counted blended or average "
"pooling is not supported in combination with asymmetric padding"
)
return False
if attrs.ceil_mode and get_tensorrt_version() < (5, 1, 5):
logger.info(f"{op_name}: ceil_mode=True requires TensorRT 5.1.5 or greater.")
return False
return True
def global_max_pool_2d_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if nn.global_max_pool2d is supported by TensorRT."""
if attrs.layout != "NCHW":
logger.info(f"{op_name}: layout is {attrs.layout} but must be NCHW.")
return False
return True
def global_avg_pool_2d_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if nn.global_avg_pool2d is supported by TensorRT."""
if attrs.layout != "NCHW":
logger.info(f"{op_name}: layout is {attrs.layout} but must be NCHW.")
return False
return True
def expand_dims_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if expand_dims is supported by TensorRT."""
if get_tensorrt_use_implicit_batch_mode() and int(attrs.axis) == 0:
logger.info(f"{op_name}: can't modify batch dimension.")
return False
return True
def squeeze_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if squeeze is supported by TensorRT."""
if not attrs.axis:
logger.info(f"{op_name}: must explicitly set axis.")
return False
if get_tensorrt_use_implicit_batch_mode() and any([axis == 0 for axis in map(int, attrs.axis)]):
logger.info(f"{op_name}: can't modify batch dimension.")
return False
return True
def concatenate_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if concatenate is supported by TensorRT."""
if get_tensorrt_use_implicit_batch_mode():
if int(attrs.axis) == 0:
logger.info(f"{op_name}: can't modify batch dimension.")
return False
if not isinstance(args[0], relay.Tuple):
logger.info("f{op_name}: concatenate must be applied to a literal tuple")
return False
for tuple_input in args[0].fields:
if isinstance(tuple_input, Constant):
logger.info(f"{op_name}: can't concatenate tensors with constants.")
return False
return True
def split_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if split is supported by TensorRT."""
if get_tensorrt_use_implicit_batch_mode() and int(attrs.axis) == 0:
logger.info(f"{op_name}: can't modify batch dimension.")
return False
return True
def conv2d_transpose_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if nn.conv2d_transpose is supported by TensorRT."""
if attrs.data_layout != "NCHW":
logger.info(f"{op_name}: data_layout is {attrs.data_layout} but must be NCHW.")
return False
if attrs.kernel_layout != "OIHW":
logger.info(f"{op_name}: kernel_layout is {attrs.kernel_layout} but must be OIHW.")
return False
if attrs.out_layout and attrs.out_layout != "NCHW":
logger.info(f"{op_name}: out_layout is {attrs.out_layout} but must be NCHW.")
return False
if attrs.dilation and any([rate != 1 for rate in map(int, attrs.dilation)]):
logger.info(f"{op_name}: dilation rate must be 1.")
return False
return True
def transpose_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if transpose is supported by TensorRT."""
if get_tensorrt_use_implicit_batch_mode() and int(attrs.axes[0]) != 0:
logger.info(f"{op_name}: can't modify batch dimension.")
return False
return True
def layout_transform_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if layout_transform is supported by TensorRT."""
if (attrs.src_layout, attrs.dst_layout) not in [
("NCHW", "NHWC"),
("NHWC", "NCHW"),
("NDHWC", "NCDHW"),
("NCDHW", "NDHWC"),
]:
logger.info(f"{op_name}: {attrs.src_layout} to {attrs.dst_layout} is not supported.")
return False
return True
def reshape_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if reshape is supported by TensorRT."""
if any([x < -1 for x in map(int, attrs.newshape)]):
logger.info(f"{op_name}: new shape dims must be explicit.")
return False
if get_tensorrt_use_implicit_batch_mode():
shape = args[0].checked_type.shape
new_shape = attrs.newshape
if len(new_shape) == 0 or len(shape) == 0:
logger.info(f"{op_name}: Can't reshape to or from scalar.")
return False
dynamic_reshape = any([isinstance(x, tvm.tir.expr.Any) for x in shape])
if dynamic_reshape:
# Make sure that the batch dim is unmodified.
if int(new_shape[0]) < 0:
for shape_val, new_shape_val in zip(shape[1:], new_shape[1:]):
if not (
isinstance(shape_val, (int, tvm.tir.expr.IntImm))
and isinstance(new_shape_val, (int, tvm.tir.expr.IntImm))
and int(shape_val) == int(new_shape_val)
):
logger.info(f"{op_name}: can't modify batch dimension")
return False
elif int(new_shape[0]) > 0:
# Currently we only allow dim[0] to be Any, so this branch will always be False
if not (
isinstance(shape[0], (int, tvm.tir.expr.IntImm))
and isinstance(new_shape[0], (int, tvm.tir.expr.IntImm))
and int(shape[0]) == int(new_shape[0])
):
logger.info(f"{op_name}: can't modify batch dimension")
return False
else:
shape = list(map(int, shape))
new_shape = list(map(int, new_shape))
# TRT cannot modify batch dimension.
original_volume = np.prod(shape)
# First, resolve 0.
for i, value in enumerate(new_shape):
if value == 0:
new_shape[i] = shape[i]
# Resolve -1.
for i, value in enumerate(new_shape):
if value == -1:
new_shape[i] = original_volume // np.prod([x for x in new_shape if x != -1])
# Remove batch dimension and see if volumes match
if shape[0] != new_shape[0]:
logger.info(f"{op_name}: can't modify batch dimension.")
return False
return True
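# Worked example for the static-shape branch of reshape_checker above (illustrative
# values): with input shape [2, 3, 4] and newshape=[0, -1], the 0 resolves to 2 and
# the -1 resolves to 24 // 2 == 12, giving [2, 12]; the batch dimension is unchanged,
# so the reshape is accepted. newshape=[6, 4] would change the batch dimension from
# 2 to 6 and is rejected in implicit batch mode.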
def pad_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if nn.pad is supported by TensorRT."""
pad_value = args[1]
if not isinstance(pad_value, relay.Constant):
logger.info(f"{op_name}: pad argument must be constant")
return False
pad_value = pad_value.data.numpy().item()
if attrs.pad_mode != "constant":
logger.info(f"{op_name}: pad mode is {attrs.pad_mode} but must be constant.")
return False
if pad_value > 0.0:
logger.info(f"{op_name}: pad value is {pad_value} but must be 0.0.")
return False
if len(attrs.pad_width) not in [4, 5]:
logger.info(f"{op_name}: can only pad 4D or 5D inputs")
return False
if any([x != 0 for x in attrs.pad_width[0]]) or any([x != 0 for x in attrs.pad_width[1]]):
logger.info(f"{op_name}: can't pad batch or channel dimensions.")
return False
if len(attrs.pad_width) == 5 and any([x != 0 for x in attrs.pad_width[2]]):
logger.info(f"{op_name}: can only pad last two dimensions for 5D inputs.")
return False
return True
def strided_slice_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if strided_slice is supported by TensorRT."""
if get_tensorrt_use_implicit_batch_mode():
batch_dim_begin_modified = attrs.begin[0] is not None and int(attrs.begin[0]) != 0
batch_dim_end_modified = (
attrs.end[0] is not None
and int(attrs.end[0]) != -1
and int(attrs.end[0]) != int(args[0].checked_type.shape[0])
)
if batch_dim_begin_modified or batch_dim_end_modified:
logger.info(f"{op_name}: can't modify batch dimension.")
return False
if any([x is not None and x <= 0 for x in attrs.strides]):
logger.info(f"{op_name}: stride must be positive")
return False
for i in range(0, len(args[0].checked_type.shape)):
begin = int(attrs.begin[i])
if attrs.slice_mode == "end":
end = (
int(attrs.end[i])
if attrs.end[i] is not None and int(attrs.end[i]) != -1
else args[0].checked_type.shape[i]
)
size = int(end) - int(begin)
elif attrs.slice_mode == "size":
size = (
int(attrs.end[i])
if attrs.end[i] is not None and int(attrs.end[i]) != -1
else args[0].checked_type.shape[i] - begin
)
else:
logger.warning(f"{op_name}: unknown slice mode encountered")
size = 1
if int(size) < 1:
logger.info(f"{op_name}: size of slice must be at least 1")
return False
return True
def adaptive_max_pool2d_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if nn.adaptive_max_pool2d is supported by TensorRT."""
if len(attrs.output_size) == 0 or any([size != 1 for size in map(int, attrs.output_size)]):
logger.info(f"{op_name}: output size must be (1, 1).")
return False
return True
def adaptive_avg_pool2d_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if nn.adaptive_avg_pool2d is supported by TensorRT."""
if len(attrs.output_size) == 0 or any([size != 1 for size in map(int, attrs.output_size)]):
logger.info(f"{op_name}: output size must be (1, 1).")
return False
return True
def conv3d_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if nn.conv3d is supported by TensorRT."""
if not isinstance(args[1], Constant):
logger.info(f"{op_name}: kernel argument must be constant.")
return False
if attrs.data_layout != "NCDHW":
logger.info(f"{op_name}: data_layout is {attrs.data_layout} but must be NCDHW.")
return False
if attrs.kernel_layout != "OIDHW":
logger.info(f"{op_name}: kernel_layout is {attrs.kernel_layout} but must be OIDHW.")
return False
if attrs.out_layout and attrs.out_layout != "NCDHW":
logger.info(f"{op_name}: out_layout is {attrs.out_layout} but must be NCDHW.")
return False
return True
def max_pool_3d_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if nn.max_pool3d is supported by TensorRT."""
if attrs.layout != "NCDHW":
logger.info(f"{op_name}: layout is {attrs.layout} but must be NCDHW.")
return False
return True
def avg_pool_3d_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if nn.avg_pool3d is supported by TensorRT."""
if attrs.layout != "NCDHW":
logger.info(f"{op_name}: layout is {attrs.layout} but must be NCDHW.")
return False
return True
def conv3d_transpose_checker(
attrs: Any, args: List[relay.expr.Expr], op_name: str
) -> bool: # pylint: disable=unused-variable
"""Check if nn.conv3d_transpose is supported by TensorRT."""
if attrs.data_layout != "NCDHW":
logger.info(f"{op_name}: data_layout is {attrs.data_layout} but must be NCDHW.")
return False
if attrs.kernel_layout != "OIDHW":
logger.info(f"{op_name}: kernel_layout is {attrs.kernel_layout} but must be OIDHW.")
return False
if attrs.out_layout and attrs.out_layout != "NCDHW":
logger.info(f"{op_name}: out_layout is {attrs.out_layout} but must be NCDHW.")
return False
if attrs.dilation and any([rate != 1 for rate in map(int, attrs.dilation)]):
logger.info(f"{op_name}: dilation rate must be 1.")
return False
if attrs.output_padding and any([x != 0 for x in map(int, attrs.output_padding)]):
logger.info(f"{op_name}: output padding is not supported.")
return False
return True
def unary_op_pattern(op: relay.expr.Expr) -> relay.dataflow_pattern.DFPattern:
"""Matches unary operation"""
return is_op(op)(wildcard())
def unary_op_pattern_with_any_tuple(op: relay.expr.Expr) -> relay.dataflow_pattern.DFPattern:
"""Matches unary operation with literal tuple argument"""
return is_op(op)(is_tuple(None))
def binary_op_pattern(op: relay.expr.Expr) -> relay.dataflow_pattern.DFPattern:
"""Matches binary operation"""
return is_op(op)(wildcard(), wildcard())
def binary_op_pattern_with_const(op: relay.expr.Expr) -> relay.dataflow_pattern.DFPattern:
"""Matches binary operation with rhs arg a constant"""
return is_op(op)(wildcard(), is_constant())
def proj_five_op_pattern_with_const(op: relay.expr.Expr) -> relay.dataflow_pattern.DFPattern:
return is_tuple_get_item(
is_op(op)(wildcard(), is_constant(), is_constant(), is_constant(), is_constant()), 0
)
@register_pattern_table("tensorrt")
def pattern_table() -> List[
Tuple[str, relay.dataflow_pattern.DFPattern, Callable[[relay.expr.Call], bool]]
]:
"""Get the Tensorrt compiler pattern table for supported ops."""
return [
(
"tensorrt.nn.conv3d",
binary_op_pattern_with_const("nn.conv3d"),
make_predicate(make_and_checker(make_trt_version_checker((6, 0, 1)), conv3d_checker)),
),
(
"tensorrt.nn.conv2d",
binary_op_pattern_with_const("nn.conv2d"),
make_predicate(conv2d_checker),
),
(
"tensorrt.nn.conv1d",
binary_op_pattern_with_const("nn.conv1d"),
make_predicate(conv1d_checker),
),
(
"tensorrt.nn.conv2d_transpose",
binary_op_pattern("nn.conv2d_transpose"),
make_predicate(conv2d_transpose_checker),
),
("tensorrt.squeeze", binary_op_pattern("squeeze"), make_predicate(squeeze_checker)),
("tensorrt.add", binary_op_pattern("add"), make_predicate(add_checker)),
(
"tensorrt.nn.dense",
binary_op_pattern_with_const("nn.dense"),
make_predicate(dense_checker),
),
(
"tensorrt.nn.bias_add",
binary_op_pattern("nn.bias_add"),
make_predicate(bias_add_checker),
),
(
"tensorrt.nn.batch_matmul",
binary_op_pattern("nn.batch_matmul"),
make_predicate(batch_matmul_checker),
),
("tensorrt.divide", binary_op_pattern("divide"), standard_predicate),
("tensorrt.multiply", binary_op_pattern("multiply"), make_predicate(multiply_checker)),
("tensorrt.subtract", binary_op_pattern("subtract"), standard_predicate),
("tensorrt.power", binary_op_pattern("power"), standard_predicate),
("tensorrt.maximum", binary_op_pattern("maximum"), standard_predicate),
("tensorrt.minimum", binary_op_pattern("minimum"), standard_predicate),
("tensorrt.nn.relu", unary_op_pattern("nn.relu"), standard_predicate),
(
"tensorrt.nn.leaky_relu",
unary_op_pattern("nn.leaky_relu"),
make_predicate(make_trt_version_checker((5, 1, 5))),
),
("tensorrt.nn.pad", unary_op_pattern("nn.pad"), standard_predicate),
("tensorrt.sigmoid", unary_op_pattern("sigmoid"), standard_predicate),
("tensorrt.tanh", unary_op_pattern("tanh"), standard_predicate),
("tensorrt.exp", unary_op_pattern("exp"), standard_predicate),
("tensorrt.log", unary_op_pattern("log"), standard_predicate),
("tensorrt.sqrt", unary_op_pattern("sqrt"), standard_predicate),
("tensorrt.abs", unary_op_pattern("abs"), standard_predicate),
("tensorrt.negative", unary_op_pattern("negative"), standard_predicate),
("tensorrt.nn.batch_flatten", unary_op_pattern("nn.batch_flatten"), standard_predicate),
("tensorrt.clip", unary_op_pattern("clip"), standard_predicate),
(
"tensorrt.sin",
unary_op_pattern("sin"),
make_predicate(make_trt_version_checker((5, 1, 5))),
),
(
"tensorrt.cos",
unary_op_pattern("cos"),
make_predicate(make_trt_version_checker((5, 1, 5))),
),
(
"tensorrt.atan",
unary_op_pattern("atan"),
make_predicate(make_trt_version_checker((5, 1, 5))),
),
(
"tensorrt.ceil",
unary_op_pattern("ceil"),
make_predicate(make_trt_version_checker((5, 1, 5))),
),
("tensorrt.floor", unary_op_pattern("floor"), standard_predicate),
(
"tensorrt.erf",
unary_op_pattern("erf"),
make_predicate(make_trt_version_checker((7, 0, 0))),
),
("tensorrt.sum", unary_op_pattern("sum"), make_predicate(reduce_checker)),
("tensorrt.prod", unary_op_pattern("prod"), make_predicate(reduce_checker)),
("tensorrt.max", unary_op_pattern("max"), make_predicate(reduce_checker)),
("tensorrt.min", unary_op_pattern("min"), make_predicate(reduce_checker)),
("tensorrt.max", unary_op_pattern("max"), make_predicate(reduce_checker)),
("tensorrt.mean", unary_op_pattern("mean"), make_predicate(reduce_checker)),
(
"tensorrt.concatenate",
unary_op_pattern_with_any_tuple("concatenate"),
make_predicate(concatenate_checker),
),
(
"tensorrt.expand_dims",
unary_op_pattern("expand_dims"),
make_predicate(expand_dims_checker),
),
(
"tensorrt.layout_transform",
unary_op_pattern("layout_transform"),
make_predicate(layout_transform_checker),
),
("tensorrt.transpose", unary_op_pattern("transpose"), make_predicate(transpose_checker)),
("tensorrt.reshape", unary_op_pattern("reshape"), make_predicate(reshape_checker)),
("tensorrt.split", unary_op_pattern("split"), make_predicate(split_checker)),
("tensorrt.nn.pad", unary_op_pattern("nn.pad"), make_predicate(pad_checker)),
(
"tensorrt.strided_slice",
unary_op_pattern("strided_slice"),
make_predicate(
make_and_checker(make_trt_version_checker((5, 1, 5)), strided_slice_checker)
),
),
(
"tensorrt.nn.adaptive_avg_pool2d",
unary_op_pattern("nn.adaptive_avg_pool2d"),
make_predicate(adaptive_avg_pool2d_checker),
),
(
"tensorrt.nn.adaptive_max_pool2d",
unary_op_pattern("nn.adaptive_max_pool2d"),
make_predicate(adaptive_max_pool2d_checker),
),
(
"tensorrt.nn.max_pool3d",
unary_op_pattern("nn.max_pool3d"),
make_predicate(
make_and_checker(make_trt_version_checker((6, 0, 1)), max_pool_3d_checker)
),
),
(
"tensorrt.nn.avg_pool3d",
unary_op_pattern("nn.avg_pool3d"),
make_predicate(
make_and_checker(make_trt_version_checker((6, 0, 1)), avg_pool_3d_checker)
),
),
(
"tensorrt.nn.conv3d_transpose",
unary_op_pattern("nn.conv3d_transpose"),
make_predicate(
make_and_checker(make_trt_version_checker((6, 0, 1)), conv3d_transpose_checker)
),
),
("tensorrt.nn.softmax", unary_op_pattern("nn.softmax"), make_predicate(softmax_checker)),
(
"tensorrt.nn.layer_norm",
unary_op_pattern("nn.layer_norm"),
make_predicate(layer_norm_checker),
),
(
"tensorrt.nn.max_pool2d",
unary_op_pattern("nn.max_pool2d"),
make_predicate(max_pool_2d_checker),
),
(
"tensorrt.nn.avg_pool2d",
unary_op_pattern("nn.avg_pool2d"),
make_predicate(avg_pool_2d_checker),
),
(
"tensorrt.nn.global_max_pool2d",
unary_op_pattern("nn.global_max_pool2d"),
make_predicate(global_max_pool_2d_checker),
),
(
"tensorrt.nn.global_avg_pool2d",
unary_op_pattern("nn.global_avg_pool2d"),
make_predicate(global_avg_pool_2d_checker),
),
(
"tensorrt.nn.batch_norm",
proj_five_op_pattern_with_const("nn.batch_norm"),
make_predicate(batch_norm_checker),
),
]
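# Illustrative sketch (not part of the original file): the "tensorrt" pattern table registered
# above is what the BYOC flow consumes when partitioning a module for TensorRT, typically via
#
#   seq = tvm.transform.Sequential(
#       [
#           relay.transform.MergeComposite(pattern_table()),
#           relay.transform.AnnotateTarget("tensorrt"),
#           relay.transform.PartitionGraph(),
#       ]
#   )
#   partitioned_mod = seq(mod)  # `mod` is an assumed tvm.IRModule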
class IsComputeIntensiveGraph(ExprVisitor):
"""
    Visits the graph recursively and checks whether it contains compute-heavy ops such as
    convolutions (and their transposes), dense, and batch matmul.
"""
def __init__(self) -> None:
ExprVisitor.__init__(self)
self.is_compute_intensive = False
def visit_call(self, call: relay.expr.Call) -> None:
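        """Mark the graph as compute-intensive if the call targets one of the listed ops."""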
compute_intensive_ops = {
"nn.conv1d",
"nn.conv2d",
"nn.conv2d_transpose",
"nn.conv3d",
"nn.conv3d_transpose",
"nn.dense",
"nn.batch_matmul",
"sum",
"prod",
"max",
"min",
"mean",
}
if isinstance(call.op, tvm.tir.op.Op):
if str(call.op) in compute_intensive_ops:
self.is_compute_intensive = True
return super().visit_call(call)
def is_graph_compute_intensive(self, subgraph: relay.expr.Expr) -> bool:
"""
        This function recursively visits the graph and checks whether it is compute-intensive.
"""
self.visit(subgraph)
return self.is_compute_intensive
def is_valid_subgraph(params: List[relay.expr.Var], body: relay.expr.Expr) -> bool:
"""Final check on whether the subgraph is valid and should be offloaded to TensorRT."""
# Remove invalid subgraphs for implicit batch mode.
if get_tensorrt_use_implicit_batch_mode():
input_batch_sizes = []
for var in params:
# In implicit batch mode, all inputs must have same batch size
# TODO: (codeislife99) : Fix different dynamic batch size inputs
if isinstance(var.checked_type, relay.TupleType):
                for tuple_type in var.checked_type.fields:
                    # Scalar inputs not allowed
                    if len(tuple_type.shape) == 0:
                        logger.info("tensorrt: scalar inputs not supported")
                        return False
                    if not isinstance(tuple_type.shape[0], tvm.tir.expr.Any):
                        input_batch_sizes.append(int(tuple_type.shape[0]))
else:
# Scalar inputs not allowed
if len(var.checked_type.shape) == 0:
logger.info("tensorrt: scalar inputs not supported")
return False
if not isinstance(var.checked_type.shape[0], tvm.tir.expr.Any):
input_batch_sizes.append(int(var.checked_type.shape[0]))
if len(input_batch_sizes) > 1 and len(set(input_batch_sizes)) != 1:
logger.info("tensorrt: inputs have different batch sizes: %s", input_batch_sizes)
return False
if get_tensorrt_remove_no_mac_subgraphs():
if not IsComputeIntensiveGraph().is_graph_compute_intensive(body):
logger.info("tensorrt: not a compute-intensize sub-graph")
return False
return True
def prune_tensorrt_subgraphs(mod: tvm.IRModule) -> tvm.IRModule:
"""
Un-partition those partitions which:
- have no multiply-accumulates (if remove_no_mac_subgraphs is True)
- can't actually be supported by TensorRT now that we see the whole partition."""
global_vars_to_inline = [
gv
for gv in mod.get_global_vars()
if mod[gv].attrs
and mod[gv].attrs["Compiler"] == "tensorrt"
and not is_valid_subgraph(mod[gv].params, mod[gv].body)
]
return relay.transform.InlineCompilerFunctionsBoundTo(global_vars_to_inline)(mod)
class RemoveDropout(ExprMutator):
"""
Removes all nn.dropout from an expr.
"""
def visit_tuple_getitem(self, op: TupleGetItem) -> relay.expr.Expr:
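        """Rewrite nn.dropout(x)[0] to x; other tuple projections are left untouched."""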
visit = super().visit_tuple_getitem(op)
if visit.index != 0:
return visit
if (
isinstance(visit.tuple_value, Call)
and isinstance(visit.tuple_value.op, Op)
and visit.tuple_value.op.name == "nn.dropout"
and visit.index == 0
):
return visit.tuple_value.args[0]
return visit
@transform.function_pass(opt_level=0)
class RemoveDropoutPass:
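    """Relay function pass that removes all nn.dropout ops from a function."""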
def transform_function(
self, func: relay.function.Function, mod: tvm.IRModule, _: tvm.transform.PassContext
) -> relay.function.Function:
return RemoveDropout().visit(func)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/contrib/vitis_ai.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, no-else-return, E1102
"""Vitis-AI codegen annotation of supported operators"""
import warnings
import numpy as np
from tvm import relay
import tvm._ffi
from tvm.relay import transform
from tvm.relay.expr import Tuple, TupleGetItem
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.op.annotation import compiler_begin, compiler_end
# Placeholder for PyXIR module
pyxir = None
def enabled():
"""Return whether Vitis-AI support is available"""
if not tvm.get_global_func("relay.ext.vitis_ai.available", True):
print("Skip because Vitis-AI codegen is not available.")
return False
return True
@transform.function_pass(opt_level=0)
class VitisAIAnnotationPass:
"""Responsible for annotating Relay expressions for Vitis-AI DPU accelerators
Parameters
----------
compiler : str
The compiler name used for annotations (`vitis_ai`).
dpu_target : str
The Vitis AI DPU target identifier.
params : dict
A dictionary containing the module's parameters.
"""
def __init__(self, compiler, dpu_target, params):
global pyxir
try:
if pyxir is None:
pyxir = __import__("pyxir")
__import__("pyxir.frontend.tvm")
except ImportError:
# add "from None" to silence
# "During handling of the above exception, another exception occurred"
raise ImportError(
"The pyxir package is required for the Vitis AI backend. "
"Please install it first. "
"Help: (https://tvm.apache.org/docs/deploy/vitis_ai.html) "
) from None
self.compiler = compiler
self.dpu_target = dpu_target
self.params = params
def transform_function(self, func, mod, ctx):
"""Transform function for annotating Relay module"""
annotator = self
class Annotator(tvm.relay.ExprMutator):
"""Annotator for Vitis-AI DPU accelerators"""
def visit_tuple(self, tup):
"""Add compiler_begin and compiler_end annotations to Tuple"""
field_list = []
cond = int(hash(tup))
for field in tup.fields:
if cond in annotator.relay_ids:
field_list.append(compiler_begin(super().visit(field), annotator.compiler))
else:
field_list.append(super().visit(field))
if cond in annotator.relay_ids:
return compiler_end(Tuple(field_list), annotator.compiler)
else:
return Tuple(field_list)
def visit_tuple_getitem(self, op):
"""Add compiler_begin and compiler_end annotations to TupleGetItem"""
if int(hash(op.tuple_value)) in annotator.relay_ids:
tuple_value = compiler_begin(super().visit(op.tuple_value), annotator.compiler)
return compiler_end(TupleGetItem(tuple_value, op.index), annotator.compiler)
else:
tuple_value = super().visit(op.tuple_value)
return TupleGetItem(tuple_value, op.index)
def visit_call(self, call):
"""Add compiler_begin and compiler_end annotations to the Call expr"""
if int(hash(call)) in annotator.relay_ids:
new_args = []
for arg in call.args:
ann = compiler_begin(super().visit(arg), annotator.compiler)
new_args.append(ann)
new_call = relay.Call(call.op, new_args, call.attrs, call.type_args)
return compiler_end(new_call, annotator.compiler)
else:
return super().visit_call(call)
xgraph = pyxir.frontend.tvm.from_relay(mod, self.params, postprocessing=None)
xgraph = pyxir.partition(xgraph, targets=[self.dpu_target])
layers = xgraph.get_layers()
relay_ids = [
list(np.array(layer.attrs["relay_id"]).flatten())
for layer in layers
if layer.target == self.dpu_target
]
self.relay_ids = [item for sublist in relay_ids for item in sublist]
return Annotator().visit(func)
def annotation(mod, params, target):
"""DEPRECATED
Annotate Relay expression for offloading operators to Vitis AI DPU accelerators
NOTE: This function does the same as the next one (`partition_for_vitis_ai`) but is
still here for backward compatibility"""
# We need type information for supporting models that contain operations that don't
# have a Relay to XLayer translation
warnings.warn(
"tvm.relay.op.contrib.vitis_ai.annotation() is being deprecated."
" Please use tvm.relay.op.contrib.vitis_ai.partition_for_vitis_ai() instead. "
" Check out https://tvm.apache.org/docs/deploy/vitis_ai.html for documentation. "
)
mod = relay.transform.InferType()(mod)
mod = VitisAIAnnotationPass("vitis_ai", target, params)(mod)
return mod
def partition_for_vitis_ai(mod, params=None, dpu=None, **opts):
"""Partition the Relay expression for offloading operators to Vitis AI DPU
Parameters
----------
mod : Module
The module to run passes on.
params : Optional[Dict[str, NDArray]]
Constant input parameters.
dpu : str
The DPU identifier (e.g. DPUCZDX8G-zcu104, DPUCADF8H)
Returns
-------
ret : Module
"""
if dpu is None:
raise ValueError("Please pass Vitis AI DPU identifier to the partitioning function")
if params:
mod["main"] = bind_params_by_name(mod["main"], params)
desired_layouts_in_partition = {
"nn.conv2d": ["NHWC", "default"],
"nn.upsampling": ["NHWC"],
"image.resize2d": ["NHWC"],
}
desired_layouts_in_main = {
"nn.conv2d": ["NCHW", "default"],
"nn.upsampling": ["NCHW"],
"image.resize2d": ["NCHW"],
}
seq = tvm.transform.Sequential(
[
transform.RemoveUnusedFunctions(),
transform.ConvertLayout(desired_layouts_in_partition),
transform.FoldConstant(),
transform.InferType(),
VitisAIAnnotationPass("vitis_ai", dpu, params),
transform.MergeCompilerRegions(),
transform.PartitionGraph(),
transform.RemoveUnusedFunctions(),
transform.ConvertLayout(desired_layouts_in_main),
transform.FoldConstant(),
]
)
with tvm.transform.PassContext(opt_level=3):
return seq(mod)
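# Illustrative usage sketch (not part of the original file; `mod` and `params` are assumed to
# come from a frontend import such as relay.frontend.from_onnx):
#
#   mod = partition_for_vitis_ai(mod, params, dpu="DPUCZDX8G-zcu104")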
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/dyn/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, redefined-builtin, invalid-name
"""The Relay namespace containing dynamic ops."""
from . import _algorithm
from . import _transform
from . import _tensor
from . import image
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/dyn/_algorithm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"Definition of classic algorithms"
# pylint: disable=invalid-name,unused-argument
from __future__ import absolute_import
from tvm.te.hybrid import script
from tvm.runtime import convert
from .. import strategy
from .. import op as _reg
from ..op import OpPattern, register_pattern
from ..op import register_strategy
# topk
register_strategy("dyn.topk", strategy.topk_strategy)
register_pattern("dyn.topk", OpPattern.OPAQUE)
@script
def _topk_shape_func_input_data(data, k, axis):
ndim = len(data.shape)
val_out = output_tensor((ndim,), "int64")
indices_out = output_tensor((ndim,), "int64")
for i in const_range(ndim):
if i != axis:
val_out[i] = int64(data.shape[i])
indices_out[i] = int64(data.shape[i])
else:
if k[0] < 1:
val_out[i] = int64(data.shape[i])
indices_out[i] = int64(data.shape[i])
else:
val_out[i] = int64(k[0])
indices_out[i] = int64(k[0])
return val_out, indices_out
@_reg.register_shape_func("dyn.topk", True)
def topk_shape_func(attrs, inputs, _):
"""
Shape func for topk.
"""
axis = attrs.axis
if axis < 0:
axis += len(inputs[0].shape)
val_out, indices_out = _topk_shape_func_input_data(inputs[0], inputs[1], convert(axis))
ret_type = attrs.ret_type
if ret_type == "both":
ret = [val_out, indices_out]
elif ret_type == "values":
ret = [val_out]
else:
ret = [indices_out]
return ret
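# Illustrative note (assumption, not in the original file): this shape function is used when
# `k` is supplied as a Relay expression rather than a Python integer, e.g.
#
#   relay.topk(data, k=k_expr, axis=-1)   # k_expr is a relay.Expr, so dyn.topk is created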
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/dyn/_make.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Constructor APIs"""
import tvm._ffi
tvm._ffi._init_api("relay.op.dyn._make", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/dyn/_tensor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, len-as-condition
"""Backend compiler related feature registration for dynamic ops"""
from tvm import topi
from ..op import register_shape_func, register_compute
from ..op import register_broadcast_schedule
from ..op import register_pattern, OpPattern
from .._tensor import full_shape_func, no_data_full_shape_func
# ones
@register_compute("dyn.ones")
def ones_compute(attrs, inputs, output_type):
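    """Compute definition of dyn.ones: fill the dynamically shaped output with 1.0."""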
assert len(inputs) == 1
return [topi.full(output_type.shape, output_type.dtype, 1.0)]
register_broadcast_schedule("dyn.ones")
register_pattern("dyn.ones", OpPattern.ELEMWISE)
@register_compute("dyn.zeros")
def zeros_compute(attrs, inputs, output_type):
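    """Compute definition of dyn.zeros: fill the dynamically shaped output with 0.0."""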
assert len(inputs) == 1
return [topi.full(output_type.shape, output_type.dtype, 0.0)]
register_broadcast_schedule("dyn.zeros")
register_pattern("dyn.zeros", OpPattern.ELEMWISE)
register_shape_func("dyn.broadcast_to", True, full_shape_func)
register_shape_func("dyn.ones", True, no_data_full_shape_func)
register_shape_func("dyn.zeros", True, no_data_full_shape_func)
register_shape_func("dyn.full", True, full_shape_func)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/dyn/_transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Backend compiler related feature registration"""
# pylint: disable=invalid-name,unused-argument, len-as-condition, too-many-nested-blocks, too-many-locals, too-many-arguments
from __future__ import absolute_import
from tvm.runtime import convert
from tvm.te.hybrid import script
from .. import op as _reg
_reg.register_broadcast_schedule("dyn.broadcast_to")
_reg.register_injective_schedule("dyn.reshape")
_reg.register_injective_schedule("dyn.expand_dims")
_reg.register_injective_schedule("dyn.squeeze")
_reg.register_broadcast_schedule("dyn.tile")
_reg.register_injective_schedule("dyn.one_hot")
_reg.register_injective_schedule("dyn.full")
_reg.register_injective_schedule("dyn.strided_slice")
_reg.register_injective_schedule("dyn.sparse_to_dense")
@script
def _reshape_shape_func_input_data(data_shape, newshape, ndim, allowzero):
out = output_tensor((ndim,), "int64")
src_idx = 0
dst_idx = 0
infer_idx = -1
copy = False
skip = 0
for i in const_range(len(newshape)):
if skip > 0:
skip -= 1
elif newshape[i] > 0:
out[dst_idx] = int64(newshape[i])
src_idx += 1
dst_idx += 1
elif newshape[i] == 0:
if allowzero:
out[dst_idx] = int64(newshape[i])
else:
out[dst_idx] = data_shape[src_idx]
src_idx += 1
dst_idx += 1
elif newshape[i] == -1:
assert infer_idx < 0, "One and only one dim can be inferred"
out[dst_idx] = int64(1)
infer_idx = i
src_idx += 1
dst_idx += 1
elif newshape[i] == -2:
assert False, "Value -2 is not valid in newshape argument of dynamic reshape"
elif newshape[i] == -3:
assert data_shape.shape[0] - src_idx > 1, "Not enough dims in input shape for -3"
out[dst_idx] = data_shape[src_idx] * data_shape[src_idx + 1]
src_idx += 2
dst_idx += 1
elif newshape[i] == -4:
assert False, "Value -4 is not valid in newshape argument of dynamic reshape"
else:
assert False, "Invalid special values in new shape"
if len(data_shape.shape) > 0:
# if data is not constant, we can then handle -1 and -2
if copy:
for i in range(src_idx, data_shape.shape[0]):
out[dst_idx] = data_shape[i]
dst_idx += 1
if infer_idx >= 0:
old_size = int64(1)
for i in const_range(data_shape.shape[0]):
old_size *= data_shape[i]
new_size = int64(1)
for i in const_range(out.shape[0]):
new_size *= out[i]
out[infer_idx] = old_size // new_size
return out
@_reg.register_shape_func("dyn.reshape", [False, True])
def dynamic_reshape_shape_func(attrs, inputs, out_ndims):
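    """
    Shape function for dyn.reshape op.
    """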
allowzero = attrs.allowzero
return [_reshape_shape_func_input_data(*inputs, out_ndims[0], convert(allowzero))]
@script
def _expand_dims_shape_func_input_data(data, axis, ndims, num_newaxis):
out = output_tensor((ndims,), "int64")
for i in const_range(ndims):
if i < axis:
# We multiply by a check (i < len(data.shape)) to avoid
# a constant folding mechanism leading to an overflow
out[i] = int64(data.shape[i * (i < len(data.shape))])
elif i - num_newaxis < axis:
out[i] = int64(1)
else:
out[i] = int64(
# We can't use axis in indices as it is not constant but we can
# use negative indices (kind of, have to manually do it)
data.shape[
(i - num_newaxis) * (i - num_newaxis >= 0)
+ (i - num_newaxis + len(data.shape)) * (i - num_newaxis < 0)
]
)
return out
@_reg.register_shape_func("dyn.expand_dims", [True, True])
def dynamic_expand_dims_shape_func(attrs, inputs, out_ndims):
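    """
    Shape function for dyn.expand_dims op.
    """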
return [
_expand_dims_shape_func_input_data(
inputs[0],
inputs[1],
out_ndims[0],
convert(attrs.num_newaxis),
)
]
@script
def _tile_shape_func(data, reps, ndim, tndim, rndim):
out = output_tensor((tndim,), "int64")
if ndim == rndim:
for i in const_range(tndim):
out[i] = int64(data.shape[i] * reps[i])
elif ndim > rndim:
ngap = ndim - rndim
for i in const_range(ndim):
if i < ngap:
out[i] = int64(data.shape[i])
else:
out[i] = int64(data.shape[i] * reps[i - ngap])
else:
rgap = rndim - ndim
for i in const_range(rndim):
if i < rgap:
out[i] = int64(reps[i])
else:
out[i] = int64(reps[i] * data.shape[i - rgap])
return out
@_reg.register_shape_func("dyn.tile", True)
def tile_shape_func(attrs, inputs, _):
"""
Shape function for dyn.tile op.
"""
reps = inputs[1]
ndim = len(inputs[0].shape)
rndim = inputs[1].shape[0].value
tndim = ndim if ndim > rndim else rndim
return [_tile_shape_func(inputs[0], reps, convert(ndim), convert(tndim), convert(rndim))]
@script
def _onehot_shape_func(dshape, k, axis):
ndim = len(dshape) + 1
out = output_tensor((ndim,), "int64")
for i in const_range(axis):
out[i] = int64(dshape[i])
    out[axis] = int64(k[0])
for j in const_range(axis + 1, ndim):
out[j] = int64(dshape[j - 1])
return out
@_reg.register_shape_func("dyn.one_hot", True)
def one_hot_shape_func(attrs, inputs, _):
"""
Shape function for dyn.one_hot op.
"""
axis = len(inputs[0].shape) if attrs.axis == -1 else attrs.axis
return [_onehot_shape_func(inputs[0].shape, inputs[3], convert(axis))]
@script
def _strided_slice_shape_func_input_data(data_shape, begin, end, strides, slice_mode):
ndim = len(data_shape)
out = output_tensor((ndim,), "int64")
for i in const_range(ndim):
dim_size = int64(data_shape[i])
cbegin = int64(0)
cend = dim_size
cstride = int64(1)
if strides.shape[0] > i:
cstride = int64(strides[i])
if begin.shape[0] > i:
cbegin = int64(begin[i])
elif cstride < 0:
cbegin = dim_size
if end.shape[0] <= i:
if cstride < 0:
cend = int64(0)
elif slice_mode != 0:
cstride = int64(1)
if end[i] < 0:
cend = dim_size
else:
cend = cbegin + int64(end[i])
else:
if end[i] > data_shape[i]:
cend = dim_size
else:
cend = int64(end[i])
assert cstride != 0, "Strides can't be zero."
if cbegin < 0:
cbegin += dim_size
if cend < 0:
cend += dim_size
if cstride < 0:
if cend < 0:
cend = int64(-1)
if cbegin > dim_size - 1:
cbegin = dim_size - 1
slice_range = cbegin - cend
step = -cstride
else:
slice_range = cend - cbegin
step = cstride
out[i] = int64(ceil_div(slice_range, step))
return out
@_reg.register_shape_func("dyn.strided_slice", [False, True, True, True])
def strided_slice_shape_func(attrs, inputs, _):
"""
Shape func for strided_slice
"""
slice_mode = convert(0 if attrs.slice_mode == "end" else 1)
return [_strided_slice_shape_func_input_data(*inputs, slice_mode)]
@script
def _sparse_to_dense_shape_func(output_shape, ndim):
out = output_tensor((ndim,), "int64")
for i in const_range(ndim):
out[i] = int64(output_shape[i])
return out
@_reg.register_shape_func("dyn.sparse_to_dense", True)
def sparse_to_dense_shape_func(attrs, inputs, out_ndims):
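    """
    Shape function for dyn.sparse_to_dense op.
    """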
return [_sparse_to_dense_shape_func(inputs[3], out_ndims[0])]
@script
def _squeeze_shape_func_input_data(data, axis, ndims):
out = output_tensor((ndims,), "int64")
out_i = 0
for i in const_range(data.shape[0]):
not_in_axis = True
for j in const_range(axis.shape[0]):
if i == axis[j]:
not_in_axis = False
if not_in_axis:
out[out_i] = int64(data[i])
out_i += 1
return out
@_reg.register_shape_func("dyn.squeeze", [False, True])
def dynamic_squeeze_shape_func(attrs, inputs, out_ndims):
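    """
    Shape function for dyn.squeeze op.
    """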
return [_squeeze_shape_func_input_data(inputs[0], inputs[1], out_ndims[0])]
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/dyn/image/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, redefined-builtin, invalid-name
"""The Relay namespace containing dynamic image ops."""
from . import _image
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/dyn/image/_image.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Backend compiler related feature registration"""
from __future__ import absolute_import
import tvm.topi
from tvm.runtime import convert
from tvm.te.hybrid import script
from tvm.topi.utils import nchw_pack_layout, nchw_xc_layout
from ... import op as reg
# resize
@reg.register_compute("dyn.image.resize2d")
def compute_resize2d(attrs, inputs, out_type):
"""
Compute function calls into topi
"""
layout = attrs.layout
method = attrs.method
coord_trans = attrs.coordinate_transformation_mode
rounding_method = attrs.rounding_method
cubic_alpha = attrs.cubic_alpha
cubic_exclude = attrs.cubic_exclude
extrapolation_value = attrs.extrapolation_value
out_dtype = attrs.out_dtype
return [
tvm.topi.image.resize2d(
inputs[0],
inputs[2],
inputs[1],
layout,
method,
coord_trans,
rounding_method,
cubic_alpha,
cubic_exclude,
extrapolation_value,
out_dtype,
out_type.shape,
)
]
reg.register_injective_schedule("dyn.image.resize2d")
@script
def _resize2d_shape_func(dshape, size, ndim, height_axis, width_axis):
out = output_tensor((ndim,), "int64")
for i in const_range(ndim):
out[i] = int64(dshape[i])
out[height_axis] = int64(size[0])
out[width_axis] = int64(size[1])
return out
@reg.register_shape_func("dyn.image.resize2d", True)
def resize2d_shape_func(attrs, inputs, _):
"""
Shape function for dyn.image.resize op.
"""
layout = attrs.layout
if nchw_pack_layout(layout) or nchw_xc_layout(layout):
out = [
_resize2d_shape_func(
inputs[0].shape, inputs[1], convert(len(inputs[0].shape)), convert(2), convert(3)
)
]
else:
height_axis = width_axis = 1
for i, letter in enumerate(layout):
if letter == "H":
height_axis = i
if letter == "W":
width_axis = i
out = [
_resize2d_shape_func(
inputs[0].shape,
inputs[1],
convert(len(inputs[0].shape)),
convert(height_axis),
convert(width_axis),
)
]
return out
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/dyn/image/_make.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Constructor APIs"""
import tvm._ffi
tvm._ffi._init_api("relay.op.dyn.image._make", __name__)
| https://github.com/zk-ml/tachikoma |