file_path | content | repo |
---|---|---|
python/tvm/relay/op/dyn/nn/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, redefined-builtin, invalid-name
"""The Relay namespace containing dynamic ops."""
from . import _nn
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/dyn/nn/_make.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Constructor APIs"""
import tvm._ffi
tvm._ffi._init_api("relay.op.dyn.nn._make", __name__)
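# _init_api looks up every registered global function whose name starts with
# "relay.op.dyn.nn._make." and exposes it as an attribute of this module,
# so callers can invoke the C++ op constructors as ordinary Python functions.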
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/dyn/nn/_nn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, invalid-name, unused-argument, too-many-arguments, consider-using-in
"""Backend compiler related feature registration for dynamic relay ops in nn namespace"""
from __future__ import absolute_import
from tvm import topi
from tvm.runtime import convert
from tvm.te.hybrid import script
from ...op import register_shape_func, register_compute
from ...op import register_injective_schedule, register_broadcast_schedule
# upsampling
@register_compute("dyn.nn.upsampling")
def compute_upsampling(attrs, inputs, out_dtype):
data = inputs[0]
scale_h = inputs[1]
scale_w = inputs[2]
layout = attrs.layout
method = attrs.method
align_corners = attrs.align_corners
return [
topi.nn.upsampling(data, scale_h, scale_w, layout, method, align_corners, out_dtype.shape)
]
# upsampling3d
@register_compute("dyn.nn.upsampling3d")
def compute_upsampling3d(attrs, inputs, out_dtype):
data = inputs[0]
scale_d = inputs[1]
scale_h = inputs[2]
scale_w = inputs[3]
layout = attrs.layout
method = attrs.method
coordinate_transformation_mode = attrs.coordinate_transformation_mode
return [
topi.nn.upsampling3d(
data,
scale_d,
scale_h,
scale_w,
layout,
method,
coordinate_transformation_mode,
out_dtype.shape,
)
]
register_injective_schedule("dyn.nn.upsampling")
register_injective_schedule("dyn.nn.upsampling3d")
register_broadcast_schedule("dyn.nn.pad")
#####################
# Shape functions #
#####################
# upsampling
@script
def _upsampling_shape_func(dshape, scale_h, scale_w, height_axis, width_axis):
out = output_tensor((4,), "int64")
for i in const_range(4):
out[i] = int64(dshape[i])
out[height_axis] = int64(round(dshape[height_axis] * scale_h[()]))
out[width_axis] = int64(round(dshape[width_axis] * scale_w[()]))
return out
@register_shape_func("dyn.nn.upsampling", True)
def upsampling_shape_func(attrs, inputs, _):
"""Shape function for upsampling. Supports NCHW and NHWC layouts."""
layout = attrs.layout
height_axis = width_axis = 1
for i, letter in enumerate(layout):
if letter == "H":
height_axis = i
if letter == "W":
width_axis = i
return [
_upsampling_shape_func(
inputs[0].shape, inputs[1], inputs[2], convert(height_axis), convert(width_axis)
)
]
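# A minimal plain-Python sketch (illustrative only; shapes and scales are
# assumptions) of what _upsampling_shape_func computes: scale the H and W
# dims by the given factors, round, and pass the other dims through.
def _example_upsampling_shape(dshape=(1, 3, 8, 8), scale_h=2.0, scale_w=1.5, layout="NCHW"):
    out = list(dshape)
    h, w = layout.index("H"), layout.index("W")
    out[h] = int(round(dshape[h] * scale_h))  # 8 * 2.0 -> 16
    out[w] = int(round(dshape[w] * scale_w))  # 8 * 1.5 -> 12
    return out  # [1, 3, 16, 12]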
# upsampling3d
@script
def _upsampling3d_shape_func(
dshape, scale_d, scale_h, scale_w, depth_axis, height_axis, width_axis
):
out = output_tensor((5,), "int64")
for i in const_range(5):
out[i] = int64(dshape[i])
out[depth_axis] = int64(round(dshape[depth_axis] * scale_d[()]))
out[height_axis] = int64(round(dshape[height_axis] * scale_h[()]))
out[width_axis] = int64(round(dshape[width_axis] * scale_w[()]))
return out
@register_shape_func("dyn.nn.upsampling3d", True)
def upsampling3d_shape_func(attrs, inputs, _):
"""Shape function for upsampling. Supports NCHW and NHWC layouts."""
layout = attrs.layout
depth_axis = height_axis = width_axis = 1
for i, letter in enumerate(layout):
if letter == "D":
depth_axis = i
if letter == "H":
height_axis = i
if letter == "W":
width_axis = i
return [
_upsampling3d_shape_func(
inputs[0].shape,
inputs[1],
inputs[2],
inputs[3],
convert(depth_axis),
convert(height_axis),
convert(width_axis),
)
]
# pad
@script
def _dyn_pad_shape_func(data, pad_width):
ndim = len(data.shape)
out = output_tensor((ndim,), "int64")
for i in const_range(ndim):
out[i] = int64(pad_width[i, 0] + pad_width[i, 1] + data.shape[i])
return out
@register_shape_func("dyn.nn.pad", True)
def pad_shape_func(attrs, inputs, data):
"""
Shape function for dynamic pad op.
"""
return [_dyn_pad_shape_func(inputs[0], inputs[1])]
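# A plain-Python sketch (illustrative values) of the rule in
# _dyn_pad_shape_func: each output dim is pad_before + input dim + pad_after.
def _example_dyn_pad_shape(data_shape=(1, 3, 8, 8), pad_width=((0, 0), (0, 0), (1, 1), (2, 2))):
    return [before + dim + after for dim, (before, after) in zip(data_shape, pad_width)]  # [1, 3, 10, 12]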
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/image/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Image network related operators."""
from .image import *
from . import _image
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/image/_image.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Backend compiler related feature registration"""
from __future__ import absolute_import
from tvm.te.hybrid import script
from tvm.runtime import convert
from tvm import topi
from tvm.topi.utils import get_const_tuple
from .. import op as reg
from .. import strategy
from ..op import OpPattern
from .image import resize1d, resize2d
# resize
@reg.register_compute("image.resize1d")
def compute_resize1d(attrs, inputs, out_type):
"""compute definition for resize1d op"""
size = attrs.size
roi = attrs.roi
layout = attrs.layout
method = attrs.method
coord_trans = attrs.coordinate_transformation_mode
rounding_method = attrs.rounding_method
cubic_alpha = attrs.cubic_alpha
cubic_exclude = attrs.cubic_exclude
extrapolation_value = attrs.extrapolation_value
out_dtype = attrs.out_dtype
return [
topi.image.resize1d(
inputs[0],
roi,
size,
layout,
method,
coord_trans,
rounding_method,
cubic_alpha,
cubic_exclude,
extrapolation_value,
out_dtype,
)
]
reg.register_injective_schedule("image.resize1d")
@reg.register_convert_op_layout("image.resize1d")
def convert_image_resize1d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for image resize1d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current resize op
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data input.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
new_attrs = dict(attrs)
assert len(desired_layouts) == 1, "Only one desired layout is expected"
desired_layout = str(desired_layouts[0])
assert desired_layout != "default", "Layout cannot be default"
new_attrs["layout"] = desired_layout
return resize1d(*inputs, **new_attrs)
@script
def _resize1d_shape_func(image_shape, size, batch_axis, width_axis, channel_axis):
out = output_tensor((3,), "int64")
out[batch_axis] = int64(image_shape[0])
    out[width_axis] = int64(size[0])
out[channel_axis] = image_shape[channel_axis]
return out
@reg.register_shape_func("image.resize1d", False)
def resize1d_shape_func(attrs, inputs, _):
"""
    Shape function for resize1d op.
    """
    layout = attrs.layout
    batch_axis = 0
    width_axis = channel_axis = 1
for i, letter in enumerate(layout):
if letter == "N":
batch_axis = i
if letter == "W":
width_axis = i
if letter == "C":
channel_axis = i
size = get_const_tuple(attrs.size)
return [
_resize1d_shape_func(
inputs[0],
convert(size),
convert(batch_axis),
convert(width_axis),
convert(channel_axis),
)
]
@reg.register_compute("image.resize2d")
def compute_resize2d(attrs, inputs, out_type):
"""compute definition for resize2d op"""
size = attrs.size
roi = attrs.roi
layout = attrs.layout
method = attrs.method
coord_trans = attrs.coordinate_transformation_mode
rounding_method = attrs.rounding_method
cubic_alpha = attrs.cubic_alpha
cubic_exclude = attrs.cubic_exclude
extrapolation_value = attrs.extrapolation_value
out_dtype = attrs.out_dtype
return [
topi.image.resize2d(
inputs[0],
roi,
size,
layout,
method,
coord_trans,
rounding_method,
cubic_alpha,
cubic_exclude,
extrapolation_value,
out_dtype,
)
]
reg.register_injective_schedule("image.resize2d")
@reg.register_convert_op_layout("image.resize2d")
def convert_image_resize2d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for image resize2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current resize op
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data input.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
new_attrs = dict(attrs)
assert len(desired_layouts) == 1, "Only one desired layout is expected"
desired_layout = str(desired_layouts[0])
assert desired_layout != "default", "Layout cannot be default"
new_attrs["layout"] = desired_layout
return resize2d(*inputs, **new_attrs)
@script
def _resize2d_shape_func(image_shape, size, batch_axis, height_axis, width_axis, channel_axis):
out = output_tensor((4,), "int64")
out[batch_axis] = int64(image_shape[0])
out[height_axis] = int64(size[0])
out[width_axis] = int64(size[1])
out[channel_axis] = image_shape[channel_axis]
return out
@reg.register_shape_func("image.resize2d", False)
def resize2d_shape_func(attrs, inputs, _):
"""
Shape function for resize2d op.
"""
    layout = attrs.layout
    batch_axis = 0
    height_axis = width_axis = channel_axis = 1
for i, letter in enumerate(layout):
if letter == "N":
batch_axis = i
if letter == "H":
height_axis = i
if letter == "W":
width_axis = i
if letter == "C":
channel_axis = i
size = get_const_tuple(attrs.size)
return [
_resize2d_shape_func(
inputs[0],
convert(size),
convert(batch_axis),
convert(height_axis),
convert(width_axis),
convert(channel_axis),
)
]
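# A plain-Python sketch (layout and sizes are illustrative assumptions) of
# _resize2d_shape_func: H and W come from `size`, the rest from the input.
def _example_resize2d_shape(image_shape=(1, 32, 32, 3), size=(64, 64), layout="NHWC"):
    out = list(image_shape)
    out[layout.index("H")] = size[0]
    out[layout.index("W")] = size[1]
    return out  # [1, 64, 64, 3]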
@reg.register_compute("image.resize3d")
def compute_resize3d(attrs, inputs, out_type):
"""compute definition for resize3d op"""
size = attrs.size
roi = attrs.roi
layout = attrs.layout
method = attrs.method
coord_trans = attrs.coordinate_transformation_mode
rounding_method = attrs.rounding_method
cubic_alpha = attrs.cubic_alpha
cubic_exclude = attrs.cubic_exclude
extrapolation_value = attrs.extrapolation_value
out_dtype = attrs.out_dtype
return [
topi.image.resize3d(
inputs[0],
roi,
size,
layout,
method,
coord_trans,
rounding_method,
cubic_alpha,
cubic_exclude,
extrapolation_value,
out_dtype,
)
]
reg.register_injective_schedule("image.resize3d")
# crop and resize
@reg.register_compute("image.crop_and_resize")
def compute_crop_and_resize(attrs, inputs, out_type):
crop_size = attrs.crop_size
layout = attrs.layout
method = attrs.method
extrapolation_value = attrs.extrapolation_value
out_dtype = attrs.out_dtype
return [
topi.image.crop_and_resize(
inputs[0],
inputs[1],
inputs[2],
crop_size,
layout,
method,
extrapolation_value,
out_dtype,
)
]
reg.register_injective_schedule("image.crop_and_resize")
@script
def _crop_and_resize_func(
image_shape, boxes_shape, crop_size, height_axis, width_axis, channel_axis
):
out = output_tensor((4,), "int64")
out[0] = boxes_shape[0]
out[height_axis] = int64(crop_size[0])
out[width_axis] = int64(crop_size[1])
out[channel_axis] = image_shape[channel_axis]
return out
@reg.register_shape_func("image.crop_and_resize", False)
def crop_and_resize_func(attrs, inputs, _):
"""
Shape function for crop_and_resize op.
"""
layout = attrs.layout
height_axis = width_axis = channel_axis = 1
for i, letter in enumerate(layout):
if letter == "H":
height_axis = i
if letter == "W":
width_axis = i
if letter == "C":
channel_axis = i
crop_size = get_const_tuple(attrs.crop_size)
return [
_crop_and_resize_func(
inputs[0],
inputs[1],
convert(crop_size),
convert(height_axis),
convert(width_axis),
convert(channel_axis),
)
]
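# A plain-Python sketch (NHWC and sizes assumed for illustration) of
# _crop_and_resize_func: the leading dim is num_boxes, the spatial dims come
# from crop_size, and channels are carried over from the image.
def _example_crop_and_resize_shape(image_shape=(1, 32, 32, 3), num_boxes=5, crop_size=(7, 7)):
    return [num_boxes, crop_size[0], crop_size[1], image_shape[3]]  # [5, 7, 7, 3]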
# dilation2d
reg.register_strategy("image.dilation2d", strategy.dilation2d_strategy)
reg.register_pattern("image.dilation2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# affine_grid
@reg.register_compute("image.affine_grid")
def compute_affine_grid(attrs, inputs, out_dtype):
target_shape = get_const_tuple(attrs.target_shape)
return [topi.image.affine_grid(inputs[0], target_shape)]
reg.register_injective_schedule("image.affine_grid")
@script
def _affine_grid_func(data, target_shape):
out = output_tensor((4,), "int64")
out[0] = int64(data[0])
out[1] = int64(2)
out[2] = int64(target_shape[0])
out[3] = int64(target_shape[1])
return out
@reg.register_shape_func("image.affine_grid", False)
def affine_grid_func(attrs, inputs, _):
"""
Shape function for affine_grid op.
"""
target_shape = get_const_tuple(attrs.target_shape)
return [_affine_grid_func(inputs[0], convert(target_shape))]
# grid_sample
@reg.register_compute("image.grid_sample")
def compute_grid_sample(attrs, inputs, out_dtype):
method = attrs.method
layout = attrs.layout
padding_mode = attrs.padding_mode
align_corners = attrs.align_corners
return [
topi.image.grid_sample(inputs[0], inputs[1], method, layout, padding_mode, align_corners)
]
reg.register_injective_schedule("image.grid_sample")
@script
def _grid_sample_func_nchw(data, grid):
out = output_tensor((4,), "int64")
out[0] = int64(data[0])
out[1] = int64(data[1])
out[2] = int64(grid[2])
out[3] = int64(grid[3])
return out
@script
def _grid_sample_func_ncdhw(data, grid):
out = output_tensor((5,), "int64")
out[0] = int64(data[0])
out[1] = int64(data[1])
out[2] = int64(grid[2])
out[3] = int64(grid[3])
out[4] = int64(grid[4])
return out
@reg.register_shape_func("image.grid_sample", False)
def grid_sample_func(attrs, inputs, _):
"""
Shape function for grid_sample op.
"""
if attrs.layout == "NCHW":
script_func = _grid_sample_func_nchw
elif attrs.layout == "NCDHW":
script_func = _grid_sample_func_ncdhw
else:
msg = f"layout {attrs.layout} is not supported"
raise ValueError(msg)
return [script_func(inputs[0], inputs[1])]
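# A plain-Python sketch (illustrative shapes) of _grid_sample_func_nchw:
# batch and channel come from data, spatial dims from the grid.
def _example_grid_sample_shape(data_shape=(1, 3, 16, 16), grid_shape=(1, 2, 8, 8)):
    return [data_shape[0], data_shape[1], grid_shape[2], grid_shape[3]]  # [1, 3, 8, 8]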
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/image/_make.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Constructor APIs"""
import tvm._ffi
tvm._ffi._init_api("relay.op.image._make", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/image/image.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Image operations."""
from . import _make
from ..dyn.image import _make as _dyn_make
from ...expr import Expr, Constant, const
def resize1d(
data,
size,
roi=None,
layout="NCW",
method="linear",
coordinate_transformation_mode="half_pixel",
rounding_method="",
cubic_alpha=-0.5,
cubic_exclude=0,
extrapolation_value=0.0,
out_dtype=None,
):
"""Image resize1d operator.
This operator takes data as input and does 1D scaling to the given scale factor.
In the default case, where the data_layout is `NCW`
with data of shape (n, c, w)
out will have a shape (n, c, size[0])
method indicates the algorithm to be used while calculating the out value
and method can be one of ("linear", "nearest_neighbor", "cubic")
Parameters
----------
data : relay.Expr
The input data to the operator.
size: Tuple of Int or Expr
The out size to which the image will be resized.
roi: Tuple of Float or Expr, optional
The region of interest for cropping the input image. Expected to be of
size 2, and format [start_w, end_w].
Only used if coordinate_transformation_mode is tf_crop_and_resize.
layout : str, optional
Layout of the input.
method : str, optional
        Scale method to be used [nearest_neighbor, linear, cubic].
coordinate_transformation_mode : string, optional
Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor. Definitions can be found
in topi/image/resize.py.
[half_pixel, align_corners, asymmetric, pytorch_half_pixel,
tf_half_pixel_for_nn, and tf_crop_and_resize].
rounding_method: string, optional
indicates how to find the "nearest" pixel in nearest_neighbor method
[round, floor, ceil]
cubic_alpha: float
Spline Coefficient for cubic interpolation
cubic_exclude: int
Flag to exclude exterior of the image during cubic interpolation
extrapolation_value: float
Fill value to use when roi is outside of the image
out_dtype : str, optional
Type to return. If left None returns the same type as input.
Returns
-------
result: relay.Expr
The resized result.
"""
if roi is None:
roi = [0.0] * 2
if isinstance(size, Constant):
size = list(size.data.numpy().astype("int32"))
if isinstance(roi, Constant):
roi = list(roi.data.numpy().astype("int32"))
if isinstance(size, Expr) or isinstance(roi, Expr):
        raise NotImplementedError(
            f"dyn.resize1d is not yet implemented, got size={size} and roi={roi}"
        )
return _make.resize1d(
data,
size,
roi,
layout,
method,
coordinate_transformation_mode,
rounding_method,
cubic_alpha,
cubic_exclude,
extrapolation_value,
out_dtype,
)
def resize2d(
data,
size,
roi=None,
layout="NCHW",
method="linear",
coordinate_transformation_mode="half_pixel",
rounding_method="",
cubic_alpha=-0.5,
cubic_exclude=0,
extrapolation_value=0.0,
out_dtype=None,
):
"""Image resize2d operator.
This operator takes data as input and does 2D scaling to the given scale factor.
In the default case, where the data_layout is `NCHW`
with data of shape (n, c, h, w)
out will have a shape (n, c, size[0], size[1])
method indicates the algorithm to be used while calculating the out value
and method can be one of ("linear", "nearest_neighbor", "cubic")
Parameters
----------
data : relay.Expr
The input data to the operator.
size: Tuple of Int or Expr
The out size to which the image will be resized.
roi: Tuple of Float or Expr, optional
The region of interest for cropping the input image. Expected to be of
size 4, and format [start_h, start_w, end_h, end_w].
Only used if coordinate_transformation_mode is tf_crop_and_resize.
layout : str, optional
Layout of the input.
method : str, optional
        Scale method to be used [nearest_neighbor, linear, cubic].
coordinate_transformation_mode : string, optional
Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor. Definitions can be found
in topi/image/resize.py.
[half_pixel, align_corners, asymmetric, pytorch_half_pixel,
tf_half_pixel_for_nn, and tf_crop_and_resize].
rounding_method: string, optional
indicates how to find the "nearest" pixel in nearest_neighbor method
[round, floor, ceil]
cubic_alpha: float
Spline Coefficient for bicubic interpolation
cubic_exclude: int
Flag to exclude exterior of the image during bicubic interpolation
extrapolation_value: float
Fill value to use when roi is outside of the image
out_dtype : str, optional
Type to return. If left None returns the same type as input.
Returns
-------
result: relay.Expr
The resized result.
"""
if roi is None:
roi = [0.0] * 4
if isinstance(size, Constant):
size = list(size.data.numpy().astype("int32"))
if isinstance(roi, Constant):
roi = list(roi.data.numpy().astype("float32"))
if isinstance(size, Expr) or isinstance(roi, Expr):
if not isinstance(size, Expr):
size = const(size, "int64")
if not isinstance(roi, Expr):
roi = const(roi, "float32")
return _dyn_make.resize2d(
data,
size,
roi,
layout,
method,
coordinate_transformation_mode,
rounding_method,
cubic_alpha,
cubic_exclude,
extrapolation_value,
out_dtype,
)
return _make.resize2d(
data,
size,
roi,
layout,
method,
coordinate_transformation_mode,
rounding_method,
cubic_alpha,
cubic_exclude,
extrapolation_value,
out_dtype,
)
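# A hedged usage sketch (variable names and shapes are assumptions): with a
# static `size` this lowers to image.resize2d; passing a relay Expr for
# `size` instead would take the dyn.image.resize2d branch above.
def _example_resize2d_usage():
    import tvm.relay as relay
    x = relay.var("x", shape=(1, 3, 32, 32), dtype="float32")
    return resize2d(x, size=(64, 64), layout="NCHW", method="linear")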
def resize3d(
data,
size,
roi=None,
layout="NCDHW",
method="linear",
coordinate_transformation_mode="half_pixel",
rounding_method="",
cubic_alpha=-0.5,
cubic_exclude=0,
extrapolation_value=0.0,
out_dtype=None,
):
"""Image resize3d operator.
This operator takes data as input and does 3D scaling to the given scale factor.
In the default case, where the data_layout is `NCDHW`
with data of shape `(n, c, d, h, w)`
out will have a shape `(n, c, size[0], size[1], size[2])`
method indicates the algorithm to be used while calculating the out value
and method can be one of ("linear", "nearest_neighbor", "cubic")
Parameters
----------
data : relay.Expr
The input data to the operator.
size: Tuple of Int or Expr
The out size to which the image will be resized.
roi: Tuple of Float or Expr, optional
The region of interest for cropping the input image. Expected to be of
size 6, and format [start_d, start_h, start_w, end_d, end_h, end_w].
Only used if coordinate_transformation_mode is tf_crop_and_resize.
layout : str, optional
Layout of the input.
method : str, optional
        Scale method to be used [nearest_neighbor, linear, cubic].
coordinate_transformation_mode : string, optional
Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor. Definitions can be found
in topi/image/resize.py.
[half_pixel, align_corners, asymmetric, pytorch_half_pixel,
tf_half_pixel_for_nn, and tf_crop_and_resize].
rounding_method: string, optional
indicates how to find the "nearest" pixel in nearest_neighbor method
[round, floor, ceil]
cubic_alpha: float
Spline Coefficient for cubic interpolation
cubic_exclude: int
Flag to exclude exterior of the image during cubic interpolation
extrapolation_value: float
Fill value to use when roi is outside of the image
out_dtype : str, optional
Type to return. If left None returns the same type as input.
Returns
-------
result: relay.Expr
The resized result.
"""
if roi is None:
roi = [0.0] * 6
if isinstance(size, Constant):
size = list(size.data.numpy().astype("int32"))
if isinstance(roi, Constant):
roi = list(roi.data.numpy().astype("int32"))
if isinstance(size, Expr) or isinstance(roi, Expr):
        raise NotImplementedError(
            f"dyn.resize3d is not yet implemented, got size={size} and roi={roi}"
        )
return _make.resize3d(
data,
size,
roi,
layout,
method,
coordinate_transformation_mode,
rounding_method,
cubic_alpha,
cubic_exclude,
extrapolation_value,
out_dtype,
)
def crop_and_resize(
data,
boxes,
box_indices,
crop_size,
layout,
method="bilinear",
extrapolation_value=0,
out_dtype=None,
):
"""Crop input images and resize them.
method indicates the algorithm to be used while calculating the out value
and method can be either "bilinear" or "nearest_neighbor".
Parameters
----------
data : relay.Expr
The input data to the operator.
boxes : relay.Expr
A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
the coordinates of a box.
box_indices : relay.Expr
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the image that
the i-th box refers to.
crop_size : Tuple of PrimExpr
The target size to which each box will be resized.
layout : str, optional
Layout of the input.
method : str, optional
Scale method, it can be either "nearest_neighbor" or "bilinear".
extrapolation_value : float, optional
Value used for extrapolation, when applicable.
out_dtype : str, optional
Type to return. If left None returns the same type as input.
Returns
-------
result: relay.Expr
The computed result.
"""
return _make.crop_and_resize(
data, boxes, box_indices, crop_size, layout, method, extrapolation_value, out_dtype
)
def dilation2d(
data,
weight,
strides=(1, 1),
padding=(0, 0),
dilations=(1, 1),
data_layout="NCHW",
kernel_layout="IHW",
out_dtype="",
):
r"""Morphological Dilation 2D.
This operator takes the weight as the dilation kernel and dilates it with
data to produce an output. In the default case, where the data_layout is `NCHW`
    and kernel_layout is `IHW`, dilation2d takes in a data Tensor with shape
`(batch_size, in_channels, height, width)`, and a weight Tensor with shape
`(channels, kernel_height, kernel_width)` to produce an output Tensor
with the following rule:
.. math::
\mbox{out}[b, c, y, x] = \max_{dy, dx}
\mbox{data}[b, c, \mbox{strides}[0] * y + dy, \mbox{strides}[1] * x + dx] +
\mbox{weight}[c, dy, dx]
Padding and dilation are applied to data and weight respectively before the computation.
This operator accepts data layout specification. Semantically, the operator
will convert the layout to the canonical layout
    (`NCHW` for data and `IHW` for weight) and perform the computation.
    Parameters
    ----------
    data : tvm.relay.Expr
        The input data to the operator.
    weight : tvm.relay.Expr
The weight expressions.
strides : Optional[Tuple[int]]
The strides of convolution.
padding : Optional[Tuple[int]]
The padding of convolution on both sides of inputs before convolution.
dilations : Optional[Tuple[int]]
Specifies the dilation rate to be used for dilated convolution.
data_layout : Optional[str]
Layout of the input.
kernel_layout : Optional[str]
Layout of the weight.
out_dtype : Optional[str]
Specifies the output data type.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.dilation2d(
data, weight, strides, padding, dilations, data_layout, kernel_layout, out_dtype
)
def affine_grid(data, target_shape=None):
"""affine_grid operator that generates 2D sampling grid.
This operation is described in https://arxiv.org/pdf/1506.02025.pdf. It generates a uniform
sampling grid within the target shape and normalizes it to [-1, 1]. The provided affine
transformation is then applied on the sampling grid.
Parameters
----------
data : tvm.Tensor
3-D with shape [batch, 2, 3]. The affine matrix.
target_shape: list/tuple of two int
Specifies the output shape (H, W).
Returns
-------
Output : tvm.Tensor
4-D with shape [batch, 2, target_height, target_width]
"""
return _make.affine_grid(data, target_shape)
def grid_sample(
data, grid, method="bilinear", layout="NCHW", padding_mode="zeros", align_corners=True
):
"""Applies grid sampling to input feature map.
Given :math:`data` and :math:`grid`, then for 4-D the output is computed by
.. math::
x_{src} = grid[batch, 0, y_{dst}, x_{dst}] \\
y_{src} = grid[batch, 1, y_{dst}, x_{dst}] \\
output[batch, channel, y_{dst}, x_{dst}] = G(data[batch, channel, y_{src}, x_{src}])
:math:`x_{dst}`, :math:`y_{dst}` enumerate all spatial locations in :math:`output`, and
:math:`G()` denotes the interpolation function.
The out-boundary points will be padded with zeros if padding_mode is "zeros", or
border pixel value if padding_mode is "border", or
inner pixel value if padding_mode is "reflection".
    The left-top corner (-1, -1) and right-bottom corner (1, 1) in grid will be mapped to
(0, 0) and (h - 1, w - 1) of data if align_corners is "True", or
(-0.5, -0.5) and (h - 0.5, w - 0.5) of data if align_corners is "False".
The shape of the output will be
4-D (data.shape[0], data.shape[1], grid.shape[2], grid.shape[3]), or
5-D (data.shape[0], data.shape[1], grid.shape[2], grid.shape[3], grid.shape[4]).
The operator assumes that :math:`grid` has been normalized to [-1, 1].
grid_sample often cooperates with affine_grid which generates sampling grids for grid_sample.
Parameters
----------
data : tvm.Tensor
4-D with shape [batch, in_channel, in_height, in_width], or
5-D with shape [batch, in_channel, in_depth, in_height, in_width]
grid : tvm.Tensor
4-D with shape [batch, 2, out_height, out_width], or
5-D with shape [batch, 3, out_depth, out_height, out_width]
method : str
The interpolation method, 4-D "nearest", "bilinear", "bicubic" and
5-D "nearest", "bilinear"("trilinear") are supported.
layout : str
The layout of input data and the output.
padding_mode : str
The padding mode for outside grid values, "zeros", "border", "reflection" are supported.
align_corners: bool
Geometrically, we consider the pixels of the input as squares rather than points.
If set to "True", the extrema ("-1" and "1") are considered as referring
to the center points of the input corner pixels. If set to "False", they
are instead considered as referring to the corner points of the input corner
pixels, making the sampling more resolution agnostic.
Returns
-------
Output : tvm.Tensor
4-D with shape [batch, in_channel, out_height, out_width], or
5-D with shape [batch, in_channel, out_depth, out_height, out_width]
"""
return _make.grid_sample(data, grid, method, layout, padding_mode, align_corners)
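# A hedged sketch (shapes are illustrative assumptions) of the pairing the
# docstring describes: affine_grid produces a normalized sampling grid that
# grid_sample then consumes.
def _example_affine_grid_sample():
    import tvm.relay as relay
    data = relay.var("data", shape=(1, 3, 16, 16), dtype="float32")
    theta = relay.var("theta", shape=(1, 2, 3), dtype="float32")  # affine matrices
    grid = affine_grid(theta, target_shape=(8, 8))  # (1, 2, 8, 8)
    return grid_sample(data, grid, method="bilinear", layout="NCHW")  # (1, 3, 8, 8)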
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/memory/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Operators for manipulating low level memory."""
from __future__ import absolute_import as _abs
from .memory import *
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/memory/_make.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Constructor APIs"""
import tvm._ffi
tvm._ffi._init_api("relay.op.memory._make", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/memory/memory.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return,invalid-name,len-as-condition,too-many-nested-blocks
"""Operators for manipulating low-level memory."""
from __future__ import absolute_import as _abs
from . import _make
def alloc_tensor(storage, offset, shape, dtype="float32", assert_shape=None):
"""Allocate a tensor with the provided shape, and dtype.
Parameters
----------
storage : tvm.relay.Expr
The storage to allocate from.
offset : tvm.relay.Expr
The offset to allocate from.
shape : tvm.relay.Expr
The shape of the tensor to allocate.
dtype: str
The dtype of the tensor.
    assert_shape : optional
        Controls the static shape when computed by a dynamic shape expression.
Returns
-------
result : tvm.relay.Expr
The alloc_tensor expression.
"""
return _make.alloc_tensor(storage, offset, shape, dtype, assert_shape)
def alloc_storage(size, alignment, device, dtype_hint="float32"):
"""Allocate a piece of tensor storage.
Parameters
----------
size : tvm.relay.Expr
The size of the allocation.
alignment : tvm.relay.Expr
The alignment of the allocation.
device : tvm.runtime.Device
The device of the allocation.
dtype_hint : str
The dtype hint of the allocation.
Returns
-------
result : tvm.relay.Expr
The alloc_storage expression.
"""
return _make.alloc_storage(size, alignment, device, dtype_hint)
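# A hedged sketch (all values are illustrative assumptions): pair
# alloc_storage with alloc_tensor to view a raw 64-byte, 64-byte-aligned
# CPU buffer as a (4, 4) float32 tensor (4 * 4 * 4 bytes = 64 bytes).
def _example_alloc_pair():
    import tvm
    from tvm import relay
    storage = alloc_storage(
        size=relay.const(64, "int64"),
        alignment=relay.const(64, "int64"),
        device=tvm.cpu(0),
    )
    shape = relay.const([4, 4], dtype="int64")
    return alloc_tensor(storage, relay.const(0, "int64"), shape, dtype="float32")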
def flatten_tuple_type(ty):
"""Return a sequence of the types contained in the tuple type in order.
Parameters
----------
ty: tvm.Type
The type to flatten.
Returns
-------
result: List[tvm.Type]
The types in their linear order.
"""
return _make.FlattenTupleType(ty)
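# A hedged sketch (illustrative types): flattening a nested tuple type
# yields its leaf tensor types in linear order.
def _example_flatten_tuple_type():
    from tvm import relay
    ty = relay.TupleType(
        [relay.TensorType((1,), "float32"), relay.TupleType([relay.TensorType((2,), "int32")])]
    )
    return flatten_tuple_type(ty)  # [TensorType((1,), float32), TensorType((2,), int32)]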
def from_tuple_type(ty, expr):
"""Convert an expression with the given type into a sequence of expressions.
Each expression maps to a field of the tuple or nested tuples in linear
order.
Parameters
----------
ty: tvm.Type
The type to unpack.
expr: tvm.relay.Expr
The expression from which to extract each sub-field.
Returns
-------
result: List[tvm.relay.Expr]
The list of sub-expressions.
"""
return _make.FromTupleType(ty, expr)
def to_tuple_type(ty, exprs):
"""Pack the sequence of expressions into the nested tuple type.
Parameters
----------
ty: tvm.Type
The type to pack with.
exprs: tvm.relay.Expr
The expressions to pack back into the nested tuple type.
Returns
-------
result: List[tvm.relay.Expr]
The packed tuple expression.
"""
return _make.ToTupleType(ty, exprs)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/nn/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Neural network related operators."""
from __future__ import absolute_import as _abs
from .nn import *
from . import _nn
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/nn/_make.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Constructor APIs"""
import tvm._ffi
tvm._ffi._init_api("relay.op.nn._make", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/nn/_nn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, invalid-name, unused-argument, too-many-arguments, consider-using-in
"""Backend compiler related feature registration"""
from __future__ import absolute_import
import re
from tvm import relay, topi
from tvm.runtime import convert
from tvm.te.hybrid import script
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.utils import get_pad_tuple
from ....ir import container
from ....tir import expr
from ...transform import LayoutConfig
from .. import op as reg
from .. import strategy
from .._tensor import elemwise_shape_func
from ..strategy.generic import is_depthwise_conv2d
# relu
reg.register_broadcast_schedule("nn.relu")
# softmax
reg.register_strategy("nn.softmax", strategy.softmax_strategy)
# fast softmax
reg.register_strategy("nn.fast_softmax", strategy.fast_softmax_strategy)
# log_softmax
reg.register_strategy("nn.log_softmax", strategy.log_softmax_strategy)
@reg.register_legalize("nn.matmul")
def legalize_matmul(attrs, inputs, types):
"""Legalize matmul op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current matmul
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.matmul_legalize(attrs, inputs, types)
# matmul
reg.register_strategy("nn.matmul", strategy.matmul_strategy)
@reg.register_legalize("nn.dense")
def legalize_dense(attrs, inputs, types):
"""Legalize dense op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.dense_legalize(attrs, inputs, types)
# dense
reg.register_strategy("nn.dense", strategy.dense_strategy)
@reg.register_alter_op_layout("nn.dense")
def alter_op_layout_dense(attrs, inputs, tinfos, out_type):
"""Alternate the layout of dense"""
return topi.nn.dense_alter_layout(attrs, inputs, tinfos, out_type)
# dense_pack
reg.register_strategy("nn.contrib_dense_pack", strategy.dense_pack_strategy)
# fifo_buffer
@reg.register_compute("nn.fifo_buffer")
def compute_fifo_buffer(attrs, inputs, out_type):
return [topi.nn.fifo_buffer(inputs[0], inputs[1], axis=attrs.get_int("axis"))]
reg.register_injective_schedule("nn.fifo_buffer")
@reg.register_legalize("nn.batch_matmul")
def legalize_batch_matmul(attrs, inputs, types):
"""Legalize batch_matmul op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.batch_matmul_legalize(attrs, inputs, types)
# batch_matmul
reg.register_strategy("nn.batch_matmul", strategy.batch_matmul_strategy)
# batch_norm
reg.register_strategy("nn.batch_norm", strategy.batch_norm_strategy)
# sparse_dense
@reg.register_compute("nn.sparse_dense")
def compute_sparse_dense(attrs, inputs, out_type):
"""Compute definition of sparse_dense"""
return [topi.nn.sparse_dense(inputs[0], inputs[1], inputs[2], inputs[3], attrs["sparse_lhs"])]
reg.register_strategy("nn.sparse_dense", strategy.sparse_dense_strategy)
@reg.register_alter_op_layout("nn.sparse_dense")
def alter_op_layout_sparse_dense(attrs, inputs, tinfos, out_type):
"""Alternate the layout of sparse_dense"""
return topi.nn.sparse_dense_alter_layout(attrs, inputs, tinfos, out_type)
# sparse_add
reg.register_strategy("nn.sparse_add", strategy.sparse_add_strategy)
@reg.register_compute("nn.internal.sparse_dense_padded")
def compute_sparse_dense_padded(attrs, inputs, out_type):
"""Compute definition of sparse_dense_padded"""
raise NotImplementedError("nn.internal.sparse_dense_padded is only available on cuda")
reg.register_strategy("nn.internal.sparse_dense_padded", strategy.sparse_dense_padded_strategy)
# sparse_transpose
@reg.register_compute("nn.sparse_transpose")
def compute_sparse_transpose(attrs, inputs, out_type):
"""Compute definition of sparse_transpose"""
return topi.nn.sparse_transpose(inputs[0], inputs[1], inputs[2])
reg.register_schedule("nn.sparse_transpose", strategy.schedule_sparse_transpose)
# sparse_conv2d
@reg.register_compute("nn.sparse_conv2d")
def compute_sparse_conv2d(attrs, inputs, out_type):
"""Compute definition of sparse_conv2d"""
return [
topi.nn.sparse_conv2d(
inputs[0], inputs[1], inputs[2], inputs[3], attrs["layout"], attrs["kernel_size"]
)
]
reg.register_strategy("nn.sparse_conv2d", strategy.sparse_conv2d_strategy)
# conv1d
reg.register_strategy("nn.conv1d", strategy.conv1d_strategy)
# conv2d
reg.register_strategy("nn.conv2d", strategy.conv2d_strategy)
@reg.register_alter_op_layout("nn.conv2d")
def alter_op_layout_conv2d(attrs, inputs, tinfos, out_type):
"""Alternate the layout of conv2d"""
return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
@reg.register_legalize("nn.conv2d")
def legalize_conv2d(attrs, inputs, types):
"""Legalize conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.conv2d_legalize(attrs, inputs, types)
@reg.register_convert_op_layout("nn.conv2d")
def convert_conv2d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
data, weight = inputs
# First check if there is a LayoutConfig scope, and if so, whether
# it indicates we should ignore this layer or not.
layout_config = LayoutConfig.current
if layout_config is not None:
skip_layer = layout_config.check_skip()
if skip_layer:
return relay.nn.conv2d(data, weight, **attrs)
# Prepare new layout.
new_attrs = dict(attrs)
assert len(desired_layouts) == 2, "A desired layout is expected for both of nn.conv2d's inputs"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs["data_layout"] = desired_data_layout
need_tile = re.match(r"NCHW(\d*)c", desired_data_layout)
if desired_kernel_layout != "default" and not need_tile:
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.nn.conv2d(data, weight, **new_attrs)
# Handle default kernel layouts
if desired_data_layout == "NCHW":
new_attrs["kernel_layout"] = "OIHW"
return relay.nn.conv2d(data, weight, **new_attrs)
elif desired_data_layout == "NHWC":
# Check for depthwise convolution.
data_info, weight_info = tinfos
if is_depthwise_conv2d(
data_info.shape,
attrs["data_layout"],
weight_info.shape,
attrs["kernel_layout"],
attrs["groups"],
):
new_attrs["kernel_layout"] = "HWOI"
else:
new_attrs["kernel_layout"] = "HWIO"
return relay.nn.conv2d(data, weight, **new_attrs)
elif desired_data_layout == "HWNC":
new_attrs["kernel_layout"] = "HWOI"
return relay.nn.conv2d(data, weight, **new_attrs)
elif need_tile:
assert desired_kernel_layout != "default", "Kernel layout cannot be default."
tile = int(need_tile.group(1))
if isinstance(data, relay.expr.Var) and data.checked_type.shape[1] % tile != 0:
return relay.nn.conv2d(data, weight, **attrs)
else:
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.nn.contrib_conv2d_nchwc(data, weight, **new_attrs)
raise ValueError("Layout %s is not yet supported." % desired_data_layout)
# conv2d_transpose
reg.register_strategy("nn.conv2d_transpose", strategy.conv2d_transpose_strategy)
@reg.register_legalize("nn.conv2d_transpose")
def legalize_conv2d_transpose(attrs, inputs, types):
"""Legalize conv2d_transpose op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current Transposed convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.conv2d_transpose_legalize(attrs, inputs, types)
@reg.register_convert_op_layout("nn.conv2d_transpose")
def convert_conv2d_transpose(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for conv2d_transpose op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
data, weight = inputs
new_attrs = dict(attrs)
    assert (
        len(desired_layouts) == 2
    ), "A desired layout is expected for both of nn.conv2d_transpose's inputs"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs["data_layout"] = desired_data_layout
if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.nn.conv2d_transpose(data, weight, **new_attrs)
# Handle default kernel layouts
if desired_data_layout == "NCHW":
new_attrs["kernel_layout"] = "IOHW"
return relay.nn.conv2d_transpose(data, weight, **new_attrs)
elif desired_data_layout == "NHWC":
new_attrs["kernel_layout"] = "HWIO"
return relay.nn.conv2d_transpose(data, weight, **new_attrs)
raise ValueError("Layout %s is not yet supported." % desired_data_layout)
# conv3d_transpose
reg.register_strategy("nn.conv3d_transpose", strategy.conv3d_transpose_strategy)
@reg.register_legalize("nn.conv3d_transpose")
def legalize_conv3d_transpose(attrs, inputs, types):
"""Legalize conv3d_transpose op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current Transposed convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.conv3d_transpose_legalize(attrs, inputs, types)
# conv3d
reg.register_strategy("nn.conv3d", strategy.conv3d_strategy)
@reg.register_alter_op_layout("nn.conv3d")
def alter_op_layout_conv3d(attrs, inputs, tinfos, out_type):
"""Alternate the layout of conv3d"""
return topi.nn.conv3d_alter_layout(attrs, inputs, tinfos, out_type)
@reg.register_convert_op_layout("nn.conv3d")
def convert_conv3d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for conv3d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
data, weight = inputs
new_attrs = dict(attrs)
assert len(desired_layouts) == 2, "A desired layout is expected for both of nn.conv3d's inputs"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs["data_layout"] = desired_data_layout
if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.nn.conv3d(data, weight, **new_attrs)
# Handle default kernel layouts
if desired_data_layout == "NCDHW":
new_attrs["kernel_layout"] = "OIDHW"
return relay.nn.conv3d(data, weight, **new_attrs)
elif desired_data_layout == "NDHWC":
new_attrs["kernel_layout"] = "DHWIO"
return relay.nn.conv3d(data, weight, **new_attrs)
raise ValueError("Layout %s is not yet supported" % desired_data_layout)
# conv3d_winograd related operators
reg.register_strategy(
"nn.contrib_conv3d_winograd_without_weight_transform",
strategy.conv3d_winograd_without_weight_transform_strategy,
)
@reg.register_compute("nn.contrib_conv3d_winograd_weight_transform")
def compute_contrib_conv3d_winograd_weight_transform(attrs, inputs, out_dtype):
"""Compute definition of contrib_conv3d_winograd_weight_transform"""
out = topi.nn.conv3d_winograd_weight_transform(inputs[0], attrs.get_int("tile_size"))
return [out]
reg.register_schedule(
"nn.contrib_conv3d_winograd_weight_transform",
strategy.schedule_conv3d_winograd_weight_transform,
)
# conv1d_transpose
reg.register_strategy("nn.conv1d_transpose", strategy.conv1d_transpose_strategy)
# bias_add
reg.register_injective_schedule("nn.bias_add")
# max_pool1d
reg.register_schedule("nn.max_pool1d", strategy.schedule_pool)
# max_pool2d
reg.register_schedule("nn.max_pool2d", strategy.schedule_pool)
@reg.register_convert_op_layout("nn.max_pool2d")
def convert_max_pool2d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for max_pool2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current pooling
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of one layout string
layout string defining our desired layout for input and output.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
new_attrs = dict(attrs)
new_attrs["layout"] = str(desired_layouts[0])
new_attrs["out_layout"] = str(desired_layouts[0])
return relay.nn.max_pool2d(*inputs, **new_attrs)
# max_pool3d
reg.register_schedule("nn.max_pool3d", strategy.schedule_pool)
# avg_pool1d
reg.register_schedule("nn.avg_pool1d", strategy.schedule_pool)
# avg_pool2d
reg.register_schedule("nn.avg_pool2d", strategy.schedule_pool)
@reg.register_convert_op_layout("nn.avg_pool2d")
def convert_avg_pool2d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for avg_pool2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current pooling
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of one layout string
layout string defining our desired layout for input and output.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
new_attrs = dict(attrs)
new_attrs["layout"] = str(desired_layouts[0])
new_attrs["out_layout"] = str(desired_layouts[0])
return relay.nn.avg_pool2d(*inputs, **new_attrs)
# avg_pool3d
reg.register_schedule("nn.avg_pool3d", strategy.schedule_pool)
# max_pool2d_grad
reg.register_schedule("nn.max_pool2d_grad", strategy.schedule_pool_grad)
# avg_pool2d_grad
reg.register_schedule("nn.avg_pool2d_grad", strategy.schedule_pool_grad)
# adaptive_max_pool1d
reg.register_schedule("nn.adaptive_max_pool1d", strategy.schedule_adaptive_pool)
# adaptive_avg_pool1d
reg.register_schedule("nn.adaptive_avg_pool1d", strategy.schedule_adaptive_pool)
# global_max_pool2d
reg.register_schedule("nn.global_max_pool2d", strategy.schedule_adaptive_pool)
@reg.register_convert_op_layout("nn.global_max_pool2d")
def convert_global_max_pool2d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for global_max_pool2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current pooling
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of one layout string
layout string defining our desired layout for input and output.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
new_attrs = dict(attrs)
new_attrs["layout"] = str(desired_layouts[0])
new_attrs["out_layout"] = str(desired_layouts[0])
return relay.nn.global_max_pool2d(*inputs, **new_attrs)
# global_avg_pool2d
reg.register_schedule("nn.global_avg_pool2d", strategy.schedule_adaptive_pool)
@reg.register_convert_op_layout("nn.global_avg_pool2d")
def convert_global_avg_pool2d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for global_avg_pool2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current pooling
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of one layout string
layout string defining our desired layout for input and output.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
new_attrs = dict(attrs)
new_attrs["layout"] = str(desired_layouts[0])
new_attrs["out_layout"] = str(desired_layouts[0])
return relay.nn.global_avg_pool2d(*inputs, **new_attrs)
# adaptive_max_pool2d
reg.register_schedule("nn.adaptive_max_pool2d", strategy.schedule_adaptive_pool)
# adaptive_avg_pool2d
reg.register_schedule("nn.adaptive_avg_pool2d", strategy.schedule_adaptive_pool)
# adaptive_max_pool3d
reg.register_schedule("nn.adaptive_max_pool3d", strategy.schedule_adaptive_pool)
# adaptive_avg_pool3d
reg.register_schedule("nn.adaptive_avg_pool3d", strategy.schedule_adaptive_pool)
# leaky_relu
reg.register_broadcast_schedule("nn.leaky_relu")
# prelu
reg.register_broadcast_schedule("nn.prelu")
# flatten
reg.register_broadcast_schedule("nn.batch_flatten")
# lrn
@reg.register_compute("nn.lrn")
def compute_lrn(attrs, inputs, out_dtype):
"""Compute definition of lrn"""
assert len(inputs) == 1
return [topi.nn.lrn(inputs[0], attrs.size, attrs.axis, attrs.alpha, attrs.beta, attrs.bias)]
reg.register_schedule("nn.lrn", strategy.schedule_lrn)
# upsampling
@reg.register_compute("nn.upsampling")
def compute_upsampling(attrs, inputs, out_dtype):
scale_h = attrs.scale_h
scale_w = attrs.scale_w
layout = attrs.layout
method = attrs.method
align_corners = attrs.align_corners
return [topi.nn.upsampling(inputs[0], scale_h, scale_w, layout, method, align_corners)]
reg.register_injective_schedule("nn.upsampling")
# upsampling3d
@reg.register_compute("nn.upsampling3d")
def compute_upsampling3d(attrs, inputs, out_dtype):
scale_d = attrs.scale_d
scale_h = attrs.scale_h
scale_w = attrs.scale_w
layout = attrs.layout
method = attrs.method
coordinate_transformation_mode = attrs.coordinate_transformation_mode
return [
topi.nn.upsampling3d(
inputs[0], scale_d, scale_h, scale_w, layout, method, coordinate_transformation_mode
)
]
reg.register_injective_schedule("nn.upsampling3d")
# pad
reg.register_schedule("nn.pad", strategy.schedule_pad)
# mirror_pad
@reg.register_compute("nn.mirror_pad")
def compute_mirror_pad(attrs, inputs, out_dtype):
pad_before, pad_after = list(zip(*attrs.pad_width))
mode = attrs.mode
out = topi.nn.mirror_pad(inputs[0], pad_before=pad_before, pad_after=pad_after, mode=mode)
return [out]
reg.register_broadcast_schedule("nn.mirror_pad")
@script
def _mirror_pad_func(data_shape, pad_width):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(data_shape.shape[0]):
out[i] = data_shape[i] + int64(pad_width[i][0]) + int64(pad_width[i][1])
return out
@reg.register_shape_func("nn.mirror_pad", False)
def mirror_pad_func(attrs, inputs, _):
pad_width_tuple = [get_const_tuple(p) for p in attrs.pad_width]
return [_mirror_pad_func(inputs[0], convert(pad_width_tuple))]
# conv2d_winograd related operators
reg.register_strategy(
"nn.contrib_conv2d_winograd_without_weight_transform",
strategy.conv2d_winograd_without_weight_transform_strategy,
)
# conv2d_gemm related operators
reg.register_strategy(
"nn.contrib_conv2d_gemm_without_weight_transform",
strategy.conv2d_gemm_without_weight_transform_strategy,
)
@reg.register_compute("nn.contrib_conv2d_gemm_weight_transform")
def compute_contrib_conv2d_gemm_weight_transform(attrs, inputs, out_dtype):
"""Compute definition of contrib_conv2d_gemm_weight_transform"""
out = topi.nn.conv2d_gemm_weight_transform(inputs[0], attrs.tile_rows, attrs.tile_cols)
return [out]
reg.register_schedule(
"nn.contrib_conv2d_gemm_weight_transform", strategy.schedule_conv2d_gemm_weight_transform
)
@reg.register_compute("nn.contrib_conv2d_winograd_weight_transform")
def compute_contrib_conv2d_winograd_weight_transform(attrs, inputs, out_dtype):
"""Compute definition of contrib_conv2d_winograd_weight_transform"""
out = topi.nn.conv2d_winograd_weight_transform(inputs[0], attrs.get_int("tile_size"))
return [out]
reg.register_schedule(
"nn.contrib_conv2d_winograd_weight_transform",
strategy.schedule_conv2d_winograd_weight_transform,
)
@reg.register_compute("nn.contrib_conv2d_winograd_nnpack_weight_transform")
def compute_contrib_conv2d_winograd_nnpack_weight_transform(attrs, inputs, out_dtype):
"""Compute definition of contrib_conv2d_winograd_nnpack_weight_transform"""
convolution_algorithm = attrs.get_int("convolution_algorithm")
out = topi.nn.conv2d_winograd_nnpack_weight_transform(
inputs[0], convolution_algorithm, out_dtype
)
return [out]
reg.register_schedule(
"nn.contrib_conv2d_winograd_nnpack_weight_transform",
strategy.schedule_conv2d_winograd_nnpack_weight_transform,
)
# conv2d_NCHWc
reg.register_strategy("nn.contrib_conv2d_NCHWc", strategy.conv2d_NCHWc_strategy)
# depthwise_conv2d_NCHWc
reg.register_strategy("nn.contrib_depthwise_conv2d_NCHWc", strategy.depthwise_conv2d_NCHWc_strategy)
# deformable_conv2d
reg.register_strategy("nn.deformable_conv2d", strategy.deformable_conv2d_strategy)
@reg.register_alter_op_layout("nn.deformable_conv2d")
def alter_op_layout_deformable_conv2d(attrs, inputs, tinfos, out_type):
"""Alternate the layout of deformable conv2d"""
return None
@reg.register_legalize("nn.deformable_conv2d")
def legalize_deformable_conv2d(attrs, inputs, types):
"""Legalize deformable conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return None
@reg.register_convert_op_layout("nn.deformable_conv2d")
def convert_deformable_conv2d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for deformable conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
data, offset, weight = inputs
new_attrs = dict(attrs)
for attr in new_attrs:
if isinstance(new_attrs[attr], container.Array):
new_attrs[attr] = list(new_attrs[attr])
elif isinstance(new_attrs[attr], expr.IntImm):
new_attrs[attr] = new_attrs[attr].value
# First check if there is a LayoutConfig scope, and if so, whether
# it indicates we should ignore this layer or not.
layout_config = LayoutConfig.current
if layout_config is not None:
skip_layer = layout_config.check_skip()
if skip_layer:
return relay.nn.deformable_conv2d(data, offset, weight, **new_attrs)
# Prepare new layout.
assert len(desired_layouts) == 2, "A desired layout is expected for data and kernel"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs["data_layout"] = desired_data_layout
if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.nn.deformable_conv2d(data, offset, weight, **new_attrs)
# Handle default kernel layouts
if desired_data_layout == "NCHW":
new_attrs["kernel_layout"] = "OIHW"
elif desired_data_layout == "NHWC":
new_attrs["kernel_layout"] = "HWIO"
else:
raise ValueError("Layout %s is not yet supported." % desired_data_layout)
return relay.nn.deformable_conv2d(data, offset, weight, **new_attrs)
# bitpack
@reg.register_compute("nn.bitpack")
def compute_bitpack(attrs, inputs, out_dtype):
"""Compute definition for bitpack"""
bits = attrs.bits
pack_axis = attrs.pack_axis
bit_axis = attrs.bit_axis
pack_type = attrs.pack_type
name = attrs.name
out = topi.nn.bitpack(inputs[0], bits, pack_axis, bit_axis, pack_type, name)
return [out]
reg.register_schedule("nn.bitpack", strategy.schedule_bitpack)
# bitserial_conv2d
reg.register_strategy("nn.bitserial_conv2d", strategy.bitserial_conv2d_strategy)
@reg.register_legalize("nn.bitserial_conv2d")
def legalize_bitserial_conv2d(attrs, inputs, types):
"""Legalize bitserial_conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
return topi.nn.bitserial_conv2d_legalize(attrs, inputs, types)
# bitserial_dense
reg.register_strategy("nn.bitserial_dense", strategy.bitserial_dense_strategy)
# cross_entropy
@reg.register_compute("nn.cross_entropy")
def compute_cross_entropy(attrs, inputs, out_dtype):
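    """Compute definition of cross_entropy: -sum(y * log(x)) / batch_size."""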
x, y = inputs
return [-topi.sum(topi.log(x) * y) / x.shape[0]]
reg.register_reduce_schedule("nn.cross_entropy")
# dilate
@reg.register_compute("nn.dilate")
def compute_dilate(attrs, inputs, out_dtype):
return [topi.nn.dilate(inputs[0], attrs.strides, attrs.dilation_value)]
reg.register_broadcast_schedule("nn.dilate")
# cross_entropy_with_logits
@reg.register_compute("nn.cross_entropy_with_logits")
def compute_cross_entropy_with_logits(attrs, inputs, out_dtype):
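    """Compute definition of cross_entropy_with_logits: -sum(x * y) / batch_size."""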
x, y = inputs
return [-topi.sum(x * y) / x.shape[0]]
reg.register_reduce_schedule("nn.cross_entropy_with_logits")
# nll_loss
@reg.register_compute("nn.nll_loss")
def compute_nll_loss(attrs, inputs, out_dtype):
predictions, targets, weights = inputs
return [topi.nn.nll_loss(predictions, targets, weights, attrs.reduction, attrs.ignore_index)]
reg.register_reduce_schedule("nn.nll_loss")
# depth_to_space
@reg.register_compute("nn.depth_to_space")
def compute_depth_to_space(attrs, inputs, out_dtype):
block_size = attrs.block_size
layout = attrs.layout
mode = attrs.mode
return [topi.nn.depth_to_space(inputs[0], block_size, layout=layout, mode=mode)]
reg.register_injective_schedule("nn.depth_to_space")
# space_to_depth
@reg.register_compute("nn.space_to_depth")
def compute_space_to_depth(attrs, inputs, out_dtype):
block_size = attrs.block_size
layout = attrs.layout
return [topi.nn.space_to_depth(inputs[0], block_size, layout=layout)]
reg.register_injective_schedule("nn.space_to_depth")
# correlation
reg.register_strategy("nn.correlation", strategy.correlation_strategy)
# space_to_batch_nd and batch_to_space_nd
reg.register_injective_schedule("nn.space_to_batch_nd")
reg.register_injective_schedule("nn.batch_to_space_nd")
reg.register_strategy("nn.conv2d_backward_weight", strategy.conv2d_backward_weight_strategy)
@reg.register_legalize("nn.conv2d_backward_weight")
def legalize_conv2d_backward_weight(attrs, inputs, types):
"""Legalize conv2d_backward_weight op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current op
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
grad, data = inputs
data_shape = get_const_tuple(data.checked_type.shape)
weight_shape = get_const_tuple(types[2].shape)
_, out_channel, grad_h, grad_w = get_const_tuple(grad.checked_type.shape)
batch, in_channel, in_h, in_w = data_shape
_, _, filter_h, filter_w = weight_shape
fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(
get_const_tuple(attrs.padding), (filter_h, filter_w)
)
stride_h, stride_w = get_const_tuple(attrs.strides)
dilation_h, dilation_w = get_const_tuple(attrs.dilation)
grad = relay.tile(grad, [1, in_channel // attrs.groups, 1, 1])
grad = relay.reshape(grad, [-1, 1, 0, 0]) # batch * oc * ic // groups, 1, oh, ow
data = relay.reshape(data, [1, -1, 0, 0]) # 1, batch * ic, ih, iw
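    # Note: strides and dilation intentionally swap roles below. The weight
    # gradient is itself a convolution of `data` by `grad`, in which the
    # forward op's dilation acts as the stride and vice versa.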
backward_weight = relay.nn.conv2d(
data,
grad,
strides=attrs.dilation,
padding=attrs.padding,
dilation=attrs.strides,
groups=in_channel * batch,
out_dtype=attrs.out_dtype,
)
# infer shape of backward_weight
padded_weight_grad_h = (
in_h - (grad_h - 1) * stride_h - 1 + fpad_top + fpad_bottom
) // dilation_h + 1
padded_weight_grad_w = (
in_w - (grad_w - 1) * stride_w - 1 + fpad_left + fpad_right
) // dilation_w + 1
backward_weight = relay.reshape(
backward_weight,
[
batch,
in_channel // attrs.groups,
out_channel,
padded_weight_grad_h,
padded_weight_grad_w,
],
)
backward_weight = relay.sum(backward_weight, axis=0)
backward_weight = relay.transpose(backward_weight, [1, 0, 2, 3])
assert padded_weight_grad_h >= filter_h
assert padded_weight_grad_w >= filter_w
if padded_weight_grad_h > filter_h or padded_weight_grad_w > filter_w:
backward_weight = relay.strided_slice(
backward_weight,
begin=[0, 0, 0, 0],
end=[out_channel, in_channel // attrs.groups, filter_h, filter_w],
)
return backward_weight
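# A worked instance of the shape arithmetic above (illustrative numbers):
# with in_h = 32, grad_h = 16, stride_h = 2, fpad_top = fpad_bottom = 1 and
# dilation_h = 1, padded_weight_grad_h = (32 - 15 * 2 - 1 + 2) // 1 + 1 = 4,
# which strided_slice then trims down to filter_h if it is larger.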
@reg.register_convert_op_layout("nn.conv2d_backward_weight")
def convert_conv2d_backward_weight(attrs, inputs, _, desired_layouts):
"""Convert Layout pass registration for conv2d_backward_weight op.
Note that `desired_layouts` must be a pair [`data_layout`, `kernel_layouts`],
where `kernel_layouts` affects the output of this op (since the output of this op
is the weight gradient). The layout of the output gradient (the second input to this op)
is assumed to be the same as `data_layout`.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current op
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
new_attrs = dict(attrs)
    assert len(desired_layouts) == 2, "A desired layout is expected for both data and gradient."
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs["grad_layout"] = desired_data_layout
new_attrs["data_layout"] = desired_data_layout
new_attrs["kernel_layout"] = desired_kernel_layout
new_attrs.pop("out_layout")
return relay.nn.conv2d_backward_weight(inputs[0], inputs[1], **new_attrs)
#####################
# Shape functions #
#####################
@script
def _conv_shape_func_nchw(dshape, kshape, strides, padding, dilation):
"""Shape function for conv*d op with nchw & oihw layout."""
out = output_tensor((dshape.shape[0],), "int64")
out[0] = dshape[0]
out[1] = kshape[0]
for i in const_range(dshape.shape[0] - 2):
dilated_k = (kshape[i + 2] - 1) * dilation[i] + 1
out[i + 2] = (dshape[i + 2] + 2 * padding[i] - dilated_k) // strides[i] + 1
return out
@script
def _conv_shape_func_nhwc_hwio(dshape, kshape, strides, padding, dilation):
"""Shape function for conv*d op with nhwc & hwio layout."""
out = output_tensor((dshape.shape[0],), "int64")
out[0] = dshape[0]
out[dshape.shape[0] - 1] = kshape[kshape.shape[0] - 1]
for i in const_range(dshape.shape[0] - 2):
dilated_k = (kshape[i] - 1) * dilation[i] + 1
out[i + 1] = (dshape[i + 1] + 2 * padding[i] - dilated_k) // strides[i] + 1
return out
@script
def _conv_shape_func_nhwc_hwoi(dshape, kshape, strides, padding, dilation):
"""Shape function for conv*d op with nhwc & hwoi layout."""
out = output_tensor((dshape.shape[0],), "int64")
out[0] = dshape[0]
out[dshape.shape[0] - 1] = kshape[kshape.shape[0] - 2]
for i in const_range(dshape.shape[0] - 2):
dilated_k = (kshape[i] - 1) * dilation[i] + 1
out[i + 1] = (dshape[i + 1] + 2 * padding[i] - dilated_k) // strides[i] + 1
return out
@script
def _conv_shape_func_nhwc_ohwi(dshape, kshape, strides, padding, dilation):
"""Shape function for conv*d op with nhwc & ohwi layout."""
out = output_tensor((dshape.shape[0],), "int64")
out[0] = dshape[0]
out[dshape.shape[0] - 1] = kshape[0]
for i in const_range(dshape.shape[0] - 2):
dilated_k = (kshape[i + 1] - 1) * dilation[i] + 1
out[i + 1] = (dshape[i + 1] + 2 * padding[i] - dilated_k) // strides[i] + 1
return out
def conv_shape_func(attrs, inputs, _):
"""Shape function for conv*d op."""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
shape_func = None
if attrs["data_layout"] == "NCHW" and attrs["kernel_layout"] == "OIHW":
shape_func = _conv_shape_func_nchw
elif attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "HWIO":
shape_func = _conv_shape_func_nhwc_hwio
elif attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "HWOI":
shape_func = _conv_shape_func_nhwc_hwoi
elif attrs["data_layout"] == "NHWC" and attrs["kernel_layout"] == "OHWI":
shape_func = _conv_shape_func_nhwc_ohwi
else:
raise ValueError(
"Unsupported data/kernel layout: %s, %s"
% (attrs["data_layout"], attrs["kernel_layout"])
)
return [shape_func(inputs[0], inputs[1], convert(strides), convert(padding), convert(dilation))]
reg.register_shape_func("nn.conv1d", False, conv_shape_func)
reg.register_shape_func("nn.conv2d", False, conv_shape_func)
reg.register_shape_func("nn.conv3d", False, conv_shape_func)
@script
def _conv2d_NCHWc_shape_func(dshape, kshape, strides, padding, dilation, oc_bn):
out = output_tensor((dshape.shape[0],), "int64")
ic_chunk = dshape[1]
height = dshape[2]
width = dshape[3]
ic_bn = dshape[4]
kheight = kshape[2]
kwidth = kshape[3]
dilated_kh = (kheight - 1) * dilation[0] + 1
dilated_kw = (kwidth - 1) * dilation[1] + 1
kflatten = int64(1)
for i in const_range(kshape.shape[0]):
kflatten *= kshape[i]
oc = kflatten // (kheight * kwidth * ic_chunk * ic_bn)
oc_chunk = oc // oc_bn
out_height = (height + 2 * padding[0] - dilated_kh) // strides[0] + 1
out_width = (width + 2 * padding[1] - dilated_kw) // strides[1] + 1
out[0] = dshape[0]
out[1] = oc_chunk
out[2] = out_height
out[3] = out_width
out[4] = int64(oc_bn)
return out
@reg.register_shape_func("nn.contrib_conv2d_NCHWc", False)
def conv2d_NCHWc_shape_func(attrs, inputs, _):
"""
Shape function for contrib_conv2d_NCHWc op.
"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
out_layout = attrs.out_layout
oc_bn = int(out_layout[4:-1])
return [
_conv2d_NCHWc_shape_func(
inputs[0],
inputs[1],
convert(strides),
convert(padding),
convert(dilation),
convert(oc_bn),
)
]
@script
def _conv_transpose_shape_func(dshape, kshape, strides, padding, dilation, output_padding):
out = output_tensor((dshape.shape[0],), "int64")
out[0] = dshape[0]
out[1] = kshape[1]
for i in const_range(dshape.shape[0] - 2):
dilated_k = (kshape[i + 2] - 1) * dilation[i] + 1
out[i + 2] = (
strides[i] * (dshape[i + 2] - 1) + dilated_k - 2 * padding[i] + output_padding[i]
)
return out
def conv_transpose_shape_func(attrs, inputs, _):
"""
Shape function for conv transpose ops (conv1d_transpose / conv2d_transpose).
"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
output_padding = get_const_tuple(attrs.output_padding)
return [
_conv_transpose_shape_func(
inputs[0],
inputs[1],
convert(strides),
convert(padding),
convert(dilation),
convert(output_padding),
)
]
reg.register_shape_func("nn.conv1d_transpose", False, conv_transpose_shape_func)
reg.register_shape_func("nn.conv2d_transpose", False, conv_transpose_shape_func)
@script
def _pool2d_shape_func(data_shape, pool_size, strides, padding, height_axis, width_axis):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(data_shape.shape[0]):
if i == height_axis:
out[i] = (data_shape[i] + padding[0] + padding[2] - pool_size[0]) // strides[0] + 1
elif i == width_axis:
out[i] = (data_shape[i] + padding[1] + padding[3] - pool_size[1]) // strides[1] + 1
else:
out[i] = data_shape[i]
return out
def pool2d_shape_func(attrs, inputs, _):
"""
Shape function for pool2d op.
"""
pool_size = get_const_tuple(attrs.pool_size)
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
layout = attrs.layout
height_axis = layout.index("H")
width_axis = layout.index("W")
if len(padding) == 1:
padding = [padding[0]] * 4
elif len(padding) == 2:
padding = [padding[0], padding[1], padding[0], padding[1]]
return [
_pool2d_shape_func(
inputs[0],
convert(pool_size),
convert(strides),
convert(padding),
convert(height_axis),
convert(width_axis),
)
]
reg.register_shape_func("nn.max_pool2d", False, pool2d_shape_func)
reg.register_shape_func("nn.avg_pool2d", False, pool2d_shape_func)
@script
def _global_pool2d_shape_func(data_shape, height_axis, width_axis):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0]):
if i == height_axis or i == width_axis:
out[i] = int64(1)
else:
out[i] = data_shape[i]
return out
def global_pool2d_shape_func(attrs, inputs, _):
"""
Shape function for global pool2d op.
"""
layout = attrs.layout
height_axis = width_axis = 1
for i, letter in enumerate(layout):
if letter == "H":
height_axis = i
if letter == "W":
width_axis = i
return [_global_pool2d_shape_func(inputs[0], convert(height_axis), convert(width_axis))]
reg.register_shape_func("nn.global_max_pool2d", False, global_pool2d_shape_func)
reg.register_shape_func("nn.global_avg_pool2d", False, global_pool2d_shape_func)
@script
def _batch_flatten_shape_func(data_shape):
out = output_tensor((2,), "int64")
out[0] = data_shape[0]
out[1] = int64(1)
for i in const_range(data_shape.shape[0] - 1):
out[1] *= data_shape[i + 1]
return out
@reg.register_shape_func("nn.batch_flatten", False)
def batch_flatten_shape_func(attrs, inputs, _):
"""
Shape function for batch_flatten op.
"""
return [_batch_flatten_shape_func(inputs[0])]
@script
def _matmul_shape_func(tensor_a_shape, tensor_b_shape, transpose_a, transpose_b):
out = output_tensor((tensor_a_shape.shape[0],), "int64")
for i in const_range(out.shape[0] - 1):
out[i] = tensor_a_shape[i]
if transpose_a:
out[out.shape[0] - 2] = out[out.shape[0] - 1]
out[out.shape[0] - 1] = tensor_b_shape[0] if transpose_b else tensor_b_shape[1]
return out
@reg.register_shape_func("nn.matmul", False)
def matmul_shape_func(attrs, inputs, _):
"""Shape function for matmul op."""
ret = [
_matmul_shape_func(
inputs[0],
inputs[1],
expr.IntImm("bool", attrs.transpose_a),
expr.IntImm("bool", attrs.transpose_b),
)
]
return ret
@reg.register_shape_func("nn.dense", False)
def dense_shape_func(attrs, inputs, _):
"""Shape function for dense op. This is an alias of matmul_nt operator for data tensor in
non-transposed format and weight tensor in transposed format.
"""
ret = [
_matmul_shape_func(
inputs[0],
inputs[1],
expr.IntImm("bool", False),
expr.IntImm("bool", True),
)
]
return ret
@script
def _dense_pack_shape_func(data_shape, weight_shape):
out = output_tensor((data_shape.shape[0],), "int64")
assert data_shape.shape[0] == 2, "Input data must be 2D"
out[0] = data_shape[0]
out[1] = weight_shape[0] * weight_shape[2]
return out
@reg.register_shape_func("nn.contrib_dense_pack", False)
def dense_pack_shape_func(attrs, inputs, _):
"""
Shape function for dense_pack op.
"""
ret = [_dense_pack_shape_func(inputs[0], inputs[1])]
return ret
@script
def _batch_matmul_shape_func(tensor_a_shape, tensor_b_shape, transpose_a, transpose_b):
out = output_tensor((tensor_a_shape.shape[0],), "int64")
out[0] = max(tensor_a_shape[0], tensor_b_shape[0])
out[1] = tensor_a_shape[2] if transpose_a else tensor_a_shape[1]
out[2] = tensor_b_shape[1] if transpose_b else tensor_b_shape[2]
return out
@reg.register_shape_func("nn.batch_matmul", False)
def batch_matmul_shape_func(attrs, inputs, _):
"""
Shape function for batch matmul op.
"""
ret = [
_batch_matmul_shape_func(
inputs[0],
inputs[1],
expr.IntImm("bool", attrs.transpose_a),
expr.IntImm("bool", attrs.transpose_b),
)
]
return ret
@script
def _pad_shape_func(data_shape, pad_width):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0]):
out[i] = data_shape[i] + pad_width[i][0] + pad_width[i][1]
return out
@reg.register_shape_func("nn.pad", False)
def pad_shape_func(attrs, inputs, _):
"""
Shape function for pad op.
"""
pad_width = []
for pair in attrs.pad_width:
pad_width.append(get_const_tuple(pair))
return [_pad_shape_func(inputs[0], convert(pad_width))]
@script
def _dilate_shape_func(data_shape, strides):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(out.shape[0]):
out[i] = (data_shape[i] - 1) * strides[i] + 1
return out
@reg.register_shape_func("nn.dilate", False)
def dilate_shape_func(attrs, inputs, _):
"""
Shape function for dilate op.
"""
return [_dilate_shape_func(inputs[0], convert(attrs.strides))]
reg.register_shape_func("nn.bias_add", False, elemwise_shape_func)
reg.register_shape_func("nn.softmax", False, elemwise_shape_func)
reg.register_shape_func("nn.fast_softmax", False, elemwise_shape_func)
reg.register_shape_func("nn.relu", False, elemwise_shape_func)
reg.register_shape_func("nn.leaky_relu", False, elemwise_shape_func)
reg.register_shape_func("nn.prelu", False, elemwise_shape_func)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/nn/nn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-lines
"""Neural network operations."""
from tvm.relay import expr
from ...expr import Constant, Expr, const
from ..dyn.nn import _make as _dyn_make
from . import _make
from .utils import get_pad_tuple1d, get_pad_tuple2d, get_pad_tuple3d
def conv1d(
data,
weight,
strides=1,
padding=0,
dilation=1,
groups=1,
channels=None,
kernel_size=None,
data_layout="NCW",
kernel_layout="OIW",
out_layout="",
out_dtype="",
):
r"""1D convolution.
This operator takes the weight as the convolution kernel
and convolves it with data to produce an output.
In the default case, where the data_layout is `NCW`
and kernel_layout is `OIW`, conv1d takes in
a data Tensor with shape `(batch_size, in_channels, width)`,
and a weight Tensor with shape `(channels, in_channels, kernel_size)`
to produce an output Tensor with the following rule:
.. math::
\mbox{out}[b, c, w] = \sum_{dw, k}
\mbox{data}[b, k, \mbox{strides}[0] * w + dw] *
\mbox{weight}[c, k, dw]
Padding and dilation are applied to data and weight respectively before the computation.
This operator accepts data layout specification.
Semantically, the operator will convert the layout to the canonical layout
(`NCW` for data and `OIW` for weight), perform the computation,
then convert to the out_layout.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
strides : Optional[int, Tuple[int]]
The strides of convolution.
padding : Optional[int, Tuple[int]]
The padding of convolution on both sides of the input before convolution.
dilation : Optional[int, Tuple[int]]
Specifies the dilation rate to be used for dilated convolution.
groups : Optional[int]
Currently unused for 1D convolution.
channels : Optional[int]
Number of output channels of this convolution.
kernel_size : Optional[int, Tuple[int]]
The spatial dimension of the convolution kernel.
data_layout : Optional[str]
Layout of the input.
kernel_layout : Optional[str]
Layout of the weight.
out_layout : Optional[str]
Layout of the output, by default, out_layout is the same as data_layout
out_dtype : Optional[str]
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
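Examples
--------
A minimal construction sketch (shapes and names are illustrative):

.. code-block:: python

    from tvm import relay

    x = relay.var("x", shape=(1, 16, 100))  # NCW
    w = relay.var("w", shape=(32, 16, 3))   # OIW
    y = relay.nn.conv1d(x, w, padding=1, channels=32, kernel_size=3)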
"""
if isinstance(kernel_size, int):
kernel_size = (kernel_size,)
if isinstance(strides, int):
strides = (strides,)
if isinstance(dilation, int):
dilation = (dilation,)
padding = get_pad_tuple1d(padding)
return _make.conv1d(
data,
weight,
strides,
padding,
dilation,
groups,
channels,
kernel_size,
data_layout,
kernel_layout,
out_layout,
out_dtype,
)
def conv2d(
data,
weight,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
kernel_layout="OIHW",
out_layout="",
out_dtype="",
):
r"""2D convolution.
This operator takes the weight as the convolution kernel
and convolves it with data to produce an output.
In the default case, where the data_layout is `NCHW`
and kernel_layout is `OIHW`, conv2d takes in
a data Tensor with shape `(batch_size, in_channels, height, width)`,
and a weight Tensor with shape `(channels, in_channels, kernel_size[0], kernel_size[1])`
to produce an output Tensor with the following rule:
.. math::
\mbox{out}[b, c, y, x] = \sum_{dy, dx, k}
\mbox{data}[b, k, \mbox{strides}[0] * y + dy, \mbox{strides}[1] * x + dx] *
\mbox{weight}[c, k, dy, dx]
Padding and dilation are applied to data and weight respectively before the computation.
This operator accepts data layout specification.
Semantically, the operator will convert the layout to the canonical layout
(`NCHW` for data and `OIHW` for weight), perform the computation,
then convert to the out_layout.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
strides : Optional[int, Tuple[int]]
The strides of convolution.
padding : Optional[int, Tuple[int]]
The padding of convolution on both sides of inputs before convolution.
dilation : Optional[int, Tuple[int]]
Specifies the dilation rate to be used for dilated convolution.
groups : Optional[int]
Number of groups for grouped convolution.
channels : Optional[int]
Number of output channels of this convolution.
kernel_size : Optional[int, Tuple[int]]
The spatial dimensions of the convolution kernel.
data_layout : Optional[str]
Layout of the input.
kernel_layout : Optional[str]
Layout of the weight.
out_layout : Optional[str]
Layout of the output, by default, out_layout is the same as data_layout
out_dtype : Optional[str]
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
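Examples
--------
A minimal construction sketch (shapes and names are illustrative):

.. code-block:: python

    from tvm import relay

    x = relay.var("x", shape=(1, 3, 224, 224))  # NCHW
    w = relay.var("w", shape=(16, 3, 3, 3))     # OIHW
    # padding=(1, 1) keeps the 224x224 spatial size for a 3x3 kernel
    y = relay.nn.conv2d(x, w, padding=(1, 1), channels=16, kernel_size=(3, 3))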
"""
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if isinstance(strides, int):
strides = (strides, strides)
if isinstance(dilation, int):
dilation = (dilation, dilation)
# TODO enforce 4-way padding in topi/nn/conv2d after #4644 merged
# convert 2-way padding to 4-way padding
padding = get_pad_tuple2d(padding)
return _make.conv2d(
data,
weight,
strides,
padding,
dilation,
groups,
channels,
kernel_size,
data_layout,
kernel_layout,
out_layout,
out_dtype,
)
def conv3d(
data,
weight,
strides=(1, 1, 1),
padding=(0, 0, 0),
dilation=(1, 1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCDHW",
kernel_layout="OIDHW",
out_layout="",
out_dtype="",
):
r"""3D convolution.
This operator takes the weight as the convolution kernel
and convolves it with data to produce an output.
In the default case, where the data_layout is `NCDHW`
and kernel_layout is `OIDHW`, conv3d takes in
a data Tensor with shape `(batch_size, in_channels, depth, height, width)`,
and a weight Tensor with shape `(channels, in_channels, kernel_size[0], kernel_size[1],
kernel_size[2])` to produce an output Tensor with the following rule:
.. math::
\mbox{out}[b, c, z, y, x] = \sum_{dz, dy, dx, k}
\mbox{data}[b, k, \mbox{strides}[0] * z + dz, \mbox{strides}[1] * y + dy,
\mbox{strides}[2] * x + dx] * \mbox{weight}[c, k, dz, dy, dx]
Padding and dilation are applied to data and weight respectively before the computation.
This operator accepts data layout specification.
Semantically, the operator will convert the layout to the canonical layout
(`NCDHW` for data and `OIDHW` for weight), perform the computation,
then convert to the out_layout.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
strides : Optional[Tuple[int]]
The strides of convolution.
padding : Optional[int, Tuple[int]]
The padding of convolution on both sides of inputs before convolution.
dilation : Optional[int, Tuple[int]]
Specifies the dilation rate to be used for dilated convolution.
groups : Optional[int]
Number of groups for grouped convolution.
channels : Optional[int]
Number of output channels of this convolution.
kernel_size : Optional[int, Tuple[int]]
The spatial dimensions of the convolution kernel.
data_layout : Optional[str]
Layout of the input.
kernel_layout : Optional[str]
Layout of the weight.
out_layout : Optional[str]
Layout of the output, by default, out_layout is the same as data_layout
out_dtype : Optional[str]
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size, kernel_size)
if isinstance(strides, int):
strides = (strides, strides, strides)
if isinstance(dilation, int):
dilation = (dilation, dilation, dilation)
padding = get_pad_tuple3d(padding)
return _make.conv3d(
data,
weight,
strides,
padding,
dilation,
groups,
channels,
kernel_size,
data_layout,
kernel_layout,
out_layout,
out_dtype,
)
def contrib_conv3d_winograd_without_weight_transform(
data,
weight,
tile_size,
strides=(1, 1, 1),
padding=(0, 0, 0),
dilation=(1, 1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCDHW",
kernel_layout="OIDHW",
out_layout="",
out_dtype="",
):
r"""3D convolution with winograd algorithm.
The basic parameters are the same as the ones in vanilla conv3d.
It assumes the weight is pre-transformed by nn.contrib_conv3d_winograd_weight_transform
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
tile_size : int
The Tile size of winograd. E.g. 2 for F(2x2x2, 3x3x3) and 4 for F(4x4x4, 3x3x3)
strides : tuple of int, optional
The strides of convolution.
padding : tuple of int, optional
The padding of convolution on both sides of inputs before convolution.
dilation : tuple of int, optional
Specifies the dilation rate to be used for dilated convolution.
groups : int, optional
Number of groups for grouped convolution.
channels : int, optional
Number of output channels of this convolution.
kernel_size : tuple of int, optional
The spatial dimensions of the convolution kernel.
data_layout : str, optional
Layout of the input.
kernel_layout : str, optional
Layout of the weight.
out_layout : str, optional
Layout of the output, by default, out_layout is the same as data_layout
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
# convert 3-way padding to 6-way padding
padding = get_pad_tuple3d(padding)
return _make.contrib_conv3d_winograd_without_weight_transform(
data,
weight,
tile_size,
strides,
padding,
dilation,
groups,
channels,
kernel_size,
data_layout,
kernel_layout,
out_layout,
out_dtype,
)
def conv3d_transpose(
data,
weight,
strides=(1, 1, 1),
padding=(0, 0, 0),
dilation=(1, 1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCDHW",
kernel_layout="OIDHW",
out_layout="",
output_padding=(0, 0, 0),
out_dtype="",
):
r"""3D transpose convolution.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
strides : Optional[Tuple[int]]
The strides of convolution.
padding : Optional[int, Tuple[int]]
The padding of convolution on both sides of inputs before convolution.
dilation : Optional[int, Tuple[int]]
Specifies the dilation rate to be used for dilated convolution.
groups : Optional[int]
Number of groups for grouped convolution.
channels : Optional[int]
Number of output channels of this convolution.
kernel_size : Optional[int, Tuple[int]]
The spatial dimensions of the convolution kernel.
data_layout : Optional[str]
Layout of the input.
kernel_layout : Optional[str]
Layout of the weight.
out_layout : Optional[str]
Layout of the output, by default, out_layout is the same as data_layout
out_dtype : Optional[str]
Specifies the output data type for mixed precision conv3d.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size, kernel_size)
if isinstance(strides, int):
strides = (strides, strides, strides)
if isinstance(dilation, int):
dilation = (dilation, dilation, dilation)
padding = get_pad_tuple3d(padding)
return _make.conv3d_transpose(
data,
weight,
strides,
padding,
dilation,
groups,
channels,
kernel_size,
data_layout,
kernel_layout,
out_layout,
output_padding,
out_dtype,
)
def conv2d_transpose(
data,
weight,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
kernel_layout="IOHW",
out_layout="",
output_padding=(0, 0),
out_dtype="",
):
"""Two dimensional transposed convolution operator.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
strides : Tuple[int], optional
The strides of convolution.
padding : Tuple[int], optional
The padding of convolution on both sides of inputs.
dilation : Tuple[int], optional
Specifies the dilation rate to be used for dilated convolution.
channels : int, optional
Number of output channels of this convolution.
kernel_size : tuple of int, optional
The spatial dimensions of the convolution kernel.
groups : int, optional
Number of groups for grouped convolution.
data_layout : str, optional
Layout of the input.
kernel_layout : str, optional
Layout of the weight.
out_layout : Optional[str]
Layout of the output, by default, out_layout is the same as data_layout
output_padding : Tuple[int], optional
Used to disambiguate the output shape.
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
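Examples
--------
A minimal construction sketch (shapes and names are illustrative). With the
default IOHW kernel layout the weight shape is (in_channels, out_channels, kh, kw):

.. code-block:: python

    from tvm import relay

    x = relay.var("x", shape=(1, 16, 32, 32))  # NCHW
    w = relay.var("w", shape=(16, 8, 3, 3))    # IOHW
    y = relay.nn.conv2d_transpose(
        x, w, strides=(2, 2), padding=(1, 1), output_padding=(1, 1),
        channels=8, kernel_size=(3, 3),
    )
    # output spatial size: (32 - 1) * 2 + 3 - 2 * 1 + 1 = 64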
"""
# convert 2-way padding to 4-way padding
padding = get_pad_tuple2d(padding)
return _make.conv2d_transpose(
data,
weight,
strides,
padding,
dilation,
groups,
channels,
kernel_size,
data_layout,
kernel_layout,
out_layout,
output_padding,
out_dtype,
)
def conv1d_transpose(
data,
weight,
strides=(1,),
padding=(0,),
dilation=(1,),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCW",
kernel_layout="OIW",
out_layout="",
output_padding=(0,),
out_dtype="",
):
"""One dimensional transposed convolution operator.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
strides : Tuple[int], optional
The strides of convolution.
padding : Tuple[int], optional
The padding of convolution on both sides of inputs.
dilation : Tuple[int], optional
Specifies the dilation rate to be used for dilated convolution.
channels : int, optional
Number of output channels of this convolution.
kernel_size : tuple of int, optional
The spatial dimensions of the convolution kernel.
groups : int, optional
Number of groups for grouped convolution.
data_layout : str, optional
Layout of the input.
kernel_layout : str, optional
Layout of the weight.
out_layout : Optional[str]
Layout of the output, by default, out_layout is the same as data_layout
output_padding : Tuple[int], optional
Used to disambiguate the output shape.
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.conv1d_transpose(
data,
weight,
strides,
padding,
dilation,
groups,
channels,
kernel_size,
data_layout,
kernel_layout,
out_layout,
output_padding,
out_dtype,
)
def softmax(data, axis=-1):
r"""Computes softmax.
.. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)}
.. note::
This operator can be optimized away for inference.
Parameters
----------
data: tvm.relay.Expr
The input data to the operator.
axis: int, optional
The axis to sum over when computing softmax
Returns
-------
result : tvm.relay.Expr
The computed result.
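Examples
--------
A minimal sketch:

.. code-block:: python

    from tvm import relay

    x = relay.var("x", shape=(2, 10))
    y = relay.nn.softmax(x, axis=-1)  # each row of the result sums to one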
"""
return _make.softmax(data, axis)
def fast_softmax(data, axis=-1):
r"""Computes softmax.
Use approximation to compute exponent for faster speed.
.. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)}
.. note::
This operator can be optimized away for inference.
Parameters
----------
data: tvm.relay.Expr
The input data to the operator.
axis: int, optional
The axis to sum over when computing softmax
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.fast_softmax(data, axis)
def log_softmax(data, axis=-1):
r"""Computes log softmax.
.. math::
\text{log_softmax}(x)_i = \log \frac{exp(x_i)}{\sum_j exp(x_j)}
.. note::
This operator can be optimized away for inference.
Parameters
----------
data: tvm.relay.Expr
The input data to the operator.
axis: int, optional
The axis to sum over when computing log softmax
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.log_softmax(data, axis)
def max_pool1d(
data,
pool_size=(1,),
strides=(1,),
dilation=(1,),
padding=(0,),
layout="NCW",
out_layout="",
ceil_mode=False,
):
r"""1D maximum pooling operator.
This operator takes data as input and does 1D max value calculation
within a pool_size sized window, striding as defined by stride.
In the default case, where the data_layout is `NCW`,
it consumes a data Tensor with shape `(batch_size, channels, width)`
to produce an output Tensor.
ceil_mode is used to take ceil or floor while computing the output shape.
This operator accepts data layout specification.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
pool_size : int or tuple of int, optional
The size of window for pooling.
strides : int or tuple of int, optional
The strides of pooling.
dilation : int or tuple of int, optional
The dilation of pooling.
padding : int or tuple of int, optional
The padding for pooling.
layout : str, optional
Layout of the input.
out_layout : Optional[str]
Layout of the output
ceil_mode : bool, optional
To enable or disable ceil while pooling.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
if isinstance(pool_size, int):
pool_size = (pool_size,)
if isinstance(strides, int):
strides = (strides,)
if isinstance(dilation, int):
dilation = (dilation,)
padding = get_pad_tuple1d(padding)
return _make.max_pool1d(
data, pool_size, strides, dilation, padding, layout, out_layout, ceil_mode
)
def max_pool2d(
data,
pool_size=(1, 1),
strides=(1, 1),
dilation=(1, 1),
padding=(0, 0),
layout="NCHW",
out_layout="",
ceil_mode=False,
):
r"""2D maximum pooling operator.
This operator takes data as input and does 2D max value calculation
within a pool_size sized window, striding as defined by stride.
In the default case, where the data_layout is `NCHW`,
it consumes a data Tensor with shape `(batch_size, in_channels, height, width)`
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w) and pool_size (kh, kw)
.. math::
\mbox{out}(b, c, y, x) = \max_{m=0, \ldots, kh-1} \max_{n=0, \ldots, kw-1}
\mbox{data}(b, c, \mbox{stride}[0] * y + m, \mbox{stride}[1] * x + n)
Padding is applied to data before the computation.
ceil_mode is used to take ceil or floor while computing the output shape.
This operator accepts data layout specification.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
pool_size : int or tuple of int, optional
The size of window for pooling.
strides : tuple of int, optional
The strides of pooling.
dilation : int or tuple of int, optional
The dilation of pooling.
padding : tuple of int, optional
The padding for pooling.
layout : str, optional
Layout of the input.
out_layout : Optional[str]
Layout of the output
ceil_mode : bool, optional
To enable or disable ceil while pooling.
Returns
-------
result : tvm.relay.Expr
The computed result.
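Examples
--------
A minimal sketch (shapes are illustrative):

.. code-block:: python

    from tvm import relay

    x = relay.var("x", shape=(1, 16, 32, 32))  # NCHW
    # 2x2 window, stride 2: output spatial size (32 - 2) // 2 + 1 = 16
    y = relay.nn.max_pool2d(x, pool_size=(2, 2), strides=(2, 2))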
"""
if isinstance(pool_size, int):
pool_size = (pool_size, pool_size)
if isinstance(strides, int):
strides = (strides, strides)
if isinstance(dilation, int):
dilation = (dilation, dilation)
padding = get_pad_tuple2d(padding)
return _make.max_pool2d(
data, pool_size, strides, dilation, padding, layout, out_layout, ceil_mode
)
def max_pool3d(
data,
pool_size=(1, 1, 1),
strides=(1, 1, 1),
dilation=(1, 1, 1),
padding=(0, 0, 0),
layout="NCDHW",
out_layout="",
ceil_mode=False,
):
r"""3D maximum pooling operator.
This operator takes data as input and does 3D max value calculation
within a pool_size sized window, striding as defined by stride.
In the default case, where the data_layout is `NCDHW`,
it consumes a data Tensor with shape `(batch_size, channels, depth, height, width)`
to produce an output Tensor.
ceil_mode is used to take ceil or floor while computing the output shape.
This operator accepts data layout specification.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
pool_size : int or tuple of int, optional
The size of window for pooling.
strides : tuple of int, optional
The strides of pooling.
dilation : int or tuple of int, optional
The dilation of pooling.
padding : tuple of int, optional
The padding for pooling.
layout : str, optional
Layout of the input.
out_layout : Optional[str]
Layout of the output
ceil_mode : bool, optional
To enable or disable ceil while pooling.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
if isinstance(pool_size, int):
pool_size = (pool_size, pool_size, pool_size)
if isinstance(strides, int):
strides = (strides, strides, strides)
if isinstance(dilation, int):
dilation = (dilation, dilation, dilation)
padding = get_pad_tuple3d(padding)
return _make.max_pool3d(
data, pool_size, strides, dilation, padding, layout, out_layout, ceil_mode
)
def avg_pool1d(
data,
pool_size=(1,),
strides=(1,),
dilation=(1,),
padding=(0,),
layout="NCW",
out_layout="",
ceil_mode=False,
count_include_pad=False,
):
r"""1D average pooling operator.
This operator takes data as input and does 1D average value calculation
within a pool_size sized window, striding as defined by stride.
In the default case, where the data_layout is `NCW`,
it consumes a data Tensor with shape `(batch_size, channels, width)`
to produce an output Tensor.
ceil_mode is used to take ceil or floor while computing the output shape.
count_include_pad indicates whether padded input values are included in the average.
This operator accepts data layout specification.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
pool_size : int or tuple of int, optional
The size of window for pooling.
strides : int or tuple of int, optional
The strides of pooling.
dilation : int or tuple of int, optional
The dilation of pooling.
padding : int or tuple of int, optional
The padding for pooling.
layout : str, optional
Layout of the input.
out_layout : Optional[str]
Layout of the output
ceil_mode : bool, optional
To enable or disable ceil while pooling.
count_include_pad : bool, optional
To include padding to compute the average.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
if isinstance(pool_size, int):
pool_size = (pool_size,)
if isinstance(strides, int):
strides = (strides,)
if isinstance(dilation, int):
dilation = (dilation,)
padding = get_pad_tuple1d(padding)
return _make.avg_pool1d(
data,
pool_size,
strides,
dilation,
padding,
layout,
out_layout,
ceil_mode,
count_include_pad,
)
def avg_pool2d(
data,
pool_size=(1, 1),
strides=(1, 1),
dilation=(1, 1),
padding=(0, 0),
layout="NCHW",
out_layout="",
ceil_mode=False,
count_include_pad=False,
):
r"""2D average pooling operator.
This operator takes data as input and does 2D average value calculation
within a pool_size sized window, striding as defined by stride.
In the default case, where the data_layout is `NCHW`,
it consumes a data Tensor with shape `(batch_size, in_channels, height, width)`
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w), pool_size (kh, kw)
.. math::
\mbox{out}(b, c, y, x) = \frac{1}{kh * kw} \sum_{m=0}^{kh-1} \sum_{n=0}^{kw-1}
\mbox{data}(b, c, \mbox{stride}[0] * y + m, \mbox{stride}[1] * x + n)
Padding is applied to data before the computation.
ceil_mode is used to take ceil or floor while computing the output shape.
count_include_pad indicates whether padded input values are included in the average.
This operator accepts data layout specification.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
pool_size : int or tuple of int, optional
The size of window for pooling.
strides : tuple of int, optional
The strides of pooling.
dilation : int or tuple of int, optional
The dilation of pooling.
padding : tuple of int, optional
The padding for pooling.
layout : str, optional
Layout of the input.
out_layout : Optional[str]
Layout of the output
ceil_mode : bool, optional
To enable or disable ceil while pooling.
count_include_pad : bool, optional
To include padding to compute the average.
Returns
-------
result : tvm.relay.Expr
The computed result.
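Examples
--------
A minimal sketch (shapes are illustrative):

.. code-block:: python

    from tvm import relay

    x = relay.var("x", shape=(1, 16, 32, 32))  # NCHW
    # With padding, count_include_pad controls whether padded zeros
    # contribute to the divisor of the average.
    y = relay.nn.avg_pool2d(
        x, pool_size=(3, 3), strides=(1, 1), padding=(1, 1),
        count_include_pad=False,
    )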
"""
if isinstance(pool_size, int):
pool_size = (pool_size, pool_size)
if isinstance(strides, int):
strides = (strides, strides)
if isinstance(dilation, int):
dilation = (dilation, dilation)
padding = get_pad_tuple2d(padding)
return _make.avg_pool2d(
data,
pool_size,
strides,
dilation,
padding,
layout,
out_layout,
ceil_mode,
count_include_pad,
)
def avg_pool3d(
data,
pool_size=(1, 1, 1),
strides=(1, 1, 1),
dilation=(1, 1, 1),
padding=(0, 0, 0),
layout="NCDHW",
out_layout="",
ceil_mode=False,
count_include_pad=False,
):
r"""3D average pooling operator.
This operator takes data as input and does 3D average value calculation
within a pool_size sized window, striding as defined by stride.
In the default case, where the data_layout is `NCDHW`,
it consumes a data Tensor with shape `(batch_size, channels, depth, height, width)`
to produce an output Tensor.
ceil_mode is used to take ceil or floor while computing the output shape.
count_include_pad indicates whether padded input values are included in the average.
This operator accepts data layout specification.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
pool_size : int or tuple of int, optional
The size of window for pooling.
strides : tuple of int, optional
The strides of pooling.
dilation : int or tuple of int, optional
The dilation of pooling.
padding : tuple of int, optional
The padding for pooling.
layout : str, optional
Layout of the input.
out_layout : Optional[str]
Layout of the output
ceil_mode : bool, optional
To enable or disable ceil while pooling.
count_include_pad : bool, optional
To include padding to compute the average.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
if isinstance(pool_size, int):
pool_size = (pool_size, pool_size, pool_size)
if isinstance(strides, int):
strides = (strides, strides, strides)
if isinstance(dilation, int):
dilation = (dilation, dilation, dilation)
padding = get_pad_tuple3d(padding)
return _make.avg_pool3d(
data,
pool_size,
strides,
dilation,
padding,
layout,
out_layout,
ceil_mode,
count_include_pad,
)
def max_pool2d_grad(
out_grad,
data,
pool_size=(1, 1),
strides=(1, 1),
padding=(0, 0),
layout="NCHW",
out_layout="",
ceil_mode=False,
):
r"""Gradient of 2D maximum pooling operator.
This operator takes out_grad and data as input and calculates gradient of max_pool2d.
Parameters
----------
out_grad : tvm.relay.Expr
The output gradient
data : tvm.relay.Expr
The input data to the operator.
pool_size : int or tuple of int, optional
The size of window for pooling.
strides : tuple of int, optional
The strides of pooling.
padding : tuple of int, optional
The padding for pooling.
layout : str, optional
Layout of the input.
out_layout : Optional[str]
Layout of the output
ceil_mode : bool, optional
To enable or disable ceil while pooling.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.max_pool2d_grad(
out_grad, data, pool_size, strides, padding, layout, out_layout, ceil_mode
)
def avg_pool2d_grad(
out_grad,
data,
pool_size=(1, 1),
strides=(1, 1),
padding=(0, 0),
layout="NCHW",
out_layout="",
ceil_mode=False,
count_include_pad=False,
):
r"""Gradient of 2D average pooling operator.
This operator takes out_grad and data as input and calculates gradient of avg_pool2d.
Parameters
----------
out_grad : tvm.relay.Expr
The output gradient
data : tvm.relay.Expr
The input data to the operator.
pool_size : int or tuple of int, optional
The size of window for pooling.
strides : tuple of int, optional
The strides of pooling.
padding : tuple of int, optional
The padding for pooling.
layout : str, optional
Layout of the input.
out_layout : Optional[str]
Layout of the output
ceil_mode : bool, optional
To enable or disable ceil while pooling.
count_include_pad : bool, optional
To include padding to compute the average.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.avg_pool2d_grad(
out_grad,
data,
pool_size,
strides,
padding,
layout,
out_layout,
ceil_mode,
count_include_pad,
)
def global_max_pool2d(data, layout="NCHW", out_layout=""):
r"""2D global maximum pooling operator.
This operator takes data as input and does 2D max value calculation
across a single window spanning the entire height and width (H x W).
In the default case, where the data_layout is `NCHW`,
it consumes a data Tensor with shape `(batch_size, in_channels, height, width)`
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w)
.. math::
\mbox{out}(b, c, 1, 1) = \max_{m=0, \ldots, h} \max_{n=0, \ldots, w}
\mbox{data}(b, c, m, n)
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
layout : str, optional
Layout of the input.
out_layout : Optional[str]
Layout of the output
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.global_max_pool2d(data, layout, out_layout)
def global_avg_pool2d(data, layout="NCHW", out_layout=""):
r"""2D global average pooling operator.
This operator takes data as input and does 2D average value calculation
across a single window spanning the entire height and width (H x W).
In the default case, where the data_layout is `NCHW`,
it consumes a data Tensor with shape `(batch_size, in_channels, height, width)`
to produce an output Tensor with the following rule:
with data of shape (b, c, h, w)
.. math::
\mbox{out}(b, c, 1, 1) = \frac{1}{h * w} \sum_{m=0}^{h-1} \sum_{n=0}^{w-1}
\mbox{data}(b, c, m, n)
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
layout : str, optional
Layout of the input.
out_layout : Optional[str]
Layout of the output
Returns
-------
result : tvm.relay.Expr
The computed result.
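Examples
--------
A minimal sketch:

.. code-block:: python

    from tvm import relay

    x = relay.var("x", shape=(1, 16, 32, 32))  # NCHW
    y = relay.nn.global_avg_pool2d(x)  # result shape: (1, 16, 1, 1)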
"""
return _make.global_avg_pool2d(data, layout, out_layout)
def upsampling(
data, scale_h=1, scale_w=1, layout="NCHW", method="nearest_neighbor", align_corners=False
):
"""Upsampling.
This operator takes data as input and does 2D scaling to the given scale factor.
In the default case, where the data_layout is `NCHW`
with data of shape (n, c, h, w),
out will have a shape (n, c, h*scale_h, w*scale_w).
method indicates the algorithm to be used while calculating the out value
and can be one of ("bilinear", "nearest_neighbor", "bicubic").
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
scale_h : tvm.relay.Expr or int or float
The scale factor for height upsampling.
scale_w : tvm.relay.Expr or int or float
The scale factor for width upsampling.
layout : str, optional
Layout of the input.
method : str, optional
Scale method to be used [nearest_neighbor, bilinear, bicubic].
align_corners : bool, optional
Whether to keep corners in proper place.
Returns
-------
result : tvm.relay.Expr
The computed result.
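Examples
--------
A minimal sketch. A relay Expr scale dispatches to the dynamic op, while a
plain number stays static (shapes are illustrative):

.. code-block:: python

    from tvm import relay

    x = relay.var("x", shape=(1, 3, 32, 32))  # NCHW
    y = relay.nn.upsampling(x, scale_h=2.0, scale_w=2.0)  # (1, 3, 64, 64)
    s = relay.var("s", shape=(), dtype="float64")
    z = relay.nn.upsampling(x, scale_h=s, scale_w=s)  # dyn.nn.upsampling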
"""
if isinstance(scale_h, Constant):
scale_h = scale_h.data.numpy().item()
if isinstance(scale_w, Constant):
scale_w = scale_w.data.numpy().item()
if isinstance(scale_h, Expr) or isinstance(scale_w, Expr):
if not isinstance(scale_h, Expr):
scale_h = const(scale_h, "float64")
if not isinstance(scale_w, Expr):
scale_w = const(scale_w, "float64")
return _dyn_make.upsampling(data, scale_h, scale_w, layout, method, align_corners)
return _make.upsampling(data, scale_h, scale_w, layout, method, align_corners)
def upsampling3d(
data,
scale_d=1,
scale_h=1,
scale_w=1,
layout="NCDHW",
method="nearest_neighbor",
coordinate_transformation_mode="half_pixel",
):
"""3D Upsampling.
This operator takes data as input and does 3D scaling to the given scale factor.
In the default case, where the data_layout is `NCDHW`
with data of shape (n, c, d, h, w),
out will have a shape (n, c, d*scale_d, h*scale_h, w*scale_w).
method indicates the algorithm to be used while calculating the out value
and can be one of ("trilinear", "nearest_neighbor").
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
scale_d : tvm.relay.Expr or int or float
The scale factor for depth upsampling.
scale_h : tvm.relay.Expr or int or float
The scale factor for height upsampling.
scale_w : tvm.relay.Expr or int or float
The scale factor for width upsampling.
layout : str, optional
Layout of the input.
method : str, optional
        Scale method to be used [nearest_neighbor, trilinear].
coordinate_transformation_mode: string, optional
Describes how to transform the coordinate in the resized tensor
to the coordinate in the original tensor.
Refer to the ONNX Resize operator specification for details.
Available options are "half_pixel", "align_corners" and "asymmetric".
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
if isinstance(scale_d, Constant):
scale_d = scale_d.data.numpy().item()
if isinstance(scale_h, Constant):
scale_h = scale_h.data.numpy().item()
if isinstance(scale_w, Constant):
scale_w = scale_w.data.numpy().item()
if isinstance(scale_d, Expr) or isinstance(scale_h, Expr) or isinstance(scale_w, Expr):
if not isinstance(scale_d, Expr):
scale_d = const(scale_d, "float64")
if not isinstance(scale_h, Expr):
scale_h = const(scale_h, "float64")
if not isinstance(scale_w, Expr):
scale_w = const(scale_w, "float64")
return _dyn_make.upsampling3d(
data, scale_d, scale_h, scale_w, layout, method, coordinate_transformation_mode
)
return _make.upsampling3d(
data, scale_d, scale_h, scale_w, layout, method, coordinate_transformation_mode
)
def batch_flatten(data):
"""BatchFlatten.
    This operator flattens all the dimensions except for the batch dimension,
    which results in a 2D output.
For data with shape ``(d1, d2, ..., dk)``
batch_flatten(data) returns reshaped output of shape ``(d1, d2*...*dk)``.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
Returns
-------
result : tvm.relay.Expr
        The flattened result.
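    Examples
    --------
    A minimal usage sketch (shapes are illustrative only):
    .. code-block:: python
        x = relay.var("x", shape=(2, 3, 4, 5), dtype="float32")
        # All non-batch dimensions are collapsed: output shape is (2, 60).
        y = relay.nn.batch_flatten(x)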
"""
return _make.batch_flatten(data)
def bias_add(data, bias, axis=1):
"""add_bias operator.
Add 1D bias to the axis of data.
This function is a special case of add which allows
inference of shape of the bias from data.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
bias : tvm.relay.Expr
The bias to be added.
axis : int, optional
The axis to add the bias.
Returns
-------
result : tvm.relay.Expr
The final result.
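    Examples
    --------
    A minimal usage sketch (shapes are illustrative only); the bias length
    must match the size of ``data`` along ``axis``:
    .. code-block:: python
        x = relay.var("x", shape=(1, 16, 8, 8), dtype="float32")
        b = relay.var("b", shape=(16,), dtype="float32")
        y = relay.nn.bias_add(x, b, axis=1)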
"""
return _make.bias_add(data, bias, axis)
def matmul(tensor_a, tensor_b, units=None, out_dtype="", transpose_a=False, transpose_b=False):
"""Matmul operator.
    Applies a linear transformation. Both A and B can be transposed.
.. math::
`C = A * B`
Parameters
----------
    tensor_a : tvm.relay.Expr
        The first input of the operator,
        of shape `(d_1, d_2, ..., d_n, units_in)` or `(d_1, d_2, ..., units_in, d_n)`.
    tensor_b : tvm.relay.Expr
        The second input expression, a 2-D matrix,
        of shape `(units_in, units)` or `(units, units_in)`.
units : Optional[int]
Number of hidden units of the matmul transformation.
out_dtype : Optional[str]
Specifies the output data type for mixed precision matmul,
of shape `(d_1, d_2, ..., d_n, units)`.
transpose_a : Optional[bool] = False
Whether the data tensor is in transposed format.
transpose_b : Optional[bool] = False
Whether the weight tensor is in transposed format.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
    # Since `nn.dense` currently has better topi schedule support, we prefer
    # to use `dense` rather than `matmul` for better compatibility
if not transpose_a and transpose_b:
        # TODO(jcf94): Remove this when `nn.matmul` is finally ready
return dense(tensor_a, tensor_b, units, out_dtype)
return _make.matmul(tensor_a, tensor_b, units, out_dtype, transpose_a, transpose_b)
def dense(data, weight, units=None, out_dtype=""):
"""Dense operator.
Applies a linear transformation
.. math::
`Y = X * W^T`
Parameters
----------
data : tvm.relay.Expr
The input data to the operator,
of shape `(d_1, d_2, ..., d_n, units_in)`.
weight : tvm.relay.Expr
The weight expressions, 2-D matrix,
of shape `(units, units_in)`.
units : int, optional
Number of hidden units of the dense transformation.
out_dtype : str, optional
Specifies the output data type for mixed precision dense,
of shape `(d_1, d_2, ..., d_n, units)`.
Returns
-------
result : tvm.relay.Expr
The computed result.
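    Examples
    --------
    A minimal usage sketch (shapes are illustrative only); note the weight
    is stored as `(units, units_in)`:
    .. code-block:: python
        x = relay.var("x", shape=(4, 16), dtype="float32")
        w = relay.var("w", shape=(8, 16), dtype="float32")
        # Output shape is (4, 8).
        y = relay.nn.dense(x, w, units=8)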
"""
return _make.dense(data, weight, units, out_dtype)
def contrib_dense_pack(data, weight, weight_layout="NC", units=None, out_dtype=""):
"""Dense operator.
Applies a linear transformation with packed weight
.. math::
`Y = X * W^T`
Parameters
----------
data : tvm.relay.Expr
The input data to the operator,
of shape `(batch, units_in)`.
weight : tvm.relay.Expr
The transformed weight expressions, 3-D matrix,
of shape `(units // pack_weight_tile, units_in, pack_weight_tile)`.
weight_layout: str
The layout of weight, such as "NC" or "NC8n".
units : int, optional
Number of hidden units of the dense transformation.
out_dtype : str, optional
Specifies the output data type for mixed precision dense.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.contrib_dense_pack(data, weight, weight_layout, units, out_dtype)
def fifo_buffer(data, buffer, axis):
"""FIFO buffer to enable computation reuse in CNNs with sliding indow input
Compute equivalent of
.. code-block:: python
concat(buffer, data, axis=axis)
.slice_axis(axis=axis,
begin=data.shape[axis],
end=data.shape[axis]+buffer.shape[axis])
Useful for
* Encoding explicit re-use of computation in convolution ops operated on a sliding window input
* Implementing a FIFO queue to cache intermediate results, e.g. as in Fast WaveNet.
Parameters
----------
data : tvm.relay.Expr
The input data
buffer : tvm.relay.Expr
Previous value of the FIFO buffer
axis : int
Specify which axis should be used for buffering
Returns
-------
result : tvm.relay.Expr
Updated value for the buffer
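    Examples
    --------
    A minimal usage sketch (shapes are illustrative only); each call shifts
    the buffer along ``axis`` and appends the incoming data:
    .. code-block:: python
        data = relay.var("data", shape=(1, 1, 16), dtype="float32")
        buf = relay.var("buf", shape=(1, 1, 64), dtype="float32")
        # Output has the buffer's shape: (1, 1, 64).
        out = relay.nn.fifo_buffer(data, buf, axis=2)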
"""
return _make.fifo_buffer(data, buffer, axis)
def relu(data):
"""Rectified linear unit.
.. math::
out = max(x, 0)
Parameters
----------
data : tvm.relay.Expr
The input data
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.relu(data)
def leaky_relu(data, alpha=0.01):
"""This operator takes data as input and does Leaky version
of a Rectified Linear Unit.
.. math::
`y = x > 0 ? x : alpha * x`
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
alpha : float
Slope coefficient for the negative half axis.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.leaky_relu(data, alpha)
def prelu(data, alpha, axis=1):
"""This operator takes data as input and does Leaky version
of a Rectified Linear Unit.
.. math::
y = x > 0 ? x : alpha * x
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
alpha : tvm.relay.Expr
Slope coefficient for the negative half axis.
axis : int, optional
Specify which shape axis the channel is specified.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.prelu(data, alpha, axis)
def pad(data, pad_width, pad_value=0, pad_mode="constant"):
r"""Padding
This operator takes in a tensor and pads each axis by the specified
widths using the specified value.
Parameters
----------
data: tvm.relay.Expr
The input data to the operator
pad_width: tuple of <tuple of <int>>, or tvm.relay.Expr, required
Number of values padded to the edges of each axis, in the format
of ((before_1, after_1), ..., (before_N, after_N))
pad_value: float, or tvm.relay.Expr, optional, default=0
The value used for padding
pad_mode: 'constant', 'edge', 'reflect'
'constant' pads with constant_value pad_value
'edge' pads using the edge values of the input array
'reflect' pads by reflecting values with respect to the edge
Returns
-------
result : tvm.relay.Expr
The computed result.
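    Examples
    --------
    A minimal usage sketch (shapes are illustrative only); pad one row on
    top/bottom and two columns on left/right of a 2-D tensor:
    .. code-block:: python
        x = relay.var("x", shape=(3, 4), dtype="float32")
        # Output shape is (5, 8).
        y = relay.nn.pad(x, pad_width=((1, 1), (2, 2)))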
"""
if isinstance(pad_width, Constant):
pad_width = [list(i) for i in pad_width.data.numpy()]
if not isinstance(pad_value, Expr):
pad_value = const(pad_value)
if isinstance(pad_width, Expr):
return _dyn_make.pad(data, pad_width, pad_value, pad_mode)
return _make.pad(data, pad_width, pad_value, pad_mode)
def dilate(data, strides, dilation_value=0.0):
"""Dilate data with given dilation value (0 by default).
Parameters
----------
data : tvm.relay.Expr
n-D, can be any layout.
strides : tuple of <int>
Dilation stride on each dimension, 1 means no dilation.
dilation_value : int/float, optional
Value used to dilate the input.
Returns
-------
Output : tvm.relay.Expr
The computed result
"""
return _make.dilate(data, strides, dilation_value)
def mirror_pad(data, pad_width, mode="SYMMETRIC"):
r"""MirrorPadding
This operator takes in a tensor and pads each axis by the specified
widths using mirroring of the border pixels.
Parameters
----------
data: tvm.relay.Expr
The input data to the operator
pad_width: tuple of <tuple of <int>>, required
Number of values padded to the edges of each axis, in the format
of ((before_1, after_1), ..., (before_N, after_N))
mode: string, optional, default='SYMMETRIC'
What type of mirroring to use, must be SYMMETRIC or REFLECT.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.mirror_pad(data, pad_width, mode)
def lrn(data, size=5, axis=1, bias=2, alpha=0.00001, beta=0.75):
"""This operator takes data as input and does local response normalization.
Normalize the input in a local region across or within feature maps.
    Each input value is divided by (bias + (alpha * sum_sqr_data / size))^beta,
    where size is the extent of each local region, and sum_sqr_data is the sum
    of squared data over the region centered at that value (zero padding is
    added where necessary).
    .. math::
        data / (bias + (alpha * sum\_sqr\_data / size))^{beta}
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
size : int, optional
The size of the local region to be considered for normalization.
axis : int, optional
Input data layout channel axis. Default value is 1 for NCHW format
bias : float, optional
The offset parameter to avoid dividing by 0.
alpha : float, optional
The scaling parameter.
beta : float, optional
The exponent parameter.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.lrn(data, size, axis, alpha, beta, bias)
def l2_normalize(data, eps, axis=None):
"""Perform L2 normalization on the input data
.. math::
y(i, j) = x(i, j) / sqrt(max(sum(x^2), eps))
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
eps : float
epsilon value
axis : list of int, optional
        axis over which the normalization is applied
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.l2_normalize(data, eps, axis)
def dropout(data, rate=0.5):
"""Applies the dropout operation to the input array.
    During training, each element of the input is set to zero with
    probability ``rate``. The whole array is rescaled by ``1/(1-rate)``
    to keep the expected sum of the input unchanged.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
rate : float, optional (default=0.5)
The probability for an element to be reset to 0.
Returns
-------
result : tvm.relay.Expr
The result of dropout
"""
return expr.TupleWrapper(dropout_raw(data, rate), 2)[0]
def dropout_raw(data, rate=0.5):
"""Applies the dropout operation to the input array.
    During training, each element of the input is set to zero with
    probability ``rate``. The whole array is rescaled by ``1/(1-rate)``
    to keep the expected sum of the input unchanged.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
rate : float, optional (default=0.5)
The probability for an element to be reset to 0.
Returns
-------
result : tvm.relay.Expr
The result of dropout
"""
return _make.dropout(data, rate)
def batch_norm(
data, gamma, beta, moving_mean, moving_var, axis=1, epsilon=1e-5, center=True, scale=True
):
r"""
Batch normalization layer (Ioffe and Szegedy, 2014).
Normalizes the input at each batch, i.e. applies a transformation
that maintains the mean activation close to 0 and the activation
standard deviation close to 1.
.. math::
data\_mean[i] = mean(data[:,i,:,...]) \\
data\_var[i] = var(data[:,i,:,...])
Then compute the normalized output, which has the same shape as input, as following:
.. math::
out[:,i,:,...] = \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}}
* gamma[i] + beta[i]
    Both *mean* and *var* return a scalar by treating the input as a vector.
Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta``
have shape *(k,)*.
Besides the inputs and the outputs, this operator accepts two auxiliary
states, ``moving_mean`` and ``moving_var``, which are *k*-length
vectors. They are global statistics for the whole dataset, which are updated by
.. code:: python
moving_mean = moving_mean * momentum + data_mean * (1 - momentum)
moving_var = moving_var * momentum + data_var * (1 - momentum)
The parameter ``axis`` specifies which axis of the input shape denotes
the 'channel' (separately normalized groups). The default is 1.
Specifying -1 sets the channel axis to be the last item in the input shape.
.. note::
This operator can be optimized away for inference.
Parameters
----------
data : tvm.relay.Expr
Input to which batch_norm will be applied.
gamma : tvm.relay.Expr
The gamma scale factor.
beta : tvm.relay.Expr
The beta offset factor.
moving_mean : tvm.relay.Expr
        Running mean of input.
moving_var : tvm.relay.Expr
Running variance of input.
axis : int, optional, default=1
Specify along which shape axis the channel is specified.
epsilon : double, optional, default=1e-5
Small float added to variance to avoid dividing by zero.
center : boolean, optional, default=True
        If True, add offset of beta to normalized tensor. If False,
        beta is ignored.
scale : boolean, optional, default=True
        If True, multiply by gamma. If False, gamma is not used.
        When the next layer is piecewise linear (e.g. nn.relu),
        this can be disabled since the scaling will be done by the next layer.
Returns
-------
result : relay.Tuple([tvm.relay.Expr, tvm.relay.Expr, tvm.relay.Expr])
Tuple of normed data (same shape as input),
new running mean (k-length vector),
and new running variance (k-length vector)
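    Examples
    --------
    A minimal usage sketch (shapes are illustrative only); the result is a
    3-tuple, and inference typically consumes only the normalized data:
    .. code-block:: python
        x = relay.var("x", shape=(1, 16, 8, 8), dtype="float32")
        gamma = relay.var("gamma", shape=(16,))
        beta = relay.var("beta", shape=(16,))
        mean = relay.var("mean", shape=(16,))
        var = relay.var("var", shape=(16,))
        y, new_mean, new_var = relay.nn.batch_norm(x, gamma, beta, mean, var)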
"""
result = _make.batch_norm(
data, gamma, beta, moving_mean, moving_var, axis, epsilon, center, scale
)
return expr.TupleWrapper(result, 3)
def instance_norm(data, gamma, beta, axis=1, epsilon=1e-5, center=True, scale=True):
r"""
Instance Normalization (Ulyanov and et al., 2016)
Applies instance normalization to the n-dimensional input array.
.. math::
out = \frac{data - mean(data)}{\sqrt{var(data)+\epsilon}}
* gamma + beta
The instance normalization is similar to batch normalization, but unlike
batch normalization, the mean and var are calculated per-dimension
    separately for each object (instance) in a mini-batch, not over a batch.
And the same normalization is applied both at test and train time.
Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta``
have shape *(k,)*.
The parameter ``axis`` specifies which axis of the input shape denotes
the 'channel'. The default is 1. Specifying -1 sets the channel axis
to be the last item in the input shape.
.. note::
This operator can be optimized away for inference.
Parameters
----------
data : tvm.relay.Expr
Input to which instance_norm will be applied.
gamma : tvm.relay.Expr
The gamma scale factor.
beta : tvm.relay.Expr
The beta offset factor.
axis : int, optional, default=1
Specify along which shape axis the channel is specified.
epsilon : double, optional, default=1e-5
Small float added to variance to avoid dividing by zero.
center : boolean, optional, default=True
        If True, add offset of beta to normalized tensor. If False,
        beta is ignored.
scale : boolean, optional, default=True
If True, multiply by gamma. If False, gamma is not used.
Returns
-------
result : tvm.relay.Expr
The normalized data.
.. _`Instance Normalization: The Missing Ingredient for Fast Stylization`:
https://arxiv.org/abs/1607.08022
"""
return _make.instance_norm(data, gamma, beta, axis, epsilon, center, scale)
def layer_norm(data, gamma, beta, axis=-1, epsilon=1e-5, center=True, scale=True):
r"""
Layer normalization (Lei Ba and et al., 2016).
Applies layer normalization to the n-dimensional input array.
This operator takes an n-dimensional input array and normalizes
the input using the given axis:
.. math::
out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis)+\epsilon}}
* gamma + beta
Unlike batch normalization, the mean and var are computed along the channel dimension.
    Assume the input has size k on the axis to be normalized; then both gamma
    and beta have shape (k,).
.. note::
This operator can be optimized away for inference.
Parameters
----------
data : tvm.relay.Expr
Input to which layer_norm will be applied.
gamma : tvm.relay.Expr
The gamma scale factor.
beta : tvm.relay.Expr
The beta offset factor.
axis : int, optional, default=-1
The axis that should be normalized, typically the axis of the channels.
epsilon : double, optional, default=1e-5
Small float added to variance to avoid dividing by zero.
center : boolean, optional, default=True
        If True, add offset of beta to normalized tensor. If False,
        beta is ignored.
scale : boolean, optional, default=True
If True, multiply by gamma. If False, gamma is not used.
Returns
-------
result : tvm.relay.Expr
The normalized data.
"""
return _make.layer_norm(data, gamma, beta, axis, epsilon, center, scale)
def group_norm(data, gamma, beta, num_groups, axis=1, epsilon=1e-5, center=True, scale=True):
r"""
    Group normalization normalizes over groups of channels for each training example.
    Group Norm sits in between Instance Norm and Layer Norm: putting all the channels
    into a single group makes group normalization equivalent to Layer Normalization,
    while putting each channel into its own group makes it equivalent to Instance
    Normalization (see https://arxiv.org/pdf/1803.08494.pdf).
    Applies group normalization to the n-dimensional input array by separating the
    input channels into 'num_groups' groups, each containing 'num_channels / num_groups'
    channels. The mean and standard-deviation are calculated separately over each
    group. gamma and beta are learnable per-channel affine transform parameter
    vectors of size num_channels.
.. math::
out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis)+\epsilon}}
* gamma + beta
Unlike batch normalization, the mean and var are computed along a group of channels.
If the input has size k on axis 1, then both gamma and beta have shape (k,).
.. note::
This operator can be optimized away for inference.
Parameters
----------
data : tvm.relay.Expr
Input to which group_norm will be applied.
gamma : tvm.relay.Expr
The gamma scale factor.
beta : tvm.relay.Expr
The beta offset factor.
num_groups : int
The number of groups to separate the channels into.
axis : int, optional, default=1
The axis of the channels.
epsilon : double, optional, default=1e-5
Small float added to variance to avoid dividing by zero.
center : boolean, optional, default=True
        If True, add offset of beta to normalized tensor. If False,
        beta is ignored.
scale : boolean, optional, default=True
If True, multiply by gamma. If False, gamma is not used.
Returns
-------
result : tvm.relay.Expr
The normalized data.
"""
return _make.group_norm(data, gamma, beta, num_groups, axis, epsilon, center, scale)
def batch_matmul(tensor_a, tensor_b, out_dtype="", transpose_a=False, transpose_b=True):
r"""
Compute batch matrix multiplication of `tensor_a` and `tensor_b`.
    Both `tensor_a` and `tensor_b` can be transposed. For legacy reasons, we use the NT format
(transpose_a=False, transpose_b=True) by default.
.. math::
\mbox{batch_matmul}(A, B)[i, :, :] = \mbox{matmul}(A[i, :, :], B[i, :, :])
Parameters
----------
tensor_a : tvm.relay.Expr
The first input.
tensor_b : tvm.relay.Expr
The second input.
out_dtype : Optional[str]
Specifies the output data type for mixed precision batch matmul.
transpose_a : Optional[bool] = False
Whether the first tensor is in transposed format.
transpose_b : Optional[bool] = True
Whether the second tensor is in transposed format.
Returns
-------
result: tvm.relay.Expr
The computed result.
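    Examples
    --------
    A minimal usage sketch (shapes are illustrative only); with the default
    NT format, ``tensor_b`` has shape `(batch, N, K)`:
    .. code-block:: python
        a = relay.var("a", shape=(4, 16, 32), dtype="float32")
        b = relay.var("b", shape=(4, 8, 32), dtype="float32")
        # Output shape is (4, 16, 8).
        c = relay.nn.batch_matmul(a, b)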
"""
return _make.batch_matmul(tensor_a, tensor_b, out_dtype, transpose_a, transpose_b)
# pylint: disable=no-else-return,inconsistent-return-statements
def sparse_dense(dense_mat, sparse_mat, sparse_lhs=False):
r"""
Computes the matrix multiplication of `dense_mat` and `sparse_mat`, where `dense_mat` is
a dense matrix and `sparse_mat` is a sparse (either BSR or CSR) namedtuple with
fields `data`, `indices`, and `indptr`.
    if sparse_lhs=False:
.. math::
\mbox{sparse_dense}(dense_mat, sparse_mat)[m, n]
= \mbox{matmul}(D, \mbox{as_dense}(S)^T)[m, n]
    if sparse_lhs=True:
.. math::
\mbox{sparse_dense}(dense_mat, sparse_mat)[m, n]
= \mbox{matmul}(\mbox{as_dense}(S), (D)^T)[m, n]
    where `as_dense` returns the dense equivalent of the given sparse matrix S
    before performing matmul with the given dense matrix D.
See
https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
and
https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.bsr_matrix.html
for more detail on the sparse matrix representation.
Parameters
----------
dense_mat : tvm.relay.Expr
The input dense matrix for the matrix multiplication
sparse_mat : Union[namedtuple, Tuple[ndarray, ndarray, ndarray]].
The input sparse matrix for the matrix multiplication.
sparse_lhs : bool, optional
Indicates whether lhs or rhs matrix is sparse. Default value is False.
Returns
-------
result: tvm.relay.Expr
The computed result.
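    Examples
    --------
    A minimal usage sketch (shapes are illustrative only); the CSR components
    of an `(8, 16)` weight are passed as a plain `(data, indices, indptr)` triple:
    .. code-block:: python
        x = relay.var("x", shape=(4, 16), dtype="float32")
        w_data = relay.var("w_data", shape=(6,), dtype="float32")
        w_indices = relay.var("w_indices", shape=(6,), dtype="int32")
        w_indptr = relay.var("w_indptr", shape=(9,), dtype="int32")
        # Output shape is (4, 8).
        y = relay.nn.sparse_dense(x, (w_data, w_indices, w_indptr))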
"""
if hasattr(sparse_mat, "indices"):
return _make.sparse_dense(
dense_mat, sparse_mat.data, sparse_mat.indices, sparse_mat.indptr, sparse_lhs
)
else:
return _make.sparse_dense(
dense_mat, sparse_mat[0], sparse_mat[1], sparse_mat[2], sparse_lhs
)
def sparse_transpose(x):
r"""
Computes the fast matrix transpose of x,
where x is a sparse tensor in CSR format (represented as a namedtuple
with fields `data`, `indices`, and `indptr`).
    **Currently only supports square matrices**
.. math::
\mbox{sparse_transpose}(x)[n, n] = (x^T)[n, n]
Please refer to https://github.com/scipy/scipy/blob/v1.3.0/scipy/sparse/csr.py
for the algorithm implemented in this operator.
Parameters
----------
x : Union[namedtuple, Tuple[ndarray, ndarray, ndarray]].
The sparse weight matrix for the fast matrix transpose.
Returns
-------
result : relay.Tuple([tvm.relay.Expr, tvm.relay.Expr, tvm.relay.Expr])
Tuple of output sparse tensor (same shape and format as input),
i.e. if CSR then output is in ([data, indices, indptr]) form
"""
if hasattr(x, "indices"):
return expr.TupleWrapper(_make.sparse_transpose(x.data, x.indices, x.indptr), 3)
return expr.TupleWrapper(_make.sparse_transpose(x[0], x[1], x[2]), 3)
# pylint: disable=no-else-return,inconsistent-return-statements
def sparse_add(dense_mat, sparse_mat):
r"""
Computes the matrix addition of `dense_mat` and `sparse_mat`, where `dense_mat` is
a dense matrix and `sparse_mat` is a sparse (CSR) namedtuple with
fields `data`, `indices`, and `indptr`.
.. math::
\mbox{sparse_add}(dense_mat, sparse_mat)[m, n] = \mbox{add}(\mbox{as_dense}(S), (D))[m, n]
    where `as_dense` returns the dense equivalent of the given sparse matrix S
    before performing addition with the given dense matrix D.
Parameters
----------
dense_mat : tvm.relay.Expr
The input dense matrix for the matrix addition
sparse_mat : Union[namedtuple, Tuple[ndarray, ndarray, ndarray]].
The input sparse matrix(CSR) for the matrix addition.
Returns
-------
result: tvm.relay.Expr
The computed result.
    Examples
    --------
    .. code-block:: python
        dense_data = [[ 3.,  4.,  4. ],
                      [ 4.,  2.,  5. ]]
        sparse_data = [4., 8.]
        sparse_indices = [0, 2]
        sparse_indptr = [0, 1, 2]
        output = relay.nn.sparse_add(
            dense_data, (sparse_data, sparse_indices, sparse_indptr)
        )
        # output = [[ 7.,  4.,  4. ],
        #           [ 4.,  2., 13. ]]
"""
if hasattr(sparse_mat, "indices"):
return _make.sparse_add(dense_mat, sparse_mat.data, sparse_mat.indices, sparse_mat.indptr)
else:
return _make.sparse_add(dense_mat, sparse_mat[0], sparse_mat[1], sparse_mat[2])
def contrib_conv2d_winograd_without_weight_transform(
data,
weight,
tile_size,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
kernel_layout="OIHW",
out_layout="",
out_dtype="",
):
r"""2D convolution with winograd algorithm.
The basic parameters are the same as the ones in vanilla conv2d.
It assumes the weight is pre-transformed by nn.contrib_conv2d_winograd_weight_transform
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
tile_size : int
The Tile size of winograd. E.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3)
strides : tuple of int, optional
The strides of convolution.
padding : tuple of int, optional
The padding of convolution on both sides of inputs before convolution.
dilation : tuple of int, optional
Specifies the dilation rate to be used for dilated convolution.
groups : int, optional
Number of groups for grouped convolution.
channels : int, optional
Number of output channels of this convolution.
kernel_size : tuple of int, optional
        The spatial dimensions of the convolution kernel.
data_layout : str, optional
Layout of the input.
kernel_layout : str, optional
Layout of the weight.
out_layout : str, optional
Layout of the output, by default, out_layout is the same as data_layout
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
# convert 2-way padding to 4-way padding
padding = get_pad_tuple2d(padding)
return _make.contrib_conv2d_winograd_without_weight_transform(
data,
weight,
tile_size,
strides,
padding,
dilation,
groups,
channels,
kernel_size,
data_layout,
kernel_layout,
out_layout,
out_dtype,
)
def contrib_conv2d_gemm_without_weight_transform(
data,
weight,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
kernel_layout="OIHW",
out_layout="",
out_dtype="",
):
r"""2D convolution with gemm algorithm.
The basic parameters are the same as the ones in vanilla conv2d.
It assumes the weight is pre-transformed by nn.contrib_conv2d_gemm_weight_transform
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
strides : tuple of int, optional
The strides of convolution.
padding : tuple of int, optional
The padding of convolution on both sides of inputs before convolution.
dilation : tuple of int, optional
Specifies the dilation rate to be used for dilated convolution.
groups : int, optional
Number of groups for grouped convolution.
channels : int, optional
Number of output channels of this convolution.
kernel_size : tuple of int, optional
        The spatial dimensions of the convolution kernel.
data_layout : str, optional
Layout of the input.
kernel_layout : str, optional
Layout of the weight.
out_layout : str, optional
Layout of the output, by default, out_layout is the same as data_layout
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
# convert 2-way padding to 4-way padding
padding = get_pad_tuple2d(padding)
return _make.contrib_conv2d_gemm_without_weight_transform(
data,
weight,
strides,
padding,
dilation,
groups,
channels,
kernel_size,
data_layout,
kernel_layout,
out_layout,
out_dtype,
)
def contrib_conv2d_nchwc(
data,
kernel,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW8c",
kernel_layout="OIHW",
out_layout="",
out_dtype="",
):
r"""Variant of 2D convolution.
This operator takes the weight as the convolution kernel
and convolves it with data to produce an output, following a specialized
NCHWc data layout.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
kernel : tvm.relay.Expr
The kernel expressions.
strides : tuple of int, optional
The strides of convolution.
padding : tuple of int, optional
The padding of convolution on both sides of inputs before convolution.
dilation : tuple of int, optional
Specifies the dilation rate to be used for dilated convolution.
groups : int, optional
Number of groups for grouped convolution.
channels : int, optional
Number of output channels of this convolution.
kernel_size : tuple of int, optional
        The spatial dimensions of the convolution kernel.
data_layout : str, optional
Layout of the input.
kernel_layout : str, optional
Layout of the weight.
out_layout : str, optional
Layout of the output, by default, out_layout is the same as data_layout
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
# convert 2-way padding to 4-way padding
padding = get_pad_tuple2d(padding)
return _make.contrib_conv2d_NCHWc(
data,
kernel,
strides,
padding,
dilation,
groups,
channels,
kernel_size,
data_layout,
kernel_layout,
out_layout,
out_dtype,
)
def contrib_depthwise_conv2d_nchwc(
data,
kernel,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW8c",
kernel_layout="OIHW",
out_layout="",
out_dtype="",
):
r"""Variant of 2D depthwise convolution.
This operator takes the weight as the depthwise convolution kernel
and depthwise convolves it with data to produce an output, following a specialized
NCHWc data layout.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
kernel : tvm.relay.Expr
The kernel expressions.
strides : tuple of int, optional
The strides of convolution.
padding : tuple of int, optional
The padding of convolution on both sides of inputs before convolution.
dilation : tuple of int, optional
Specifies the dilation rate to be used for dilated convolution.
groups : int, optional
Number of groups for grouped convolution.
channels : int, optional
Number of output channels of this convolution.
kernel_size : tuple of int, optional
        The spatial dimensions of the convolution kernel.
data_layout : str, optional
Layout of the input.
kernel_layout : str, optional
Layout of the weight.
out_layout : str, optional
Layout of the output, by default, out_layout is the same as data_layout
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
# convert 2-way padding to 4-way padding
padding = get_pad_tuple2d(padding)
return _make.contrib_depthwise_conv2d_NCHWc(
data,
kernel,
strides,
padding,
dilation,
groups,
channels,
kernel_size,
data_layout,
kernel_layout,
out_layout,
out_dtype,
)
def contrib_conv2d_winograd_weight_transform(weight, tile_size):
r"""Weight Transformation part for 2D convolution with winograd algorithm.
We separate this as a single op to enable pre-compute for inference.
Use this together with nn.contrib_conv2d_winograd_without_weight_transform
Parameters
----------
weight : tvm.relay.Expr
The weight expressions.
tile_size : int
The Tile size of winograd. E.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3)
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.contrib_conv2d_winograd_weight_transform(weight, tile_size)
def contrib_conv2d_gemm_weight_transform(weights, tile_rows, tile_cols):
r"""Weight Transformation part for 2D convolution with gemm algorithm.
We separate this as a single op to enable pre-compute for inference.
Use this together with nn.contrib_conv2d_gemm_without_weight_transform
Parameters
----------
weights : tvm.relay.Expr
The weight expressions.
tile_rows: int
Tile rows of the weight transformation for ConvGemm.
tile_cols: int
Tile columns of the weight transformation for ConvGemm.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.contrib_conv2d_gemm_weight_transform(weights, tile_rows, tile_cols)
def contrib_conv3d_winograd_weight_transform(weight, tile_size):
r"""Weight Transformation part for 3D convolution with winograd algorithm.
We separate this as a single op to enable pre-compute for inference.
Use this together with nn.contrib_conv3d_winograd_without_weight_transform
Parameters
----------
weight : tvm.relay.Expr
The weight expressions.
tile_size : int
The Tile size of winograd. E.g. 2 for F(2x2x2, 3x3x3) and 4 for F(4x4x4, 3x3x3)
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.contrib_conv3d_winograd_weight_transform(weight, tile_size)
def contrib_conv2d_winograd_nnpack_weight_transform(weight, convolution_algorithm, out_dtype=""):
r"""Weight Transformation part for 2D convolution with winograd algorithm.
We separate this as a single op to enable pre-compute for inference.
Use this together with nn.contrib_conv2d_winograd_without_weight_transform
Parameters
----------
weight : tvm.relay.Expr
The weight expressions.
    convolution_algorithm : int
        The NNPACK winograd convolution algorithm identifier.
    out_dtype : str, optional
        Specifies the output data type.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.contrib_conv2d_winograd_nnpack_weight_transform(
weight, convolution_algorithm, out_dtype
)
def deformable_conv2d(
data,
offset,
weight,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
deformable_groups=1,
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
kernel_layout="OIHW",
out_layout="",
out_dtype="",
):
r"""Deformable 2d convolution.
The deformable convolution operation is described in https://arxiv.org/abs/1703.06211
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
offset : tvm.relay.Expr
The offset expressions.
weight : tvm.relay.Expr
The weight expressions.
strides : tuple of int, optional
The strides of convolution.
padding : tuple of int, optional
The padding of convolution on both sides of inputs before convolution.
dilation : tuple of int, optional
Specifies the dilation rate to be used for dilated convolution.
deformable_groups : int, optional
Number of deformable groups.
groups : int, optional
Number of groups for grouped convolution.
channels : int, optional
Number of output channels of this convolution.
kernel_size : tuple of int, optional
        The spatial dimensions of the convolution kernel.
data_layout : str, optional
Layout of the input.
kernel_layout : str, optional
Layout of the weight.
out_layout : str, optional
Layout of the output, by default, out_layout is the same as data_layout
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
# convert 2-way padding to 4-way padding
padding = get_pad_tuple2d(padding)
return _make.deformable_conv2d(
data,
offset,
weight,
strides,
padding,
dilation,
deformable_groups,
groups,
channels,
kernel_size,
data_layout,
kernel_layout,
out_layout,
out_dtype,
)
def bitpack(data, bits=1, pack_axis=1, bit_axis=2, pack_type="uint32", name="BitPack"):
"""Tensor packing for bitserial operations.
The values along the input tensor's pack_axis are quantized
and packed together into the specified pack_type in a new bit axis.
For example, consider bitpacking with data to be a tensor with shape `[1, 64, 128, 128]`,
pack_axis=1, bit_axis=4, pack_type=uint8, and bits=2. The output in this case will
be of shape `[1, 8, 128, 128, 2]`. The dimension of axis 1 has been reduced by a factor
of 8 since each value is packed into an 8-bit uint8. Axis 4 is now two bitplanes
representing the quantized value of the incoming data. The output tensor is now
ready to be used in a bitserial operation.
Parameters
----------
data : tvm.relay.expr
The incoming tensor to be packed.
bits : int
Number of bits that should be packed.
pack_axis : int
Axis that should be decomposed and packed.
bit_axis : int
New axis containing bitplane.
pack_type : str
Datatype to pack bits into.
name : str, optional
Name of the operation.
Returns
-------
result : tvm.relay.Expr
The packed tensor.
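    Examples
    --------
    A minimal usage sketch reproducing the shapes described above (the input
    dtype is illustrative only):
    .. code-block:: python
        x = relay.var("x", shape=(1, 64, 128, 128), dtype="int16")
        # Packed output shape is (1, 8, 128, 128, 2).
        y = relay.nn.bitpack(x, bits=2, pack_axis=1, bit_axis=4,
                             pack_type="uint8")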
"""
return _make.bitpack(data, bits, pack_axis, bit_axis, pack_type, name)
def bitserial_conv2d(
data,
weight,
strides=(1, 1),
padding=(0, 0),
channels=None,
kernel_size=(3, 3),
activation_bits=1,
weight_bits=1,
data_layout="NCHW",
kernel_layout="OIHW",
pack_dtype="uint32",
out_dtype="int16",
unipolar=True,
):
r"""2D convolution using bitserial computation.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
strides : tuple of int, optional
The strides of convolution.
padding : tuple of int, optional
The padding of convolution on both sides of inputs before convolution.
channels : int, optional
Number of output channels of this convolution.
kernel_size : tuple of int, optional
        The spatial dimensions of the convolution kernel.
activation_bits : int
Number of bits to pack for activations.
weight_bits : int
Number of bits to pack for weights.
data_layout : str, optional
Layout of the input.
kernel_layout : str, optional
Layout of the kernel
pack_dtype: str, optional
Datatype to pack bits into.
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
# convert 2-way padding to 4-way padding
padding = get_pad_tuple2d(padding)
return _make.bitserial_conv2d(
data,
weight,
strides,
padding,
channels,
kernel_size,
activation_bits,
weight_bits,
data_layout,
kernel_layout,
pack_dtype,
out_dtype,
unipolar,
)
def bitserial_dense(
data,
weight,
units=None,
data_bits=1,
weight_bits=1,
pack_dtype="uint32",
out_dtype="int16",
unipolar=True,
):
"""Bitserial Dense operator.
Applies matrix multiplication of two quantized matrices
using a fast bitserial algorithm.
.. math::
`Y = X * W`
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
units : int, optional
Number of hidden units of the dense transformation.
data_bits : int
Number of bits incoming tensor should be packed with.
weight_bits : int
Number of bits weight tensor should be packed with.
pack_dtype : str, optional
Datatype to pack individual bits into before computation.
out_dtype : str, optional
Specifies the output data type for mixed precision dense.
unipolar : bool, optional
Whether to use unipolar or bipolar quantization for inputs.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.bitserial_dense(
data, weight, units, data_bits, weight_bits, pack_dtype, out_dtype, unipolar
)
def cross_entropy(predictions, targets):
"""CrossEntropy without logits.
Parameters
----------
predictions : tvm.relay.Expr
The predictions.
targets : tvm.relay.Expr
The targets.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.cross_entropy(predictions, targets)
def cross_entropy_with_logits(predictions, targets):
"""CrossEntropy with logits.
Parameters
----------
predictions : tvm.relay.Expr
The predictions.
targets : tvm.relay.Expr
The targets.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.cross_entropy_with_logits(predictions, targets)
def nll_loss(predictions, targets, weights, reduction="mean", ignore_index=-100):
"""Negative log likelihood loss.
output{n, i_1, i_2, ..., i_k} = -p * w
where t = target{n, i_1, i_2, ..., i_k}
    p = predictions{n, t, i_1, i_2, ..., i_k}
w = weights{n, i_1, i_2, ..., i_k} if t != ignore_index else 0
result = reduction(output)
Parameters
----------
predictions : tvm.relay.Expr
The predictions.
targets : tvm.relay.Expr
The target value of each prediction.
weights : tvm.relay.Expr
The weight of each target value.
reduction : string
The reduction method to apply to the output.
Possible values are "mean", "sum" and "none".
ignore_index : int
The target value to ignore.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.nll_loss(predictions, targets, weights, reduction, ignore_index)
def depth_to_space(data, block_size, layout="NCHW", mode="DCR"):
"""Convert channels into spatial blocks.
Parameters
----------
data : tvm.relay.Expr
Input data with channels divisible by block_size**2
block_size : int
Size of blocks to convert channels into.
layout : string
One of NCHW or NHWC, indicates channel axis.
mode : string
One of DCR or CDR, indicates which order channels
are accessed in.
Returns
-------
result : tvm.relay.Expr
        Tensor with shape [in_batch, in_channel / (block_size * block_size),
        in_height * block_size, in_width * block_size]
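    Examples
    --------
    A minimal usage sketch (shapes are illustrative only); block_size=2 moves
    a factor of 4 from the channel axis into the spatial axes:
    .. code-block:: python
        x = relay.var("x", shape=(1, 16, 8, 8), dtype="float32")
        # Output shape is (1, 4, 16, 16).
        y = relay.nn.depth_to_space(x, block_size=2)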
"""
return _make.depth_to_space(data, block_size, layout, mode)
def space_to_depth(data, block_size, layout="NCHW"):
"""Convert spatial blocks into channels.
Parameters
----------
data : tvm.relay.Expr
Input data with spatial dimensions divisible by block_size
block_size : int
Size of blocks to decompose into channels.
layout : string
One of NCHW or NHWC, indicates channel axis.
Returns
-------
result : tvm.relay.Expr
Tensor with shape [in_batch, in_channel * block_size * block_size,
in_height / block_size, in_width / block_size]
"""
return _make.space_to_depth(data, block_size, layout)
def adaptive_max_pool1d(data, output_size=None, layout="NCW", out_layout=""):
r"""1D adaptive max pooling operator. This operator is experimental.
This operator takes data as input and does 1D max value calculation
across each window represented by W.
In the default case, where the data_layout is `NCW`
a data Tensor with shape `(batch_size, in_channels, width)`,
to produce an output Tensor with shape
(batch_size, in_channels, output_width).
The pooling kernel and stride sizes are automatically chosen for
desired output sizes.
For output_size:
    If this argument is not provided, input width will be used
    as output width.
If a single integer is provided for output_size, the output size is
(N x C x output_size) for any input (NCW).
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
    output_size : int or tuple of int, optional
        Output width.
layout : str, optional
Layout of the input.
out_layout : str, optional
Layout of the output.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
    output_size = output_size or []
if isinstance(output_size, int):
output_size = [output_size]
return _make.adaptive_max_pool1d(data, output_size, layout, out_layout)
def adaptive_avg_pool1d(data, output_size=None, layout="NCW", out_layout=""):
r"""1D adaptive average pooling operator. This operator is experimental.
This operator takes data as input and does 1D average value calculation
across each window represented by W.
In the default case, where the data_layout is `NCW`
a data Tensor with shape `(batch_size, in_channels, width)`,
to produce an output Tensor with shape
(batch_size, in_channels, output_width).
The pooling kernel and stride sizes are automatically chosen for
desired output sizes.
For output_size:
    If this argument is not provided, input width will be used
    as output width.
If a single integer is provided for output_size, the output size is
(N x C x output_size) for any input (NCW).
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
    output_size : int or tuple of int, optional
        Output width.
layout : str, optional
Layout of the input.
out_layout : str, optional
Layout of the output.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
    output_size = output_size or []
if isinstance(output_size, int):
output_size = [output_size]
return _make.adaptive_avg_pool1d(data, output_size, layout, out_layout)
def adaptive_max_pool2d(data, output_size=None, layout="NCHW", out_layout=""):
r"""2D adaptive max pooling operator. This operator is experimental.
This operator takes data as input and does 2D max value calculation
across each window represented by WxH.
In the default case, where the data_layout is `NCHW`
a data Tensor with shape `(batch_size, in_channels, height, width)`,
to produce an output Tensor with shape
(batch_size, in_channels, output_height, output_width).
The pooling kernel and stride sizes are automatically chosen for
desired output sizes.
For output_size:
If this argument is not provided, input height and width will be used
as output height and width.
If a single integer is provided for output_size, the output size is
(N x C x output_size x output_size) for any input (NCHW).
If a tuple of integers (height, width) are provided for output_size,
the output size is (N x C x height x width) for any input (NCHW).
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
    output_size : int or tuple of int, optional
Output height and width.
layout : str, optional
Layout of the input.
out_layout : str, optional
Layout of the output.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
    output_size = output_size or []
return _make.adaptive_max_pool2d(data, output_size, layout, out_layout)
def adaptive_avg_pool2d(data, output_size=None, layout="NCHW", out_layout=""):
r"""2D adaptive average pooling operator. This operator is experimental.
This operator takes data as input and does 2D average value calculation
across each window represented by WxH.
In the default case, where the data_layout is `NCHW`
a data Tensor with shape `(batch_size, in_channels, height, width)`,
to produce an output Tensor with shape
(batch_size, in_channels, output_height, output_width).
The pooling kernel and stride sizes are automatically chosen for
desired output sizes.
For output_size:
If this argument is not provided, input height and width will be used
as output height and width.
If a single integer is provided for output_size, the output size is
(N x C x output_size x output_size) for any input (NCHW).
If a tuple of integers (height, width) are provided for output_size,
the output size is (N x C x height x width) for any input (NCHW).
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
    output_size : int or tuple of int, optional
Output height and width.
layout : str, optional
Layout of the input.
out_layout : str, optional
Layout of the output.
Returns
-------
result : tvm.relay.Expr
The computed result.
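    Examples
    --------
    A minimal usage sketch (shapes are illustrative only); the kernel and
    strides are derived from the requested output size:
    .. code-block:: python
        x = relay.var("x", shape=(1, 3, 32, 32), dtype="float32")
        # Output shape is (1, 3, 7, 7).
        y = relay.nn.adaptive_avg_pool2d(x, output_size=(7, 7))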
"""
    output_size = output_size or []
return _make.adaptive_avg_pool2d(data, output_size, layout, out_layout)
def adaptive_max_pool3d(data, output_size=None, layout="NCDHW", out_layout=""):
r"""3D adaptive max pooling operator. This operator is experimental.
This operator takes data as input and does 3D max value calculation
across each window represented by DxWxH.
In the default case, where the data_layout is `NCDHW`
a data Tensor with shape `(batch_size, in_channels, depth, height, width)`,
to produce an output Tensor with shape
(batch_size, in_channels, output_depth, output_height, output_width).
The pooling kernel and stride sizes are automatically chosen for
desired output sizes.
For output_size:
If this argument is not provided, input depth, height and width will be used
as output depth, height and width.
If a single integer is provided for output_size, the output size is
(N x C x output_size x output_size x output_size) for any input (NCDHW).
If a tuple of integers (depth, height, width) are provided for output_size,
the output size is (N x C x depth x height x width) for any input (NCDHW).
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
    output_size : int or tuple of int, optional
        Output depth, height and width.
layout : str, optional
Layout of the input.
out_layout : str, optional
Layout of the output.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
    output_size = output_size or []
return _make.adaptive_max_pool3d(data, output_size, layout, out_layout)
def adaptive_avg_pool3d(data, output_size=None, layout="NCDHW", out_layout=""):
r"""3D adaptive avg pooling operator. This operator is experimental.
This operator takes data as input and does 3D avg value calculation
across each window represented by DxWxH.
In the default case, where the data_layout is `NCDHW`
a data Tensor with shape `(batch_size, in_channels, depth, height, width)`,
to produce an output Tensor with shape
(batch_size, in_channels, output_depth, output_height, output_width).
The pooling kernel and stride sizes are automatically chosen for
desired output sizes.
For output_size:
If this argument is not provided, input depth, height and width will be used
as output depth, height and width.
If a single integer is provided for output_size, the output size is
(N x C x output_size x output_size x output_size) for any input (NCDHW).
If a tuple of integers (depth, height, width) are provided for output_size,
the output size is (N x C x depth x height x width) for any input (NCDHW).
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
    output_size : int or tuple of int, optional
        Output depth, height and width.
layout : str, optional
Layout of the input.
out_layout : str, optional
Layout of the output.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
    output_size = output_size or []
return _make.adaptive_avg_pool3d(data, output_size, layout, out_layout)
def global_max_pool1d(data, layout="NCW", out_layout=""):
r"""1D global maximum pooling operator.
This operator takes data as input and does 1D max value calculation
across each window represented by W.
In the default case, where the data_layout is `NCW`
a data Tensor with shape `(batch_size, in_channels, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, w)
.. math::
        \mbox{out}(b, c, 1) = \max_{n=0, \ldots, w-1} \mbox{data}(b, c, n)
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
layout : str, optional
Layout of the input.
out_layout : str, optional
Layout of the output.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
output_size = [1]
return _make.adaptive_max_pool1d(data, output_size, layout, out_layout)
def global_avg_pool1d(data, layout="NCW", out_layout=""):
r"""1D global average pooling operator.
This operator takes data as input and does 1D average value calculation
across each window represented by W.
In the default case, where the data_layout is `NCW`
a data Tensor with shape `(batch_size, in_channels, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, w)
.. math::
\mbox{out}(b, c, 1) = \frac{1}{w} \sum_{n=0}^{w-1} \mbox{data}(b, c, n)
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
layout : str, optional
Layout of the input.
out_layout : str, optional
Layout of the output.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
output_size = [1]
return _make.adaptive_avg_pool1d(data, output_size, layout, out_layout)
def global_max_pool3d(data, layout="NCDHW", out_layout=""):
r"""3D global maximum pooling operator.
This operator takes data as input and does 3D max value calculation
across each window represented by DxWxH.
In the default case, where the data_layout is `NCDHW`
a data Tensor with shape `(batch_size, in_channels, depth, height, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, d, h, w)
.. math::
        \mbox{out}(b, c, 1, 1, 1) = \max_{l=0, \ldots, d-1} \max_{m=0, \ldots, h-1}
        \max_{n=0, \ldots, w-1} \mbox{data}(b, c, l, m, n)
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
layout : str, optional
Layout of the input.
out_layout : str, optional
Layout of the output.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
output_size = [1, 1, 1]
return _make.adaptive_max_pool3d(data, output_size, layout, out_layout)
def global_avg_pool3d(data, layout="NCDHW", out_layout=""):
r"""3D global average pooling operator.
This operator takes data as input and does 3D average value calculation
across each window represented by DxWxH.
In the default case, where the data_layout is `NCDHW`
a data Tensor with shape `(batch_size, in_channels, depth, height, width)`,
to produce an output Tensor with the following rule:
with data of shape (b, c, d, h, w)
.. math::
\mbox{out}(b, c, 1, 1, 1) = \frac{1}{d * h * w} \sum_{l=0}^{d-1} \sum_{m=0}^{h-1}
\sum_{n=0}^{w-1} \mbox{data}(b, c, l, m, n)
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
layout : str, optional
Layout of the input.
out_layout : str, optional
Layout of the output.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
output_size = [1, 1, 1]
return _make.adaptive_avg_pool3d(data, output_size, layout, out_layout)
def correlation(
data1, data2, kernel_size, max_displacement, stride1, stride2, padding, is_multiply, layout
):
r"""Applies correlation to inputs.
The correlation layer performs multiplicative patch comparisons between two feature maps.
Given two multi-channel feature maps :math:`f_{1}, f_{2}`, with :math:`w`, :math:`h`, and
:math:`c` being their width, height, and number of channels, the correlation layer lets the
network compare each patch from :math:`f_{1}` with each patch from :math:`f_{2}`.
For now we consider only a single comparison of two patches. The 'correlation' of two patches
centered at :math:`x_{1}` in the first map and :math:`x_{2}` in the second map is then defined
as:
.. math::
c(x_{1}, x_{2}) = \sum_{o \in [-k,k] \times [-k,k]} <f_{1}(x_{1} + o), f_{2}(x_{2} + o)>
for a square patch of size :math:`K:=2k+1`.
Note that the equation above is identical to one step of a convolution in neural networks, but
instead of convolving data with a filter, it convolves data with other data. For this
reason, it has no training weights.
Computing :math:`c(x_{1}, x_{2})` involves :math:`c * K^{2}` multiplications. Comparing all
patch combinations involves :math:`w^{2}*h^{2}` such computations.
Given a maximum displacement :math:`d`, for each location :math:`x_{1}` it computes
correlations :math:`c(x_{1}, x_{2})` only in a neighborhood of size :math:`D:=2d+1`,
by limiting the range of :math:`x_{2}`. We use strides :math:`s_{1}, s_{2}`, to quantize
:math:`x_{1}` globally and to quantize :math:`x_{2}` within the neighborhood
centered around :math:`x_{1}`.
The final output is defined by the following expression:
.. math::
out[n, q, i, j] = c(x_{i, j}, x_{q})
where :math:`i` and :math:`j` enumerate spatial locations in :math:`f_{1}`, and :math:`q`
denotes the :math:`q^{th}` neighborhood of :math:`x_{i,j}`.
Parameters
----------
data1 : tvm.te.Tensor
4-D with shape [batch, channel, height, width]
data2 : tvm.te.Tensor
4-D with shape [batch, channel, height, width]
kernel_size: int
Kernel size for correlation, must be an odd number
max_displacement: int
Max displacement of Correlation
stride1: int
Stride for data1
stride2: int
        Stride for data2 within the neighborhood centered around data1
padding : int or a list/tuple of 2 or 4 ints
Padding size, or
[pad_height, pad_width] for 2 ints, or
[pad_top, pad_left, pad_bottom, pad_right] for 4 ints
is_multiply: bool
        operation type is either multiplication or subtraction
layout: str
layout of data1, data2 and the output
Returns
-------
Output : tvm.te.Tensor
4-D with shape [batch, out_channel, out_height, out_width]
"""
if isinstance(padding, int):
padding = (padding, padding)
return _make.correlation(
data1, data2, kernel_size, max_displacement, stride1, stride2, padding, is_multiply, layout
)
def space_to_batch_nd(data, block_shape, paddings, pad_value=0):
r"""Divide spatial dimensions of the data into a grid of blocks
and interleave them into batch dim.
Parameters
----------
data : tvm.te.Tensor
N-D with shape [batch, spatial_shape, remaining_shape]
block_shape : relay.Expr
1-D of size [M] where M is number of spatial dims, specifies block size
for each spatial dimension.
paddings : relay.Expr
2-D of shape [M, 2] where M is number of spatial dims, specifies
[before, after] paddings for each spatial dimension.
pad_value : float, or relay.Expr, optional, default=0
The value used for padding.
Returns
-------
result : relay.Expr
N-D Tensor with shape
[in_batch * prod(block_shape),
padded_data[1] / block_shape[0], ..., padded_data[M] / block_shape[M-1],
remaining_shape]
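    Examples
    --------
    A minimal usage sketch (shapes are illustrative only); a 2x2 block grid
    with no padding quadruples the batch and halves each spatial dim:
    .. code-block:: python
        x = relay.var("x", shape=(1, 4, 4, 3), dtype="float32")
        # Output shape is (4, 2, 2, 3).
        y = relay.nn.space_to_batch_nd(x, block_shape=[2, 2],
                                       paddings=[[0, 0], [0, 0]])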
"""
return _make.space_to_batch_nd(data, block_shape, paddings, pad_value)
def batch_to_space_nd(data, block_shape, crops):
r"""Reshape the batch dimension into spatial dimensions.
Parameters
----------
data : tvm.te.Tensor
N-D with shape [batch, spatial_shape, remaining_shape]
block_shape : relay.Expr
1-D of size [M] where M is number of spatial dims, specifies block size
for each spatial dimension.
crops : relay.Expr
2-D of shape [M, 2] where M is number of spatial dims, specifies
[begin, end] crop size for each spatial dimension.
Returns
-------
result : relay.Expr
N-D Tensor with shape
[batch / prod(block_shape),
in_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], ...,
in_shape[M] * block_shape[M-1] - crops[M-1, 0] - crops[M-1, 1],
remaining_shape]
"""
return _make.batch_to_space_nd(data, block_shape, crops)
def conv2d_backward_weight(
grad,
data,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
grad_layout="NCHW",
data_layout="NCHW",
kernel_layout="OIHW",
out_dtype="",
):
r"""The gradient of conv2d with respect to weight.
This operator takes the output gradient `grad` and convolves it with `data` as
the convolution kernel, to produce the gradient with respect to weight.
Note that the parameter `kernel_size` is the spatial size of the corresponding
forward convolution kernel, not that of `data`. `grad_layout` and
`kernel_layout` are the layouts of `grad` and the weight gradient respectively.
Other parameters are the same as the conv2d op. See its documentation for more
details.
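    Example
    -------
    A sketch of the shape relationships; all shapes here are assumptions for illustration:
    .. code-block:: python
        # forward: conv2d(data, weight) with a (16, 8, 3, 3) OIHW kernel, stride 1, no padding
        data = relay.var("data", shape=(1, 8, 32, 32))
        grad = relay.var("grad", shape=(1, 16, 30, 30))  # output gradient of the forward conv
        dw = relay.nn.conv2d_backward_weight(grad, data, kernel_size=(3, 3), channels=16)
        # dw has the shape of the forward kernel: (16, 8, 3, 3)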
"""
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if isinstance(strides, int):
strides = (strides, strides)
if isinstance(dilation, int):
dilation = (dilation, dilation)
padding = get_pad_tuple2d(padding)
return _make.conv2d_backward_weight(
grad,
data,
strides,
padding,
dilation,
groups,
channels,
kernel_size,
grad_layout,
data_layout,
kernel_layout,
out_dtype,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/nn/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable
"""NN operator common utilities"""
from tvm.ir import container
def get_pad_tuple1d(padding):
"""Common code to get the 1 dimensional pad option
Parameters
----------
padding : Union[int, Tuple[int, ...]]
Padding size
Returns
-------
pad_left : int
Padding size on left
pad_right : int
Padding size on right.
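    Example
    -------
    For illustration, values following directly from the code below:
    .. code-block:: python
        get_pad_tuple1d(1)       # (1, 1)
        get_pad_tuple1d((1,))    # (1, 1)
        get_pad_tuple1d((1, 2))  # (1, 2)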
"""
# compute the padding size
if isinstance(padding, container.Array):
padding = list(padding)
if isinstance(padding, (tuple, list)):
if len(padding) == 1:
pad_w = padding[0] * 2
elif len(padding) == 2:
return padding[0], padding[1]
else:
raise ValueError("Size of padding can only be 1 or 2")
elif isinstance(padding, int):
pad_w = padding * 2
else:
raise ValueError("Unknown padding option %s" % padding)
pad_left = (pad_w + 1) // 2
return pad_left, pad_w - pad_left
def get_pad_tuple2d(padding):
"""Common code to get the pad option
Parameters
----------
padding : Union[int, Tuple[int, ...]]
Padding size
Returns
-------
pad_top : int
Padding size on top
pad_left : int
Padding size on left
pad_down : int
Padding size on down.
pad_right : int
Padding size on right.
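    Example
    -------
    For illustration, values following directly from the code below:
    .. code-block:: python
        get_pad_tuple2d(1)             # (1, 1, 1, 1)
        get_pad_tuple2d((1, 2))        # (1, 2, 1, 2)
        get_pad_tuple2d((1, 2, 3, 4))  # (1, 2, 3, 4)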
"""
# compute the padding size
if isinstance(padding, container.Array):
padding = list(padding)
if isinstance(padding, (tuple, list)):
if len(padding) == 2:
pad_h = padding[0] * 2
pad_w = padding[1] * 2
elif len(padding) == 4:
return padding[0], padding[1], padding[2], padding[3]
else:
raise ValueError("Size of padding can only be 2 or 4")
elif isinstance(padding, int):
pad_h = pad_w = padding * 2
else:
raise ValueError("Unknown padding option %s" % padding)
pad_top = (pad_h + 1) // 2
pad_left = (pad_w + 1) // 2
return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left
def get_pad_tuple3d(padding):
"""Common code to get the pad option
Parameters
----------
padding : Union[int, Tuple[int, ...]]
Padding size
Returns
-------
pad_front : int
Padding size on front
pad_top : int
Padding size on top
pad_left : int
Padding size on left
pad_back : int
Padding size on back
pad_down : int
Padding size on down.
pad_right : int
Padding size on right.
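    Example
    -------
    For illustration, values following directly from the code below:
    .. code-block:: python
        get_pad_tuple3d(1)                   # (1, 1, 1, 1, 1, 1)
        get_pad_tuple3d((1, 2, 3))           # (1, 2, 3, 1, 2, 3)
        get_pad_tuple3d((1, 2, 3, 4, 5, 6))  # (1, 2, 3, 4, 5, 6)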
"""
# compute the padding size
if isinstance(padding, container.Array):
padding = list(padding)
if isinstance(padding, (tuple, list)):
if len(padding) == 3:
pad_d = padding[0] * 2
pad_h = padding[1] * 2
pad_w = padding[2] * 2
elif len(padding) == 6:
return padding[0], padding[1], padding[2], padding[3], padding[4], padding[5]
else:
raise ValueError("Size of padding can only be 3 or 6")
elif isinstance(padding, int):
pad_d = pad_h = pad_w = padding * 2
else:
raise ValueError("Unknown padding option %s" % padding)
pad_front = (pad_d + 1) // 2
pad_top = (pad_h + 1) // 2
pad_left = (pad_w + 1) // 2
return pad_front, pad_top, pad_left, pad_d - pad_front, pad_h - pad_top, pad_w - pad_left
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument,invalid-name
"""The base node types for the Relay language."""
import tvm._ffi
import tvm.ir
import tvm.ir._ffi_api
from tvm.driver import build, lower
from tvm.runtime import Object
from tvm.target import GenericFunc, get_native_generic_func
from . import _make
def get(op_name):
"""Get the Op for a given name
Parameters
----------
op_name : str
The operator name
Returns
-------
op : Op
The op of the corresponding name
"""
return tvm.ir.Op.get(op_name)
def register(op_name, describe=""):
"""Get the Op for a given name.
when the op_name is not registered, create a new empty op with the given name.
when the op_name has been registered, abort with an error message.
Parameters
----------
op_name : str
The operator name
describe : Optional[str]
The operator description
"""
tvm.ir._ffi_api.RegisterOp(op_name, describe)
def register_stateful(op_name, stateful, level=10):
"""Register stateful flag for an op.
Parameters
----------
op_name : str
The name of the op.
stateful : bool
The stateful flag.
level : int
The priority level
"""
tvm.ir.register_op_attr(op_name, "TOpIsStateful", stateful, level)
class OpPattern(object):
"""Operator generic patterns
See Also
--------
topi.tag : Contains explanation of the tag type.
"""
# Elementwise operator
ELEMWISE = 0
# Broadcast operator
BROADCAST = 1
# Injective mapping
INJECTIVE = 2
    # Communicative reduction operator
COMM_REDUCE = 3
# Complex op, can still fuse ewise into it
OUT_ELEMWISE_FUSABLE = 4
# Represents tuple node
TUPLE = 7
# Not fusable opaque op
OPAQUE = 8
@tvm._ffi.register_object("relay.OpImplementation")
class OpImplementation(Object):
"""Operator implementation"""
def compute(self, attrs, inputs, out_type):
"""Call compute function.
Parameters
----------
attrs : Attrs
Op attributes.
inputs : list[te.tensor.Tensor]
The input tensors.
out_type : relay.Type
The output type.
Returns
-------
outs : list[te.tensor.Tensor]
The output tensors.
"""
return _OpImplementationCompute(self, attrs, inputs, out_type)
def schedule(self, attrs, outs, target):
"""Call schedule function.
Parameters
----------
attrs : Attrs
Op attributes.
outs : list[te.tensor.Tensor]
The output tensors.
target : tvm.target.Target
The target to schedule the op.
Returns
-------
schedule : tvm.te.Schedule
The schedule.
"""
return _OpImplementationSchedule(self, attrs, outs, target)
@tvm._ffi.register_object("relay.OpSpecialization")
class OpSpecialization(Object):
"""Operator specialization"""
@tvm._ffi.register_object("relay.OpStrategy")
class OpStrategy(Object):
"""Operator strategy"""
def __init__(self):
self.__init_handle_by_constructor__(_make.OpStrategy)
def add_implementation(self, compute, schedule, name="default", plevel=10):
"""Add an implementation to the strategy
Parameters
----------
compute : function (attrs: Attrs, inputs: List[Tensor], out_type: Type)
-> List[Tensor]
The compute function.
schedule : function (attrs: Attrs, outs: List[Tensor], target:Target) -> Schedule
The schedule function.
name : str
The name of implementation.
plevel : int
The priority level of implementation.
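        Example
        -------
        A hedged sketch; ``my_compute`` and ``my_schedule`` are placeholder
        functions with the signatures documented above:
        .. code-block:: python
            strategy = OpStrategy()
            strategy.add_implementation(my_compute, my_schedule, name="my_impl", plevel=15)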
"""
_OpStrategyAddImplementation(self, compute, schedule, name, plevel)
def _wrap_default_fstrategy(compute, schedule, name):
def _fstrategy(attrs, inputs, out_type, target):
strategy = OpStrategy()
strategy.add_implementation(compute, schedule, name=name)
return strategy
return _fstrategy
def _create_fstrategy_from_schedule(op_name, schedule):
assert hasattr(schedule, "dispatch_dict")
compute = get(op_name).get_attr("FTVMCompute")
assert compute is not None, "FTVMCompute is not registered for op %s" % op_name
fstrategy = get_native_generic_func("{}_strategy".format(op_name))
name_pfx = schedule.__name__
name_pfx = name_pfx[name_pfx.index("_") + 1 :]
fstrategy.set_default(
_wrap_default_fstrategy(compute, schedule.fdefault, "%s.generic" % name_pfx)
)
for key, sch in schedule.dispatch_dict.items():
fstrategy.register(_wrap_default_fstrategy(compute, sch, "%s.%s" % (name_pfx, key)), [key])
return fstrategy
def register_compute(op_name, compute=None, level=10):
"""Register compute function for an op.
Parameters
----------
op_name : str
The name of the op.
compute : function (attrs: Attrs, inputs: List[Tensor], out_type: Type)
-> List[Tensor]
The compute function.
level : int
The priority level
"""
return tvm.ir.register_op_attr(op_name, "FTVMCompute", compute, level)
def register_strategy(op_name, fstrategy=None, level=10):
"""Register strategy function for an op.
Parameters
----------
op_name : str
The name of the op.
fstrategy : function (attrs: Attrs, inputs: List[Tensor], out_type: Type,
target:Target) -> OpStrategy
        The strategy function. It must be a native GenericFunc.
level : int
The priority level
"""
if not isinstance(fstrategy, GenericFunc):
assert hasattr(fstrategy, "generic_func_node")
fstrategy = fstrategy.generic_func_node
return tvm.ir.register_op_attr(op_name, "FTVMStrategy", fstrategy, level)
def register_schedule(op_name, schedule, level=10):
"""Register schedule function for an op.
    This is used when the compute function is the same for all targets and only
    the schedule differs. It requires that FTVMCompute is already registered to
    the op.
Parameters
----------
op_name : str
The name of the op.
schedule : function (attrs: Attrs, outs: List[Tensor], target:Target) -> Schedule
        The schedule function. It must be created with target.generic_func.
level : int
The priority level
"""
fstrategy = _create_fstrategy_from_schedule(op_name, schedule)
return register_strategy(op_name, fstrategy, level)
def register_injective_schedule(op_name, level=10):
"""Register injective schedule function for an op.
Parameters
----------
op_name : str
The name of the op.
level : int
The priority level
"""
return register_schedule(op_name, _schedule_injective, level)
def register_broadcast_schedule(op_name, level=10):
"""Register broadcast schedule function for an op.
Parameters
----------
op_name : str
The name of the op.
level : int
The priority level
"""
return register_schedule(op_name, _schedule_injective, level)
def register_reduce_schedule(op_name, level=10):
"""Register reduce schedule function for an op.
Parameters
----------
op_name : str
The name of the op.
level : int
The priority level
"""
return register_schedule(op_name, _schedule_reduce, level)
def register_alter_op_layout(op_name, alter_layout=None, level=10):
"""Register alter op layout function for an op
Parameters
----------
op_name : str
The name of the operator
alter_layout: function (attrs: Attrs, inputs: List[Expr]) -> new_expr: Expr
The function for changing the layout or replacing the operator
level : int
The priority level
"""
return tvm.ir.register_op_attr(op_name, "FTVMAlterOpLayout", alter_layout, level)
def register_convert_op_layout(op_name, convert_layout=None, level=10):
"""Register convert op layout function for an op
Parameters
----------
op_name : str
The name of the operator
convert_layout: function (attrs: Attrs, inputs: List[Expr]) -> new_expr: Expr
The function for changing the layout or replacing the operator
level : int
The priority level
"""
return tvm.ir.register_op_attr(op_name, "FTVMConvertOpLayout", convert_layout, level)
def register_infer_correct_layout(op_name, infer_layout=None, level=10):
"""Register infer op layout function for an op
Parameters
----------
op_name : str
The name of the operator
infer_layout: function (attrs: Attrs, inputs: List[Layout]) -> InferCorrectLayoutOutput
The function to infer correct layout
level : int
The priority level
"""
return tvm.ir.register_op_attr(op_name, "FInferCorrectLayout", infer_layout, level)
def register_legalize(op_name, legal_op=None, level=10):
"""Register legal transformation function for an op
Parameters
----------
op_name : str
The name of the operator
legal_op: function (attrs: Attrs, inputs: List[Expr]) -> new_expr: Expr
The function for transforming an expr to another expr.
level : int
The priority level
"""
return tvm.ir.register_op_attr(op_name, "FTVMLegalize", legal_op, level)
def register_pattern(op_name, pattern, level=10):
"""Register operator pattern for an op.
Parameters
----------
op_name : str
The name of the op.
pattern : int
The pattern being used.
level : int
The priority level
"""
return tvm.ir.register_op_attr(op_name, "TOpPattern", pattern, level)
def register_gradient(op_name, fgradient=None, level=10):
"""Register operator gradient function for an op.
Parameters
----------
op_name : str
The name of the op.
fgradient : function (orig_expr : Expr, output_grad : Expr) -> new_expr : Expr
The gradient being used.
level : int
The priority level
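    Example
    -------
    A hedged sketch; ``"my.op"`` and the gradient body are illustrative only:
    .. code-block:: python
        @register_gradient("my.op")
        def my_op_grad(orig, grad):
            # return one gradient expression per input of the original call
            return [grad * tvm.relay.ones_like(orig.args[0])]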
"""
return tvm.ir.register_op_attr(op_name, "FPrimalGradient", fgradient, level)
def register_shape_func(op_name, data_dependent, shape_func=None, level=10):
"""Register operator shape function for an op.
Parameters
----------
op_name : str
The name of the op.
data_dependent : bool or list of bool
Whether the shape function depends on input data. If this is a list of bool,
the length of the list must be the same as the number of arguments of this op.
The list specifies per-input data dependence of the op.
shape_func : function (attrs: Attrs, inputs: List[Tensor], out_ndims: List[IndexExpr])
-> shape_tensors: List<Tensor>
The function for computing the dynamic output shapes
level : int
The priority level
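    Example
    -------
    A hedged sketch; ``"my.op"`` and ``_my_shape_func`` are placeholders:
    .. code-block:: python
        # the output shape depends only on input shapes, not on input values
        register_shape_func("my.op", False, _my_shape_func)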
"""
if not isinstance(data_dependent, list):
data_dependent = [data_dependent]
get(op_name).set_attr("TShapeDataDependent", data_dependent, level)
return tvm.ir.register_op_attr(op_name, "FShapeFunc", shape_func, level)
def register_external_compiler(op_name, fexternal=None, level=10):
"""Register the external compiler for an op.
Parameters
----------
op_name : str
The name of the operator.
fexternal : function (attrs: Attrs, args: List[Expr], compiler: str)
-> new_expr: Expr
The function for wrapping a call expr with compiler_begin and
compiler_end.
level : int
The priority level
"""
return tvm.ir.register_op_attr(op_name, "FTVMExternalCompiler", fexternal, level)
def register_fake_quantization_to_integer(op_name, func=None, level=10):
"""Register quantize function for an op
    Given an op and Affine Types on its inputs, this function should return the op
in affine space/integer operators and the new type of the output, where affine
denotes the transformation x_real = (x_affine - zero_point) * scale
Parameters
----------
op_name : str
The name of the operator
func: function (expr: Expr, map: Map<Expr, AffineType>) -> new_expr: Expr
The function for translating the op into affine space and integer operators
level : int
The priority level
"""
return tvm.ir.register_op_attr(op_name, "FTVMFakeQuantizationToInteger", func, level)
def register_mixed_precision_conversion(op_name, func=None, level=10):
"""Register mixed precision conversion function for an op
Given an op the function should return information on how the value should be
converted. Specifically the function should take a call node and the target
mixed precision datatype (e.g. FP16) and return the conversion category
(see python/tvm/relay/transform/mixed_precision.py) as well as the accumulation
and output datatype of the operation in the mixed precision dtype space.
Parameters
----------
op_name : str
The name of the operator
func: function (call_node: relay.Call, target_dtype: string)
-> [conversion category, accumulation dtype, output dtype]: [int, string, string]
A function which given a call_node and target_dtype (e.g. FP16) returns the
conversion category and associated accumulation/output of the operation
when transformed into the mixed precision dtype space.
level : int
The priority level
"""
return tvm.ir.register_op_attr(op_name, "FTVMMixedPrecisionConversionType", func, level)
@tvm._ffi.register_func("relay.op.compiler._lower")
def _lower(name, schedule, inputs, outputs):
return lower(schedule, list(inputs) + list(outputs), name=name)
@tvm._ffi.register_func("relay.op.compiler._build")
def _build(lowered_funcs):
return build(lowered_funcs, target="llvm")
_schedule_injective = None
_schedule_reduce = None
__DEBUG_COUNTER__ = 0
def debug(expr, debug_func=None):
"""The main entry point to the debugger."""
global __DEBUG_COUNTER__
if debug_func:
name = "debugger_func{}".format(__DEBUG_COUNTER__)
tvm._ffi.register_func(name, debug_func)
__DEBUG_COUNTER__ += 1
else:
name = ""
return _make.debug(expr, name)
tvm._ffi._init_api("relay.op", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/op_attrs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The attributes node used for Relay operators"""
from tvm.ir import Attrs
import tvm._ffi
@tvm._ffi.register_object("relay.attrs.Conv1DAttrs")
class Conv1DAttrs(Attrs):
"""Attributes for nn.conv1d"""
@tvm._ffi.register_object("relay.attrs.Conv2DAttrs")
class Conv2DAttrs(Attrs):
"""Attributes for nn.conv2d"""
@tvm._ffi.register_object("relay.attrs.Conv2DWinogradAttrs")
class Conv2DWinogradAttrs(Attrs):
"""Attributes for nn.contrib_conv2d_winograd_without_weight_transform"""
@tvm._ffi.register_object("relay.attrs.Conv3DAttrs")
class Conv3DAttrs(Attrs):
"""Attributes for nn.conv3d"""
@tvm._ffi.register_object("relay.attrs.Conv3DWinogradAttrs")
class Conv3DWinogradAttrs(Attrs):
"""Attributes for nn.contrib_conv3d_winograd_without_weight_transform"""
@tvm._ffi.register_object("relay.attrs.ConvWinogradWeightTransformAttrs")
class ConvWinogradWeightTransformAttrs(Attrs):
"""Attributes for nn.contrib_convNd_winograd_weight_transform"""
@tvm._ffi.register_object("relay.attrs.Conv2DWinogradNNPACKWeightTransformAttrs")
class Conv2DWinogradNNPACKWeightTransformAttrs(Attrs):
"""Attributes for nn.contrib_conv2d_winograd_nnpack_weight_transform"""
@tvm._ffi.register_object("relay.attrs.GlobalPool2DAttrs")
class GlobalPool2DAttrs(Attrs):
"""Attributes for nn.global_pool"""
@tvm._ffi.register_object("relay.attrs.BiasAddAttrs")
class BiasAddAttrs(Attrs):
"""Atttribute of nn.bias_add"""
@tvm._ffi.register_object("relay.attrs.MatmulAttrs")
class MatmulAttrs(Attrs):
"""Attributes for nn.matmul"""
@tvm._ffi.register_object("relay.attrs.DenseAttrs")
class DenseAttrs(Attrs):
"""Attributes for nn.dense"""
@tvm._ffi.register_object("relay.attrs.DensePackAttrs")
class DensePackAttrs(Attrs):
"""Attributes for nn.contrib_dense_pack"""
@tvm._ffi.register_object("relay.attrs.BatchMatmulAttrs")
class BatchMatmulAttrs(Attrs):
"""Attributes for nn.batch_matmul"""
@tvm._ffi.register_object("relay.attrs.SoftmaxAttrs")
class SoftmaxAttrs(Attrs):
"""Attributes for nn.softmax"""
@tvm._ffi.register_object("relay.attrs.FIFOBufferAttrs")
class FIFOBufferAttrs(Attrs):
"""Attributes for nn.fifo_buffer"""
@tvm._ffi.register_object("relay.attrs.UpSamplingAttrs")
class UpSamplingAttrs(Attrs):
"""Attributes for nn.upsampling"""
@tvm._ffi.register_object("relay.attrs.UpSampling3DAttrs")
class UpSampling3DAttrs(Attrs):
"""Attributes for nn.upsampling3d"""
@tvm._ffi.register_object("relay.attrs.PadAttrs")
class PadAttrs(Attrs):
"""Attributes for nn.pad"""
@tvm._ffi.register_object("relay.attrs.MirrorPadAttrs")
class MirrorPadAttrs(Attrs):
"""Attributes for nn.mirror_pad"""
@tvm._ffi.register_object("relay.attrs.LeakyReluAttrs")
class LeakyReluAttrs(Attrs):
"""Attributes for nn.leaky_relu"""
@tvm._ffi.register_object("relay.attrs.PReluAttrs")
class PReluAttrs(Attrs):
"""Attributes for nn.prelu"""
@tvm._ffi.register_object("relay.attrs.DropoutAttrs")
class DropoutAttrs(Attrs):
"""Attributes for nn.dropout"""
@tvm._ffi.register_object("relay.attrs.BatchNormAttrs")
class BatchNormAttrs(Attrs):
"""Attributes for nn.batch_norm"""
@tvm._ffi.register_object("relay.attrs.LRNAttrs")
class LRNAttrs(Attrs):
"""Attributes for nn.lrn"""
@tvm._ffi.register_object("relay.attrs.L2NormalizeAttrs")
class L2NormalizeAttrs(Attrs):
"""Attributes for nn.l2_normalize"""
@tvm._ffi.register_object("relay.attrs.DeformableConv2DAttrs")
class DeformableConv2DAttrs(Attrs):
"""Attributes for nn.deformable_conv2d"""
@tvm._ffi.register_object("relay.attrs.Resize1DAttrs")
class Resize1DAttrs(Attrs):
"""Attributes for image.resize1d"""
@tvm._ffi.register_object("relay.attrs.Resize2DAttrs")
class Resize2DAttrs(Attrs):
"""Attributes for image.resize2d"""
@tvm._ffi.register_object("relay.attrs.Resize3DAttrs")
class Resize3DAttrs(Attrs):
"""Attributes used in resize3d operators"""
@tvm._ffi.register_object("relay.attrs.CropAndResizeAttrs")
class CropAndResizeAttrs(Attrs):
"""Attributes for image.crop_and_resize"""
@tvm._ffi.register_object("relay.attrs.Dilation2DAttrs")
class Dilation2DAttrs(Attrs):
"""Attributes for image.dilation2d"""
@tvm._ffi.register_object("relay.attrs.ArgsortAttrs")
class ArgsortAttrs(Attrs):
"""Attributes for algorithm.argsort"""
@tvm._ffi.register_object("relay.attrs.OnDeviceAttrs")
class OnDeviceAttrs(Attrs):
"""Attributes for annotation.on_device"""
@tvm._ffi.register_object("relay.attrs.DebugAttrs")
class DebugAttrs(Attrs):
"""Attributes for debug"""
@tvm._ffi.register_object("relay.attrs.CompilerAttrs")
class CompilerAttrs(Attrs):
"""Attributes for compiler"""
@tvm._ffi.register_object("relay.attrs.DeviceCopyAttrs")
class DeviceCopyAttrs(Attrs):
"""Attributes for annotation.device_copy"""
@tvm._ffi.register_object("relay.attrs.CastAttrs")
class CastAttrs(Attrs):
"""Attributes for transform.cast"""
@tvm._ffi.register_object("relay.attrs.ConcatenateAttrs")
class ConcatenateAttrs(Attrs):
"""Attributes for tensor.concatenate"""
@tvm._ffi.register_object("relay.attrs.TransposeAttrs")
class TransposeAttrs(Attrs):
"""Attributes for transform.transpose"""
@tvm._ffi.register_object("relay.attrs.ReshapeAttrs")
class ReshapeAttrs(Attrs):
"""Attributes for transform.reshape"""
@tvm._ffi.register_object("relay.attrs.ReshapeLikeAttrs")
class ReshapeLikeAttrs(Attrs):
"""Attributes for transform.reshape_like"""
@tvm._ffi.register_object("relay.attrs.GatherAttrs")
class GatherAttrs(Attrs):
"""Attributes for transform.gather"""
@tvm._ffi.register_object("relay.attrs.TakeAttrs")
class TakeAttrs(Attrs):
"""Attributes for transform.take"""
@tvm._ffi.register_object("relay.attrs.InitOpAttrs")
class InitOpAttrs(Attrs):
"""Attributes for ops specifying a tensor"""
@tvm._ffi.register_object("relay.attrs.ArangeAttrs")
class ArangeAttrs(Attrs):
"""Attributes used in arange operators"""
@tvm._ffi.register_object("relay.attrs.MeshgridAttrs")
class MeshgridAttrs(Attrs):
"""Attributes used in arange operators"""
@tvm._ffi.register_object("relay.attrs.StackAttrs")
class StackAttrs(Attrs):
"""Attributes used in stack operators"""
@tvm._ffi.register_object("relay.attrs.RepeatAttrs")
class RepeatAttrs(Attrs):
"""Attributes used in repeat operators"""
@tvm._ffi.register_object("relay.attrs.TileAttrs")
class TileAttrs(Attrs):
"""Attributes used in tile operators"""
@tvm._ffi.register_object("relay.attrs.ReverseAttrs")
class ReverseAttrs(Attrs):
"""Attributes used in reverse operators"""
@tvm._ffi.register_object("relay.attrs.ReverseSequenceAttrs")
class ReverseSequenceAttrs(Attrs):
"""Attributes used in reverse sequence operators"""
@tvm._ffi.register_object("relay.attrs.SqueezeAttrs")
class SqueezeAttrs(Attrs):
"""Attributes used in squeeze operators"""
@tvm._ffi.register_object("relay.attrs.SplitAttrs")
class SplitAttrs(Attrs):
"""Attributes for transform.split"""
@tvm._ffi.register_object("relay.attrs.StridedSliceAttrs")
class StridedSliceAttrs(Attrs):
"""Attributes for transform.stranded_slice"""
@tvm._ffi.register_object("relay.attrs.SliceLikeAttrs")
class SliceLikeAttrs(Attrs):
"""Attributes for transform.slice_like"""
@tvm._ffi.register_object("relay.attrs.ClipAttrs")
class ClipAttrs(Attrs):
"""Attributes for transform.clip"""
@tvm._ffi.register_object("relay.attrs.LayoutTransformAttrs")
class LayoutTransformAttrs(Attrs):
"""Attributes for transform.layout_transform"""
@tvm._ffi.register_object("relay.attrs.ShapeOfAttrs")
class ShapeOfAttrs(Attrs):
"""Attributes for tensor.shape_of"""
@tvm._ffi.register_object("relay.attrs.MultiBoxPriorAttrs")
class MultiBoxPriorAttrs(Attrs):
"""Attributes for vision.multibox_prior"""
@tvm._ffi.register_object("relay.attrs.MultiBoxTransformLocAttrs")
class MultiBoxTransformLocAttrs(Attrs):
"""Attributes for vision.multibox_transform_loc"""
@tvm._ffi.register_object("relay.attrs.GetValidCountsAttrs")
class GetValidCountsAttrs(Attrs):
"""Attributes for vision.get_valid_counts"""
@tvm._ffi.register_object("relay.attrs.NonMaximumSuppressionAttrs")
class NonMaximumSuppressionAttrs(Attrs):
"""Attributes for vision.non_maximum_suppression"""
@tvm._ffi.register_object("relay.attrs.AllClassNonMaximumSuppressionAttrs")
class AllClassNonMaximumSuppressionAttrs(Attrs):
"""Attributes for vision.all_classnon_maximum_suppression"""
@tvm._ffi.register_object("relay.attrs.ROIAlignAttrs")
class ROIAlignAttrs(Attrs):
"""Attributes for vision.roi_align"""
@tvm._ffi.register_object("relay.attrs.ROIPoolAttrs")
class ROIPoolAttrs(Attrs):
"""Attributes for vision.roi_pool"""
@tvm._ffi.register_object("relay.attrs.YoloReorgAttrs")
class YoloReorgAttrs(Attrs):
"""Attributes for vision.yolo_reorg"""
@tvm._ffi.register_object("relay.attrs.ProposalAttrs")
class ProposalAttrs(Attrs):
"""Attributes used in proposal operators"""
@tvm._ffi.register_object("relay.attrs.MaxPool2DAttrs")
class MaxPool2DAttrs(Attrs):
"""Attributes used in max_pool2d operators"""
@tvm._ffi.register_object("relay.attrs.AvgPool2DAttrs")
class AvgPool2DAttrs(Attrs):
"""Attributes used in avg_pool2d operators"""
@tvm._ffi.register_object("relay.attrs.MaxPool1DAttrs")
class MaxPool1DAttrs(Attrs):
"""Attributes used in max_pool1d operators"""
@tvm._ffi.register_object("relay.attrs.AvgPool1DAttrs")
class AvgPool1DAttrs(Attrs):
"""Attributes used in avg_pool1d operators"""
@tvm._ffi.register_object("relay.attrs.MaxPool3DAttrs")
class MaxPool3DAttrs(Attrs):
"""Attributes used in max_pool3d operators"""
@tvm._ffi.register_object("relay.attrs.AvgPool3DAttrs")
class AvgPool3DAttrs(Attrs):
"""Attributes used in avg_pool3d operators"""
@tvm._ffi.register_object("relay.attrs.BitPackAttrs")
class BitPackAttrs(Attrs):
"""Attributes used in bitpack operator"""
@tvm._ffi.register_object("relay.attrs.BinaryConv2DAttrs")
class BinaryConv2DAttrs(Attrs):
"""Attributes used in bitserial conv2d operators"""
@tvm._ffi.register_object("relay.attrs.BinaryDenseAttrs")
class BinaryDenseAttrs(Attrs):
"""Attributes used in bitserial dense operators"""
@tvm._ffi.register_object("relay.attrs.Conv2DTransposeAttrs")
class Conv2DTransposeAttrs(Attrs):
"""Attributes used in Transposed Conv2D operators"""
@tvm._ffi.register_object("relay.attrs.Conv3DTransposeAttrs")
class Conv3DTransposeAttrs(Attrs):
"""Attributes used in Transposed Conv3D operators"""
@tvm._ffi.register_object("relay.attrs.DilateAttrs")
class DilateAttrs(Attrs):
"""Attributes used in dilate operators"""
@tvm._ffi.register_object("relay.attrs.SubPixelAttrs")
class SubPixelAttrs(Attrs):
"""Attributes used in depth to space and space to depth operators"""
@tvm._ffi.register_object("relay.attrs.CorrelationAttrs")
class CorrelationAttrs(Attrs):
"""Attributes used in correlation operators"""
@tvm._ffi.register_object("relay.attrs.AdaptivePool2DAttrs")
class AdaptivePool2DAttrs(Attrs):
"""Attributes used in 2D adaptive pooling operators"""
@tvm._ffi.register_object("relay.attrs.AdaptivePool3DAttrs")
class AdaptivePool3DAttrs(Attrs):
"""Attributes used in 3D adaptive pooling operators"""
@tvm._ffi.register_object("relay.attrs.AffineGridAttrs")
class AffineGridAttrs(Attrs):
"""Attributes used in affine_grid operators"""
@tvm._ffi.register_object("relay.attrs.AllocStorageAttrs")
class AllocStorageAttrs(Attrs):
"""Attributes used in alloc_storage operators"""
@tvm._ffi.register_object("relay.attrs.AllocTensorAttrs")
class AllocTensorAttrs(Attrs):
"""Attributes used in alloc_tensor operators"""
@tvm._ffi.register_object("relay.attrs.CastHintAttrs")
class CastHintAttrs(Attrs):
"""Attributes used in cast_hint annotation operators"""
@tvm._ffi.register_object("relay.attrs.Conv1DTransposeAttrs")
class Conv1DTransposeAttrs(Attrs):
"""Attributes used in 1D transposed convolution operators"""
@tvm._ffi.register_object("relay.attrs.ExpandDimsAttrs")
class ExpandDimsAttrs(Attrs):
"""Attributes used in expand_dims operators"""
@tvm._ffi.register_object("relay.attrs.GridSampleAttrs")
class GridSampleAttrs(Attrs):
"""Attributes used in grid_sample operators"""
@tvm._ffi.register_object("relay.attrs.GroupNormAttrs")
class GroupNormAttrs(Attrs):
"""Attributes used in group norm operators"""
@tvm._ffi.register_object("relay.attrs.InstanceNormAttrs")
class InstanceNormAttrs(Attrs):
"""Attributes used in instance norm operators"""
@tvm._ffi.register_object("relay.attrs.LayerNormAttrs")
class LayerNormAttrs(Attrs):
"""Attributes used in layer norm operators"""
@tvm._ffi.register_object("relay.attrs.NdarraySizeAttrs")
class NdarraySizeAttrs(Attrs):
"""Attributes used in ndarray_size operators"""
@tvm._ffi.register_object("relay.attrs.OneHotAttrs")
class OneHotAttrs(Attrs):
"""Attributes used in one_hot operators"""
@tvm._ffi.register_object("relay.attrs.BroadcastAttrs")
class BroadcastAttrs(Attrs):
"""Attributes used in broadcast operators"""
@tvm._ffi.register_object("relay.attrs.QuantizeAttrs")
class QuantizeAttrs(Attrs):
"""Attributes used in quantize operators"""
@tvm._ffi.register_object("relay.attrs.DequantizeAttrs")
class DequantizeAttrs(Attrs):
"""Attributes used in dequantize operators"""
@tvm._ffi.register_object("relay.attrs.ReduceAttrs")
class ReduceAttrs(Attrs):
"""Attributes used in reduction operators (e.g. sum)"""
@tvm._ffi.register_object("relay.attrs.ArgReduceAttrs")
class ArgReduceAttrs(Attrs):
"""Attributes used in reduction operators (e.g. argmin/argmax)"""
@tvm._ffi.register_object("relay.attrs.VarianceAttrs")
class VarianceAttrs(Attrs):
"""Attributes used in reduction operators (e.g. sum)"""
@tvm._ffi.register_object("relay.attrs.RequantizeAttrs")
class RequantizeAttrs(Attrs):
"""Attributes used in requantize operators"""
@tvm._ffi.register_object("relay.attrs.ScatterAttrs")
class ScatterAttrs(Attrs):
"""Attributes used in scatter operators"""
@tvm._ffi.register_object("relay.attrs.SequenceMaskAttrs")
class SequenceMaskAttrs(Attrs):
"""Attributes used in sequence_mask operators"""
@tvm._ffi.register_object("relay.attrs.ShapeFuncAttrs")
class ShapeFuncAttrs(Attrs):
"""Attributes used in shape func operators"""
@tvm._ffi.register_object("relay.attrs.SimulatedQuantizeAttrs")
class SimulatedQuantizeAttrs(Attrs):
"""Attributes used in simulated_quantize operators"""
@tvm._ffi.register_object("relay.attrs.SparseDenseAttrs")
class SparseDenseAttrs(Attrs):
"""Attributes used in sparse_dense operators"""
@tvm._ffi.register_object("relay.attrs.SparseToDenseAttrs")
class SparseToDenseAttrs(Attrs):
"""Attributes used in sparse_to_dense operators"""
@tvm._ffi.register_object("relay.attrs.SparseTransposeAttrs")
class SparseTransposeAttrs(Attrs):
"""Attributes used in sparse_transpose operators"""
@tvm._ffi.register_object("relay.attrs.SparseConv2DAttrs")
class SparseConv2DAttrs(Attrs):
"""Attributes used in sparse_conv2d operators"""
@tvm._ffi.register_object("relay.attrs.TopkAttrs")
class TopkAttrs(Attrs):
"""Attributes used in topk operators"""
@tvm._ffi.register_object("relay.attrs.SearchSortedAttrs")
class SearchSortedAttrs(Attrs):
"""Attributes used in searchsorted operators"""
@tvm._ffi.register_object("relay.attrs.TupleGetItemAttrs")
class TupleGetItemAttrs(Attrs):
"""Attributes used in tuple item access operators"""
@tvm._ffi.register_object("relay.attrs.WithFuncIdAttrs")
class WithFuncIdAttrs(Attrs):
"""Attributes used in with_funcid annotation operators"""
@tvm._ffi.register_object("relay.attrs.SpaceToBatchNDAttrs")
class SpaceToBatchNDAttrs(Attrs):
"""Attributes used in SpaceToBatchND operators"""
@tvm._ffi.register_object("relay.attrs.BatchToSpaceNDAttrs")
class BatchToSpaceNDAttrs(Attrs):
"""Attributes used in BatchToSpaceNDAttrs operators"""
@tvm._ffi.register_object("relay.attrs.ThreefryGenerateAttrs")
class ThreefryGenerateAttrs(Attrs):
"""Attributes used in ThreefryGenerateAttrs operators"""
@tvm._ffi.register_object("relay.attrs.UniformAttrs")
class UniformAttrs(Attrs):
"""Attributes used in UniformAttrs operators"""
@tvm._ffi.register_object("relay.attrs.NLLLossAttrs")
class NLLLossAttrs(Attrs):
"""Attributes for nn.nll_loss"""
@tvm._ffi.register_object("relay.attrs.FixedPointMultiplyAttrs")
class FixedPointMultiplyAttrs(Attrs):
"""Attributes used in fixed_point_multiply operators"""
@tvm._ffi.register_object("relay.attrs.TriluAttrs")
class TriluAttrs(Attrs):
"""Attributes used in trilu operators"""
@tvm._ffi.register_object("relay.attrs.SlidingWindowAttrs")
class SlidingWindowAttrs(Attrs):
"""Attributes used in sliding_window operators"""
@tvm._ffi.register_object("relay.attrs.DynExpandDimsAttrs")
class DynExpandDimsAttrs(Attrs):
"""Attributes used in dynamic expand_dims operators"""
@tvm._ffi.register_object("relay.attrs.ScatterAddAttrs")
class ScatterAddAttrs(Attrs):
"""Attributes used in scatter_add operators"""
@tvm._ffi.register_object("relay.attrs.ScatterNDAttrs")
class ScatterNDAttrs(Attrs):
"""Attributes used in scatter_nd operators"""
@tvm._ffi.register_object("relay.attrs.GatherNDAttrs")
class GatherNDAttrs(Attrs):
"""Attributes used in gather_nd operators"""
@tvm._ffi.register_object("relay.attrs.AutoSchedulerLayoutTransformAttrs")
class AutoSchedulerLayoutTransformAttrs(Attrs):
"""Attributes used in AutoSchedulerLayoutTransform operators"""
@tvm._ffi.register_object("relay.attrs.MetaScheduleLayoutTransformAttrs")
class MetaScheduleLayoutTransformAttrs(Attrs):
"""Attributes used in MetaScheduleLayoutTransform operators"""
@tvm._ffi.register_object("relay.attrs.MatrixSetDiagAttrs")
class MatrixSetDiagAttrs(Attrs):
"""Attributes used in matrix_set_diag operators"""
@tvm._ffi.register_object("relay.attrs.ScanopAttrs")
class ScanopAttrs(Attrs):
"""Attributes used in cumsum and cumprod operators"""
@tvm._ffi.register_object("relay.attrs.UniqueAttrs")
class UniqueAttrs(Attrs):
"""Attributes used in unique operators"""
@tvm._ffi.register_object("relay.attrs.EinsumAttrs")
class EinsumAttrs(Attrs):
"""Attributes used in einsum operators"""
@tvm._ffi.register_object("relay.attrs.StftAttrs")
class StftAttrs(Attrs):
"""Attributes used in stft operators"""
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/random/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""PRNG related operators."""
from .kernel import *
from . import _kernel
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/random/_kernel.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Splittable and parallelizable PRNG kernels."""
# pylint: disable=invalid-name,unused-argument
from __future__ import absolute_import
from .. import strategy
from ..op import register_strategy, register_pattern, OpPattern
# Threefry
register_strategy("random.threefry_generate", strategy.threefry_generate_strategy)
register_pattern("random.threefry_generate", OpPattern.OPAQUE)
register_strategy("random.threefry_split", strategy.threefry_split_strategy)
register_pattern("random.threefry_split", OpPattern.OPAQUE)
# Distribution
register_strategy("random.uniform", strategy.uniform_strategy)
register_pattern("random.uniform", OpPattern.OPAQUE)
register_strategy("random.normal", strategy.normal_strategy)
register_pattern("random.normal", OpPattern.OPAQUE)
register_strategy("random.multinomial", strategy.multinomial_strategy)
register_pattern("random.multinomial", OpPattern.OPAQUE)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/random/_make.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Constructor APIs"""
import tvm._ffi
tvm._ffi._init_api("relay.op.random._make", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/random/kernel.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Splittable and parallelizable PRNG kernels."""
# pylint: disable=invalid-name,unused-argument
from __future__ import absolute_import
import sys
import numpy as np
from ...expr import Constant, Expr, const
from .... import nd
from . import _make
def threefry_key(seed):
"""Create a new Threefry random number generator key.
Example
-------
.. code-block:: python
gen = threefry_key(0)
_, random_number = threefry_generate(gen, (4,))
Parameters
----------
seed : int
Starting seed for the key
Returns
-------
key : relay.Expr
New key to pass to future uses of :py:func:`threefry_split` or
:py:func:`threefry_generate`.
"""
s = np.frombuffer(seed.to_bytes(32, sys.byteorder), dtype="uint64")
a = np.concatenate((s, np.array([0, 0, 0, 0, 1 << 63, 0], dtype="uint64")))
return Constant(nd.array(a))
def threefry_generate(key, shape):
"""Generate an array of random bits (`uint64`) using the Threefry algorithm
Example
-------
.. code-block:: python
key = threefry_key(0)
new_key, random1 = threefry_generate(key, (4,))
_, random2 = threefry_generate(new_key, (4,))
# random1 and random2 are different random numbers
Parameters
----------
key : relay.Expr
key that uniquely determines the random values. Multiple uses with the
same key will generate the same random values. This key should be
        treated as an opaque pointer. You can create one by calling
:py:func:`threefry_key`, :py:func:`threefry_split`, or
:py:func:`threefry_generate`. **Do not use this key again after calling
this function.**
shape : Sequence[int]
        Desired output shape of the random numbers.
Returns
-------
new_key : relay.Expr
New key to pass to future uses of :py:func:`threefry_split` or
:py:func:`threefry_generate`.
random_array : relay.Expr
Array of random numbers. Has shape `shape`.
"""
return _make.threefry_generate(key, shape)
def threefry_split(key):
"""Split an existing Threefry key into two new ones.
    This is useful if you have two subsequent calls which each need their own
independent random number generation.
Example
-------
.. code-block:: python
def foo(key):
new_key, num = threefry_generate(key, (4,))
return num
key = threefry_key(0)
key1, key2 = threefry_split(key)
assert foo(key1) != foo(key2)
Parameters
----------
key : relay.Expr
key that uniquely determines the random values. Multiple uses with the
        same key will generate the same random values. This key should be
        treated as an opaque pointer. You can create one by calling
        :py:func:`threefry_key`, :py:func:`threefry_split`, or
        :py:func:`threefry_generate`. **Do not use this key again after calling
        this function.**
Returns
-------
new_key_1 : relay.Expr
New key to pass to future uses of :py:func:`threefry_split` or
:py:func:`threefry_generate`.
new_key_2 : relay.Expr
New key to pass to future uses of :py:func:`threefry_split` or
:py:func:`threefry_generate`.
"""
return _make.threefry_split(key)
def uniform(key, shape, dtype="float32", low=0.0, high=1.0):
"""Draw samples from a uniform distribution.
Samples are uniformly distributed over the half-open interval [low, high)
(includes low, but excludes high). In other words, any value within the
given interval is equally likely to be drawn by uniform.
Example
-------
.. code-block:: python
key = threefry_key(0)
key, random_values = uniform(key, (100,), low=0, high=10)
Parameters
----------
key : relay.Expr
key that uniquely determines the random values. Multiple uses with the
        same key will generate the same random values. This key should be
        treated as an opaque pointer. You can create one by calling
        :py:func:`threefry_key`, :py:func:`threefry_split`, or
        :py:func:`threefry_generate`. **Do not use this key again after calling
        this function.**
shape : Sequence[int]
        Desired output shape of the random numbers.
    dtype : str
        Desired output dtype of the random numbers.
low : float or relay.Expr, optional
Lower bound of the uniform distribution.
high : float or relay.Expr, optional
Upper bound of the uniform distribution.
Returns
-------
new_key : relay.Expr
New random key to pass to future uses of random functions.
random_values : relay.Expr
The generated uniform distributed random numbers.
"""
if not isinstance(low, Expr):
low = const(low, dtype=dtype)
if not isinstance(high, Expr):
high = const(high, dtype=dtype)
return _make.uniform(key, low, high, shape, dtype)
def normal(key, shape, dtype="float32", mean=0.0, scale=1.0):
"""Draw samples from a normal distribution.
Example
-------
.. code-block:: python
key = threefry_key(0)
        key, random_values = normal(key, (100,), mean=0.0, scale=1.0)
Parameters
----------
key : relay.Expr
key that uniquely determines the random values. Multiple uses with the
        same key will generate the same random values. This key should be
        treated as an opaque pointer. You can create one by calling
        :py:func:`threefry_key`, :py:func:`threefry_split`, or
        :py:func:`threefry_generate`. **Do not use this key again after calling
        this function.**
shape : Sequence[int]
        Desired output shape of the random numbers.
    dtype : str
        Desired output dtype of the random numbers.
    mean : float or relay.Expr, optional
        Mean of the normal distribution.
    scale : float or relay.Expr, optional
        Standard deviation of the normal distribution.
Returns
-------
new_key : relay.Expr
New random key to pass to future uses of random functions.
random_values : relay.Expr
The generated normal distributed random numbers.
"""
if not isinstance(mean, Expr):
mean = const(mean, dtype=dtype)
if not isinstance(scale, Expr):
scale = const(scale, dtype=dtype)
return _make.normal(key, mean, scale, shape, dtype)
def multinomial(key, probs, num_samples):
"""Draw samples from a multinomial distribution.
Example
-------
.. code-block:: python
key = threefry_key(0)
        # probs is a relay.Expr of probabilities, e.g. with shape (3, 5, 10)
        key, random_indices = multinomial(key, probs, num_samples=2)
Parameters
----------
key : relay.Expr
key that uniquely determines the random values. Multiple uses with the
        same key will generate the same random values. This key should be
        treated as an opaque pointer. You can create one by calling
        :py:func:`threefry_key`, :py:func:`threefry_split`, or
        :py:func:`threefry_generate`. **Do not use this key again after calling
        this function.**
probs: relay.Expr
Array containing the probabilities of returning each respective index.
If a tensor is provided, the last dimension is treated independently.
        Negative values in this tensor will be clipped to zero to
        indicate that they have no chance of being selected.
num_samples : int
Number of samples to return
Returns
-------
new_key : relay.Expr
New random key to pass to future uses of random functions.
random_indices : relay.Expr
The generated indices.
"""
return _make.multinomial(key, probs, num_samples)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/reduce.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Reduce operators."""
# pylint: disable=redefined-builtin
from ..expr import Tuple, TupleWrapper
from . import _make
from .tensor import exp, log, sqrt
from .transform import squeeze
def argmax(data, axis=None, keepdims=False, exclude=False, select_last_index=False):
"""Returns the indices of the maximum values along an axis.
Parameters
----------
data : relay.Expr
The input data
axis : None or int or tuple of int
        Axis or axes along which an argmax operation is performed.
        The default, axis=None, will find the index of the maximum element among all
        elements of the input array. If axis is negative it counts from the last to the first axis.
keepdims : bool
If this is set to True, the axes which are reduced are left in the result as dimensions
with size one.
With this option, the result will broadcast correctly against the input array.
exclude : bool
If `exclude` is true, reduction will be performed on the axes that are
NOT in axis instead.
select_last_index : bool
Whether to select the last index or the first index if the max element appears in
multiple indices, default is False (first index).
Returns
-------
result : relay.Expr
The computed result.
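    Examples
    --------
    For example, reducing over the last axis:
    .. code-block:: python
        data = relay.Constant(tvm.nd.array([[1., 3., 2.],
                                            [6., 5., 4.]]))
        relay.argmax(data, axis=1)
        # [1, 0]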
"""
axis = [axis] if isinstance(axis, int) else axis
return _make.argmax(data, axis, keepdims, exclude, select_last_index)
def argmin(data, axis=None, keepdims=False, exclude=False, select_last_index=False):
"""Returns the indices of the minimum values along an axis.
Parameters
----------
data : relay.Expr
The input data
axis : None or int or tuple of int
        Axis or axes along which an argmin operation is performed.
        The default, axis=None, will find the index of the minimum element among all
        elements of the input array. If axis is negative it counts from the last to the first axis.
keepdims : bool
If this is set to True, the axes which are reduced are left in the result as dimensions
with size one.
With this option, the result will broadcast correctly against the input array.
exclude : bool
If `exclude` is true, reduction will be performed on the axes that are
NOT in axis instead.
select_last_index : bool
Whether to select the last index or the first index if the min element appears in
multiple indices, default is False (first index).
Returns
-------
result : relay.Expr
The computed result.
"""
axis = [axis] if isinstance(axis, int) else axis
return _make.argmin(data, axis, keepdims, exclude, select_last_index)
def sum(data, axis=None, keepdims=False, exclude=False):
"""Computes the sum of array elements over given axes.
Parameters
----------
data : relay.Expr
The input data
axis : None or int or tuple of int
Axis or axes along which a sum is performed. The default, axis=None,
will sum all of the elements of the input array. If axis is
negative it counts from the last to the first axis.
keepdims : bool
If this is set to True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will broadcast
correctly against the input array.
exclude : bool
If `exclude` is true, reduction will be performed on the axes that are
NOT in axis instead.
Returns
-------
result : relay.Expr
The computed result.
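    Examples
    --------
    For example, summing over the last axis:
    .. code-block:: python
        data = relay.Constant(tvm.nd.array([[1., 2.], [3., 4.]]))
        relay.sum(data, axis=1)
        # [3., 7.]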
"""
axis = [axis] if isinstance(axis, int) else axis
return _make.sum(data, axis, keepdims, exclude)
def all(data, axis=None, keepdims=False, exclude=False):
"""Computes the logical AND of boolean array elements over given axes.
Parameters
----------
data : relay.Expr
The input boolean tensor
axis : None or int or tuple of int
        Axis or axes along which a logical AND is performed. The default, axis=None,
        will reduce over all of the elements of the input array. If axis is
        negative it counts from the last to the first axis.
keepdims : bool
If this is set to True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will broadcast
correctly against the input array.
exclude : bool
If `exclude` is true, reduction will be performed on the axes that are
NOT in axis instead.
Returns
-------
result : relay.Expr
The computed result.
Examples
--------
.. code-block:: python
data = relay.Constant(tvm.nd.array([[[ True, True, True],
[ True, True, True],
[False, True, False]],
[[ True, False, False],
[ True, True, False],
[False, True, True]]]))
relay.all(data, axis=1)
# [[False, True, False],
# [False, False, False]]
relay.all(data, axis=0)
# [[ True, False, False],
# [ True, True, False],
# [False, True, False]]
"""
axis = [axis] if isinstance(axis, int) else axis
return _make.all(data, axis, keepdims, exclude)
def any(data, axis=None, keepdims=False, exclude=False):
"""Computes the logical OR of boolean array elements over given axes.
Parameters
----------
data : relay.Expr
The input boolean tensor
axis : None or int or tuple of int
        Axis or axes along which a logical OR is performed. The default, axis=None,
        will reduce over all of the elements of the input array. If axis is
        negative it counts from the last to the first axis.
keepdims : bool
If this is set to True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will broadcast
correctly against the input array.
exclude : bool
If `exclude` is true, reduction will be performed on the axes that are
NOT in axis instead.
Returns
-------
result : relay.Expr
The computed result.
Examples
--------
.. code-block:: python
data = relay.Constant(tvm.nd.array([[[ True, True, True],
[ True, True, True],
[False, True, False]],
[[ True, False, False],
[ True, True, False],
[False, True, True]]]))
relay.any(data, axis=1)
# [[True, True, True],
# [True, True, True]]
relay.any(data, axis=0)
# [[ True, True, True],
# [ True, True, True],
# [False, True, True]]
"""
axis = [axis] if isinstance(axis, int) else axis
return _make.any(data, axis, keepdims, exclude)
def max(data, axis=None, keepdims=False, exclude=False):
"""Computes the max of array elements over given axes.
Parameters
----------
data : relay.Expr
The input data
axis : None or int or tuple of int
Axis or axes along which the max operation is performed.
The default, axis=None, will find the max element from all of the elements of the input
array. If axis is negative it counts from the last to the first axis.
keepdims : bool
If this is set to True, the axes which are reduced are left in the result as dimensions
with size one.
With this option, the result will broadcast correctly against the input array.
exclude : bool
If `exclude` is true, reduction will be performed on the axes that are
NOT in axis instead.
Returns
-------
result : relay.Expr
The computed result.
"""
axis = [axis] if isinstance(axis, int) else axis
return _make.max(data, axis, keepdims, exclude)
def min(data, axis=None, keepdims=False, exclude=False):
"""Computes the min of array elements over given axes.
Parameters
----------
data : relay.Expr
The input data
axis : None or int or tuple of int
Axis or axes along which a minimum operation is performed.
The default, axis=None, will find the minimum element from all
of the elements of the input array. If axis is negative it counts from
the last to the first axis.
keepdims : bool
If this is set to True, the axes which are reduced are left in the result as dimensions
with size one.
With this option, the result will broadcast correctly against the input array.
exclude : bool
If `exclude` is true, reduction will be performed on the axes that are
NOT in axis instead.
Returns
-------
result : relay.Expr
The computed result.
"""
axis = [axis] if isinstance(axis, int) else axis
return _make.min(data, axis, keepdims, exclude)
def mean(data, axis=None, keepdims=False, exclude=False):
"""Computes the mean of array elements over given axes.
Parameters
----------
data : relay.Expr
The input data
axis : None or int or tuple of int
Axis or axes along which a mean operation is performed.
The default, axis=None, will compute the mean of all elements in the input array.
If axis is negative it counts from the last to the first axis.
keepdims : bool
If this is set to True, the axes which are reduced are left in the result as dimensions
with size one.
With this option, the result will broadcast correctly against the input array.
exclude : bool
If `exclude` is true, reduction will be performed on the axes that are
NOT in axis instead.
Returns
-------
result : relay.Expr
The computed result.
"""
axis = [axis] if isinstance(axis, int) else axis
return _make.mean(data, axis, keepdims, exclude)
def variance(data, axis=None, keepdims=False, exclude=False, unbiased=False, with_mean=None):
"""Computes the variance of data over given axes.
Parameters
----------
data : relay.Expr
The input data
axis : None or int or tuple of int
Axis or axes along which a variance operation is performed.
The default, axis=None, will compute the variance of all elements in the input array.
If axis is negative it counts from the last to the first axis.
keepdims : bool
If this is set to True, the axes which are reduced are left in the result as dimensions
with size one.
With this option, the result will broadcast correctly against the input array.
exclude : bool
If `exclude` is true, reduction will be performed on the axes that are
NOT in axis instead.
unbiased : bool
If this is set to True, the unbiased estimation will be used.
with_mean : Optional[relay.Expr]
To compute variance given an already computed mean
Returns
-------
result : relay.Expr
The computed result.
"""
axis = [axis] if isinstance(axis, int) else axis
m = mean(data, axis, True, exclude) if with_mean is None else with_mean
return _make._variance(data, m, axis, keepdims, exclude, unbiased)
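# Illustrative numpy sketch (not part of the original module): the biased vs.
# unbiased estimates selected by the ``unbiased`` flag above, checked on a
# small hypothetical input.
def _variance_example():
    import numpy as np
    data = np.array([1.0, 2.0, 3.0, 4.0])
    biased = ((data - data.mean()) ** 2).sum() / len(data)  # unbiased=False: 1.25
    unbiased = ((data - data.mean()) ** 2).sum() / (len(data) - 1)  # unbiased=True: ~1.667
    return biased, unbiased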
def std(data, axis=None, keepdims=False, exclude=False, unbiased=False):
"""Computes the standard deviation of data over given axes.
Parameters
----------
data : relay.Expr
The input data
axis : None or int or tuple of int
Axis or axes along which a standard deviation operation is performed.
The default, axis=None, will compute the standard deviation of all elements in the
input array. If axis is negative it counts from the last to the first axis.
keepdims : bool
If this is set to True, the axes which are reduced are left in the result as dimensions
with size one.
With this option, the result will broadcast correctly against the input array.
exclude : bool
If `exclude` is true, reduction will be performed on the axes that are
NOT in axis instead.
unbiased : bool
If this is set to True, the unbiased estimation will be used.
Returns
-------
result : relay.Expr
The computed result.
"""
axis = [axis] if isinstance(axis, int) else axis
m = mean(data, axis, True, exclude)
return sqrt(_make._variance(data, m, axis, keepdims, exclude, unbiased))
def mean_variance(data, axis=None, keepdims=False, exclude=False, unbiased=False):
"""Computes the mean and variance of data over given axes.
Parameters
----------
data : relay.Expr
The input data
axis : None or int or tuple of int
Axis or axes along which a mean and variance operation is performed.
The default, axis=None, will compute the mean and variance of all elements in
the input array. If axis is negative it counts from the last to the first axis.
keepdims : bool
If this is set to True, the axes which are reduced are left in the result as dimensions
with size one.
With this option, the result will broadcast correctly against the input array.
exclude : bool
If `exclude` is true, reduction will be performed on the axes that are
NOT in axis instead.
unbiased : bool
If this is set to True, the unbiased estimation will be used.
Returns
-------
result : relay.Expr
The computed result.
"""
axis = [axis] if isinstance(axis, int) else axis
m = mean(data, axis, True, exclude)
var = _make._variance(data, m, axis, keepdims, exclude, unbiased)
if not keepdims:
m = squeeze(m, axis=axis)
return TupleWrapper(Tuple((m, var)), 2)
def mean_std(data, axis=None, keepdims=False, exclude=False):
"""Computes the mean and standard deviation of data over given axes.
Parameters
----------
data : relay.Expr
The input data
axis : None or int or tuple of int
Axis or axes along which a mean and standard deviation operation is performed.
The default, axis=None, will compute the mean and standard deviation of all elements in
the input array. If axis is negative it counts from the last to the first axis.
keepdims : bool
If this is set to True, the axes which are reduced are left in the result as dimensions
with size one.
With this option, the result will broadcast correctly against the input array.
exclude : bool
If `exclude` is true, reduction will be performed on the axes that are
NOT in axis instead.
Returns
-------
result : relay.Expr
The computed result.
"""
axis = [axis] if isinstance(axis, int) else axis
m = mean(data, axis, True, exclude)
s = sqrt(_make._variance(data, m, axis, keepdims, exclude, False))
if not keepdims:
        m = squeeze(m, axis=axis)
return TupleWrapper(Tuple((m, s)), 2)
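# Illustrative numpy sketch (assumption, not original code): mean_std returns
# the pair composed above -- the kept-dims mean feeds the variance, and the
# standard deviation is the biased (ddof=0) one since ``unbiased`` is fixed
# to False.
def _mean_std_example():
    import numpy as np
    data = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    m = data.mean(axis=1)  # [2.0, 5.0]
    s = data.std(axis=1, ddof=0)  # [0.8165..., 0.8165...]
    return m, s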
def prod(data, axis=None, keepdims=False, exclude=False):
"""Computes the products of array elements over given axes.
Parameters
----------
data : relay.Expr
The input data
axis : None or int or tuple of int
Axis or axes along which a product is performed.
        The default, axis=None, will compute the product of all of the elements of the
        input array. If axis is negative it counts from the last to the first axis.
keepdims : bool
If this is set to True, the axes which are reduced are left in the result as dimensions
with size one.
With this option, the result will broadcast correctly against the input array.
exclude : bool
If `exclude` is true, reduction will be performed on the axes that are
NOT in axis instead.
Returns
-------
result : relay.Expr
The computed result.
"""
axis = [axis] if isinstance(axis, int) else axis
return _make.prod(data, axis, keepdims, exclude)
def logsumexp(data, axis=None, keepdims=False):
"""Compute the log of the sum of exponentials of input elements over given axes.
This function is more numerically stable than log(sum(exp(input))).
It avoids overflows caused by taking the exp of large inputs and underflows
caused by taking the log of small inputs.
Parameters
----------
data : relay.Expr
The input data
axis : None or int or tuple of int
        Axis or axes along which the log-sum-exp computation is performed.
The default, axis=None, will compute the log of the sum of exponentials of all elements
in the input array. If axis is negative it counts from the last to the first axis.
keepdims : bool
If this is set to True, the axes which are reduced are left in the result as dimensions
with size one.
Returns
-------
result : relay.Expr
The computed result.
"""
axis = [axis] if isinstance(axis, int) else axis
max_x = max(data, axis, True)
exp_x = exp(data - max_x)
sum_x = sum(exp_x, axis, True)
out_x = log(sum_x) + max_x
if not keepdims:
out_x = squeeze(out_x, axis)
return out_x
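# Illustrative numpy sketch (not part of the original module): why the
# max-shift above matters -- naive log(sum(exp(x))) overflows for large
# inputs, while the shifted form recovers the exact answer.
def _logsumexp_example():
    import numpy as np
    x = np.array([1000.0, 1000.0, 1000.0])
    # naive: np.log(np.exp(x).sum()) -> inf, since exp(1000) overflows float64
    m = x.max()
    stable = np.log(np.exp(x - m).sum()) + m  # 1000.0 + log(3) ~= 1001.0986
    return stable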
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/strategy/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Relay op strategies."""
from __future__ import absolute_import as _abs
from .generic import *
from . import x86
from . import arm_cpu
from . import cuda
from . import hls
from . import mali
from . import bifrost
from . import rocm
from . import intel_graphics
from . import hexagon
from . import adreno
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/strategy/adreno.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of adreno operator strategy."""
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
from tvm import topi
from .generic import *
from .. import op as _op
@conv2d_NCHWc_strategy.register("adreno")
@conv2d_strategy.register("adreno")
def conv2d_strategy_adreno(attrs, inputs, out_type, target):
"""conv2d adreno strategy"""
strategy = _op.OpStrategy()
data, kernel = inputs
dilation_h, dilation_w = attrs.get_int_tuple("dilation")
stride_h, stride_w = attrs.get_int_tuple("strides")
groups = attrs.groups
data_layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if groups == 1:
if (
(data_layout == "NCHW" and kernel_layout == "OIHW")
or (data_layout == "NCHW4c" and kernel_layout == "OIHW4o")
or (data_layout == "NCHW" and kernel_layout == "OIHW4o")
):
if len(kernel.shape) == 4:
_, _, kh, kw = get_const_tuple(kernel.shape)
else:
_, _, kh, kw, _ = get_const_tuple(kernel.shape)
if (
(2 < kh < 8 and 2 < kw < 8 and kh == kw)
and (stride_h == 1 and stride_w == 1)
and (dilation_h == 1 and dilation_w == 1)
and not (data_layout == "NCHW" and kernel_layout == "OIHW4o")
):
strategy.add_implementation(
wrap_compute_conv2d(topi.adreno.conv2d_nchw_winograd),
wrap_topi_schedule(topi.adreno.schedule_conv2d_nchw_winograd),
name="conv2d_nchw_winograd.image2d",
plevel=5,
)
strategy.add_implementation(
wrap_compute_conv2d(topi.adreno.conv2d_nchwc),
wrap_topi_schedule(topi.adreno.schedule_conv2d_nchwc),
name="conv2d_nchwc.image2d",
plevel=10,
)
elif (
(data_layout == "NHWC" and kernel_layout == "HWIO")
or (data_layout == "NHWC4c" and kernel_layout == "HWIO4o")
or (data_layout == "NHWC" and kernel_layout == "HWIO4o")
):
if len(kernel.shape) == 4:
kh, kw, _, _ = get_const_tuple(kernel.shape)
else:
kh, kw, _, _, _ = get_const_tuple(kernel.shape)
if (
(2 < kh < 8 and 2 < kw < 8 and kh == kw)
and (stride_h == 1 and stride_w == 1)
and (dilation_h == 1 and dilation_w == 1)
and not (data_layout == "NHWC" and kernel_layout == "HWIO4o")
):
strategy.add_implementation(
wrap_compute_conv2d(topi.adreno.conv2d_nhwc_winograd),
wrap_topi_schedule(topi.adreno.schedule_conv2d_nhwc_winograd),
name="conv2d_nhwc_winograd.image2d",
plevel=5,
)
strategy.add_implementation(
wrap_compute_conv2d(topi.adreno.conv2d_nhwc),
wrap_topi_schedule(topi.adreno.schedule_conv2d_nhwc),
name="conv2d_nhwc.image2d",
plevel=10,
)
else:
raise RuntimeError(
"Layout not supported: ("
+ data_layout
+ ", "
+ kernel_layout
+ ") - only support NCHW4c / OIHW4o and NHWC / HWOI layouts for conv2d"
)
else:
# cannot use is_depthwise_conv2d because it does not know about NHWC4c/HWOI4o layouts
if data_layout == "NCHW":
ic = data.shape[1]
elif data_layout == "NCHW4c":
ic = data.shape[1] * data.shape[4]
elif data_layout == "NHWC":
ic = data.shape[3]
elif data_layout == "NHWC4c":
ic = data.shape[3] * data.shape[4]
else:
raise RuntimeError("Unsupported depthwise_conv2d data layout {}".format(data_layout))
if kernel_layout == "OIHW":
oc = kernel.shape[0]
elif kernel_layout == "OIHW4o":
oc = kernel.shape[0] * kernel.shape[4]
elif kernel_layout == "HWOI":
oc = kernel.shape[2]
elif kernel_layout == "HWOI4o":
oc = kernel.shape[2] * kernel.shape[4]
else:
raise RuntimeError(
"Unsupported depthwise_conv2d kernel layout {}".format(kernel_layout)
)
if ic == oc == groups:
if (data_layout == "NCHW" and kernel_layout == "OIHW") or (
data_layout == "NCHW4c" and kernel_layout == "OIHW4o"
):
strategy.add_implementation(
wrap_compute_conv2d(topi.adreno.depthwise_conv2d_nchwc),
wrap_topi_schedule(topi.adreno.schedule_depthwise_conv2d_nchwc),
name="depthwise_conv2d_nchwc.image2d",
plevel=10,
)
elif (data_layout == "NHWC" and kernel_layout == "HWOI") or (
data_layout == "NHWC4c" and kernel_layout == "HWOI4o"
):
if data.shape[-1] >= 4:
strategy.add_implementation(
wrap_compute_conv2d(topi.adreno.depthwise_conv2d_nhwc),
wrap_topi_schedule(topi.adreno.schedule_depthwise_conv2d_nhwc),
name="depthwise_conv2d_nhwc.image2d",
plevel=10,
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc),
wrap_topi_schedule(topi.cuda.schedule_depthwise_conv2d_nhwc),
name="depthwise_conv2d_nhwc.cuda",
)
else:
raise RuntimeError(
"Layout not supported: ("
+ data_layout
+ ", "
+ kernel_layout
+ ") - only support NCHW4c / OIHW4o and NHWC / HWOI layouts for conv2d"
)
else:
raise RuntimeError("General group convolution is not currently supported")
return strategy
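def _strategy_pattern_sketch(attrs, inputs, out_type, target):
    """Illustrative sketch (assumption, not part of the original file): the
    minimal shape shared by every strategy function in this module -- build an
    OpStrategy and register (compute, schedule) pairs, where a higher
    ``plevel`` marks the implementation preferred when several apply."""
    strategy = _op.OpStrategy()
    strategy.add_implementation(
        wrap_compute_conv2d(topi.nn.conv2d_nchw),  # generic fallback compute
        wrap_topi_schedule(topi.generic.schedule_conv2d_nchw),
        name="conv2d_nchw.sketch",
        plevel=1,  # lowest priority; the real schedules above use 5-20
    )
    return strategy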
@conv2d_winograd_without_weight_transform_strategy.register("adreno")
def conv2d_winograd_without_weight_transform_strategy_adreno(attrs, inputs, out_type, target):
"""conv2d_winograd_without_weight_transform adreno strategy"""
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
layout = attrs.data_layout
assert dilation == (1, 1), "Do not support dilate now"
assert groups == 1, "Do not support arbitrary group number"
strategy = _op.OpStrategy()
if layout in ("NCHW", "NCHW4c"):
strategy.add_implementation(
wrap_compute_conv2d(topi.adreno.conv2d_nchw_winograd_without_weight_transform),
wrap_topi_schedule(topi.adreno.schedule_conv2d_nchw_winograd_without_weight_transform),
name="conv2d_nchw_winograd_without_weight_transform.image2d",
plevel=5,
)
elif layout in ("NHWC", "NHWC4c"):
strategy.add_implementation(
wrap_compute_conv2d(topi.adreno.conv2d_nhwc_winograd_without_weight_transform),
wrap_topi_schedule(topi.adreno.schedule_conv2d_nhwc_winograd_without_weight_transform),
name="conv2d_nhwc_winograd_without_weight_transform.image2d",
plevel=5,
)
else:
raise RuntimeError(
"Unsupported conv2d_winograd_without_weight_transform layout {}".format(layout)
)
return strategy
@schedule_pool.register("adreno")
def schedule_pool_adreno(attrs, outs, target):
"""schedule pooling ops for adreno"""
with target:
if attrs.layout == "NCHW4c":
return topi.adreno.schedule_pool(outs, attrs.layout)
return topi.cuda.schedule_pool(outs, attrs.layout)
@schedule_injective.register(["adreno"])
def schedule_injective_adreno(attrs, outs, target):
"""schedule injective ops for adreno"""
with target:
return topi.adreno.schedule_injective(outs)
@schedule_reduce.register(["adreno"])
def schedule_reduce_adreno(attrs, outs, target):
"""schedule reduction ops for adreno GPU"""
with target:
return topi.adreno.schedule_reduce(outs)
@concatenate_strategy.register(["adreno"])
def concatenate_strategy_adreno(attrs, inputs, out_type, target):
    """concatenate adreno strategy"""
    strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_concat(topi.transform.concatenate),
wrap_topi_schedule(topi.adreno.schedule_injective),
name="concatenate.adreno",
)
return strategy
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/strategy/arm_cpu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of ARM CPU operator strategy."""
from functools import reduce
import logging
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
import re
from tvm import relay, topi
from ....auto_scheduler import is_auto_scheduler_enabled
from ....meta_schedule import is_meta_schedule_enabled
from ....topi.generic import conv2d as conv2d_generic
from .. import op as _op
from .generic import *
logger = logging.getLogger("strategy")
@schedule_reduce.register("arm_cpu")
def schedule_reduce_cpu(attrs, outs, target):
"""schedule reduction ops for arm_cpu"""
with target:
return topi.x86.schedule_reduce(outs)
@schedule_injective.register("arm_cpu")
def schedule_injective_arm_cpu(_, outs, target):
"""schedule injective ops for arm cpu"""
with target:
return topi.arm_cpu.schedule_injective(outs)
@schedule_concatenate.register("arm_cpu")
def schedule_concatenate_arm_cpu(_, outs, target):
"""schedule concatenate for arm cpu"""
with target:
return topi.arm_cpu.schedule_concatenate(outs)
@schedule_pool.register(["arm_cpu"])
def schedule_pool_arm_cpu(attrs, outs, target):
"""schedule pooling ops arm cpu"""
layout = attrs.layout
avg_pool = isinstance(attrs, relay.op.op_attrs.AvgPool2DAttrs)
with target:
if (
avg_pool
and target.features.has_dsp
and layout in ("NCW", "NCHW")
or not avg_pool
and target.features.has_dsp
and layout in ("NWC", "NHWC")
):
return topi.arm_cpu.schedule_pool(outs, layout)
logger.warning("pool is not optimized for arm cpu.")
return topi.generic.schedule_pool(outs, layout)
def _get_padding_width(padding):
assert isinstance(padding, tuple)
if len(padding) == 2:
_, (pad_left, pad_right) = padding
else:
_, pad_left, _, pad_right = padding
return pad_left + pad_right
def _is_simd_aligned(dtype, dimensions, padding=None):
if padding:
assert len(dimensions) == len(padding)
padded_dims = (sum(x) for x in zip(dimensions, padding))
else:
padded_dims = dimensions
# Multiply all elements of padded_dims together. We can't use math.prod, as it
# does not exist in Python 3.7.
size = reduce(lambda x, y: x * y, padded_dims)
return (
(dtype == "int8" and size % 4 == 0)
or (dtype == "int16" and size % 2 == 0)
or (dtype == "int32")
)
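def _simd_alignment_examples():
    """Hypothetical usage sketch for the helper above (not original code):
    the flattened, padding-inclusive element count must fill 32-bit words."""
    assert not _is_simd_aligned("int8", (3,), padding=(2,))  # 3 + 2 = 5, 5 % 4 != 0
    assert _is_simd_aligned("int8", (6,), padding=(2,))  # 6 + 2 = 8, 8 % 4 == 0
    assert _is_simd_aligned("int16", (5, 2))  # 5 * 2 = 10, 10 % 2 == 0
    assert _is_simd_aligned("int32", (7,))  # int32 is always word-aligned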
@conv2d_strategy.register("arm_cpu")
def conv2d_strategy_arm_cpu(attrs, inputs, out_type, target):
"""conv2d arm cpu strategy"""
strategy = _op.OpStrategy()
data, kernel = inputs
dilation_h, dilation_w = attrs.get_int_tuple("dilation")
stride_h, stride_w = attrs.get_int_tuple("strides")
padding = attrs.get_int_tuple("padding")
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if groups == 1:
if layout == "NCHW":
if kernel_layout == "OIHW":
if (
topi.arm_cpu.is_int8_hw_support(data.dtype, kernel.dtype)
and kernel.shape[1] >= 64
):
strategy.add_implementation(
wrap_compute_conv2d(topi.arm_cpu.conv2d_nchw_int8),
wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_nchw_int8),
name="conv2d_nchw_int8.arm_cpu",
plevel=15,
)
else:
# ARM conv2d spatial pack schedule.
strategy.add_implementation(
wrap_compute_conv2d(topi.arm_cpu.conv2d_nchw_spatial_pack),
wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_nchw_spatial_pack),
name="conv2d_nchw_spatial_pack.arm_cpu",
plevel=10,
)
strategy.add_implementation(
wrap_compute_conv2d(topi.x86.conv2d_nchw),
wrap_topi_schedule(topi.x86.schedule_conv2d_nchw),
name="conv2d_nchw.x86",
)
# check if winograd algorithm is applicable
_, _, kh, kw = get_const_tuple(kernel.shape)
pt, pl, pb, pr = topi.nn.get_pad_tuple(padding, (kh, kw))
is_winograd_applicable = (
"float" in data.dtype
and "float" in kernel.dtype
and kh == 3
and kw == 3
and stride_h == 1
and stride_w == 1
and dilation_h == 1
and dilation_w == 1
)
if is_winograd_applicable:
strategy.add_implementation(
wrap_compute_conv2d(topi.arm_cpu.conv2d_nchw_winograd),
wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_nchw_winograd),
name="conv2d_nchw_winograd.arm_cpu",
plevel=5,
)
if "nnpack" in target.libs and pt == 1 and pb == 1 and pl == 1 and pr == 1:
strategy.add_implementation(
wrap_compute_conv2d(topi.arm_cpu.conv2d_nchw_winograd_nnpack),
wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_nchw_winograd_nnpack),
name="conv2d_nchw_winograd_nnpack.arm_cpu",
plevel=15,
)
elif re.match(r"OIHW\d*o", kernel_layout):
strategy.add_implementation(
wrap_compute_conv2d(topi.arm_cpu.conv2d_nchw_spatial_pack),
wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_nchw_spatial_pack),
name="conv2d_nchw_spatial_pack.arm_cpu",
)
else:
raise RuntimeError(
"Unsupported weight layout {} for conv2d NCHW".format(kernel_layout)
)
elif layout == "HWCN":
assert kernel_layout == "HWIO"
logger.warning("conv2d_hwcn is not optimized for arm cpu.")
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_hwcn),
wrap_topi_schedule(topi.generic.schedule_conv2d_hwcn),
name="conv2d_hwcn.generic",
)
elif layout == "NHWC":
data_width_padding = _get_padding_width(padding)
if (
target.features.has_dsp
and dilation_w == dilation_h == 1
and kernel_layout == "OHWI"
# Check SIMD alignment
and _is_simd_aligned(data.dtype, data.shape[2:], padding=(data_width_padding, 0))
and _is_simd_aligned(kernel.dtype, kernel.shape[2:])
):
strategy.add_implementation(
wrap_compute_conv2d(topi.arm_cpu.conv2d_nhwc_ohwi_dsp, need_out_layout=True),
wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_nhwc_ohwi_dsp),
name="conv2d_nhwc_ohwi_dsp.arm_cpu",
)
elif target.features.has_dsp and kernel_layout == "HWOI":
strategy.add_implementation(
wrap_compute_conv2d(topi.arm_cpu.conv2d_nhwc_dsp),
wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_nhwc_dsp),
name="conv2d_nhwc_dsp.arm_cpu",
)
elif kernel_layout == "HWIO":
has_asimd = target.features.has_asimd
has_dot_prod = target.features.has_dotprod
if has_dot_prod and data.dtype in ["int8", "uint8"]:
strategy.add_implementation(
wrap_compute_conv2d(topi.arm_cpu.compute_conv2d_NHWC_quantized_native),
wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_NHWC_quantized_native),
name="conv2d_NHWC_quantized_native.arm_cpu",
)
if has_asimd and data.dtype in ["int8", "uint8"]:
strategy.add_implementation(
wrap_compute_conv2d(topi.arm_cpu.compute_conv2d_NHWC_quantized_interleaved),
wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_NHWC_quantized_interleaved),
name="conv2d_NHWC_quantized_interleaved.arm_cpu",
)
if (not has_asimd) or (data.dtype not in ["int8", "uint8"]):
# TODO(@giuseros)
# This strategy errors out for quantized data types when tuning.
# Let's use this only for non-aarch64 or non-quantized cases
strategy.add_implementation(
wrap_compute_conv2d(topi.arm_cpu.conv2d_nhwc_spatial_pack),
wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_nhwc_spatial_pack),
name="conv2d_nhwc_spatial_pack.arm_cpu",
)
else:
raise RuntimeError(
"Unsupported kernel layout {} for conv2d NHWC".format(kernel_layout)
)
else:
raise RuntimeError("Unsupported conv2d layout {} for arm cpu".format(layout))
elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
if layout == "NCHW":
assert kernel_layout == "OIHW" or re.match(r"OIHW\d*o", kernel_layout)
if kernel_layout == "OIHW":
data_width_padding = _get_padding_width(padding)
if (
target.features.has_dsp
and dilation_w == dilation_h == 1
and _is_simd_aligned(data.dtype, data.shape[3:], padding=(data_width_padding,))
and _is_simd_aligned(kernel.dtype, kernel.shape[3:])
):
strategy.add_implementation(
wrap_compute_conv2d(
topi.arm_cpu.depthwise_conv2d_nchw_oihw_dsp, need_out_layout=True
),
wrap_topi_schedule(topi.arm_cpu.schedule_depthwise_conv2d_nchw_oihw_dsp),
name="depthwise_conv2d_nchw_oihw_dsp.arm_cpu",
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.arm_cpu.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.arm_cpu.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.arm_cpu",
)
# TODO:
# This schedule has incorrect result on some hardware platforms (like NV Jetson TX2)
# Let us comment it out but not remove.
# see discussion:
# https://discuss.tvm.apache.org/t/autotuner-incorrect-result-after-tuning-mobilenetv2-on-arm-cpu/6088
# strategy.add_implementation(
# wrap_compute_conv2d(topi.arm_cpu.depthwise_conv2d_nchw_spatial_pack),
# wrap_topi_schedule(topi.arm_cpu.schedule_depthwise_conv2d_nchw_spatial_pack),
# name="depthwise_conv2d_nchw_spatial_pack.arm_cpu",
# plevel=15)
# Intel x86 depthwise conv2d schedule.
channel_multiplier = get_const_tuple(inputs[1].shape)[1]
if channel_multiplier == 1 and dilation_h == 1 and dilation_w == 1:
strategy.add_implementation(
wrap_compute_conv2d(topi.x86.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.x86.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.x86",
)
elif layout == "NHWC":
assert kernel_layout == "HWOI"
if target.features.has_asimd:
strategy.add_implementation(
wrap_compute_conv2d(topi.arm_cpu.compute_depthwise_conv2d_nhwc),
wrap_topi_schedule(topi.arm_cpu.schedule_depthwise_conv2d_nhwc),
name="depthwise_conv2d_nhwc.arm_cpu",
)
# Optimized special case depthwiseConv2D operation. Requires NHWC layout,
# a HWOI kernel layout (which we rearrange to a custom layout) no dilation,
# int8/16 inputs, int32 output, and the same number of input and output channels.
# The int8 implementation DOES need the DSP unit (for SXTB16), but it is not
# possible to use the DSP unit to speed up a NHWC depthwise convolution (though
# an NCHW convolution would benefit).
elif (
dilation_w == dilation_h == 1
and kernel.shape[3] == 1 # channel_multiplier == 1
and out_type.dtype == "int32"
and (
(data.shape[3] % 4 == 0 and data.dtype == "int8" and target.features.has_dsp)
or (data.shape[3] % 2 == 0 and data.dtype == "int16")
)
and (padding != "SAME" or data.shape[1] % stride_h == data.shape[2] % stride_w == 0)
# Ideally we should check that kernel is a Relay constant, but strategy functions
# don't have access to the data needed to check this.
):
strategy.add_implementation(
wrap_compute_conv2d(topi.arm_cpu.depthwise_conv2d_nhwc_dsp),
wrap_topi_schedule(topi.arm_cpu.schedule_depthwise_conv2d_nhwc_dsp),
name="depthwise_conv2d_nhwc_dsp.arm_cpu",
)
else:
logger.warning("depthwise_conv2d with layout NHWC is not optimized for arm cpu.")
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc),
wrap_topi_schedule(conv2d_generic.schedule_depthwise_conv2d_nhwc),
name="depthwise_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported depthwise_conv2d layout {} for arm cpu".format(layout))
else: # group_conv2d
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.arm_cpu.group_conv2d_nchw, has_groups=True),
wrap_topi_schedule(topi.arm_cpu.schedule_group_conv2d_nchw),
name="group_conv2d_nchw.arm_cpu",
)
elif layout == "NHWC":
assert kernel_layout == "HWIO"
logger.warning("group_conv2d with layout NHWC is not optimized for arm cpu.")
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.group_conv2d_nhwc, has_groups=True),
wrap_topi_schedule(topi.generic.schedule_group_conv2d_nhwc),
name="group_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported group_conv2d layout {} for arm cpu".format(layout))
return strategy
@conv2d_NCHWc_strategy.register("arm_cpu")
def conv2d_NCHWc_strategy_arm_cpu(attrs, inputs, out_type, target):
"""conv2d_NCHWc adopted from x86"""
strategy = _op.OpStrategy()
data, kernel = inputs
if topi.arm_cpu.is_int8_hw_support(data.dtype, kernel.dtype):
strategy.add_implementation(
wrap_compute_conv2d(
topi.arm_cpu.conv2d_NCHWc_int8, need_data_layout=True, need_out_layout=True
),
wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_NCHWc_int8),
name="conv2d_NCHWc_int8.arm_cpu",
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.x86.conv2d_NCHWc, need_data_layout=True, need_out_layout=True),
wrap_topi_schedule(topi.x86.schedule_conv2d_NCHWc),
name="conv2d_NCHWc.x86",
)
return strategy
@depthwise_conv2d_NCHWc_strategy.register("arm_cpu")
def depthwise_conv2d_NCHWc_strategy_arm_cpu(attrs, inputs, out_type, target):
"""depthwise_conv2d_NCHWc adopted from x86"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d(
topi.x86.depthwise_conv2d_NCHWc, need_data_layout=True, need_out_layout=True
),
wrap_topi_schedule(topi.x86.schedule_depthwise_conv2d_NCHWc),
name="depthwise_conv2d_NCHWc.x86",
)
return strategy
def wrap_compute_conv2d_winograd_nnpack(topi_compute):
"""wrap topi compute for conv2d_winograd NNPack"""
def _compute_conv2d_nnpack(attrs, inputs, out_type):
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
out_dtype = attrs.get_str("out_dtype")
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
return [topi_compute(inputs[0], inputs[1], None, strides, padding, dilation, out_dtype)]
return _compute_conv2d_nnpack
@conv2d_winograd_without_weight_transform_strategy.register("arm_cpu")
def conv2d_winograd_without_weight_transform_strategy_arm_cpu(attrs, inputs, out_type, target):
"""conv2d_winograd_without_weight_transform arm cpu strategy"""
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
layout = attrs.data_layout
strides = attrs.get_int_tuple("strides")
kernel = inputs[1]
assert dilation == (1, 1), "Do not support dilate now"
assert strides == (1, 1), "Do not support strides now"
assert groups == 1, "Do not support arbitrary group number"
strategy = _op.OpStrategy()
if layout == "NCHW":
if len(kernel.shape) == 5:
pad_kh, pad_kw, _, _, _ = get_const_tuple(inputs[1].shape)
tile_size = attrs.get_int("tile_size")
kh = pad_kh - tile_size + 1
kw = pad_kw - tile_size + 1
assert kh == 3 and kw == 3
strategy.add_implementation(
wrap_compute_conv2d(topi.arm_cpu.conv2d_nchw_winograd),
wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_nchw_winograd),
name="conv2d_nchw_winograd.arm_cpu",
)
elif len(kernel.shape) == 4:
# kernel must be packed by winograd nnpack
assert "nnpack" in target.libs
strategy.add_implementation(
wrap_compute_conv2d_winograd_nnpack(
topi.arm_cpu.conv2d_nchw_winograd_nnpack_without_weight_transform
),
wrap_topi_schedule(
topi.arm_cpu.schedule_conv2d_nchw_winograd_nnpack_without_weight_transform
),
name="conv2d_nchw_winograd_nnpack_withou_weight_transform.arm_cpu",
plevel=15,
)
else:
raise RuntimeError("Unsupported kernel shape: {}".format(kernel.shape))
else:
raise RuntimeError(
"Unsupported conv2d_winograd_without_weight_transform layout {}".format(layout)
)
return strategy
def wrap_compute_conv2d_gemm(topi_compute):
"""wrap topi compute for conv2d_gemm"""
def _compute_conv2d_gemm(attrs, inputs, out_type):
padding = attrs.get_int_tuple("padding")
strides = attrs.get_int_tuple("strides")
dilation = attrs.get_int_tuple("dilation")
out_dtype = attrs.get_str("out_dtype")
channels = attrs["channels"]
kernel_size = attrs["kernel_size"]
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
return [
topi_compute(
inputs[0], inputs[1], strides, padding, dilation, out_dtype, kernel_size, channels
)
]
return _compute_conv2d_gemm
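# Note (illustrative, not original commentary): both wrappers in this file
# follow the same convention -- pull attributes off ``attrs``, resolve the
# "same"/"" out_dtype placeholder to the input dtype, and forward everything
# positionally to the wrapped topi compute. A minimal sketch of the fallback:
def _out_dtype_fallback_example(out_dtype="", input_dtype="int8"):
    # hypothetical values standing in for attrs.get_str("out_dtype")
    # and inputs[0].dtype
    return input_dtype if out_dtype in ("same", "") else out_dtype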
@conv2d_gemm_without_weight_transform_strategy.register("arm_cpu")
def conv2d_gemm_without_weight_transform_strategy_arm_cpu(attrs, inputs, out_type, target):
"""conv2d_winograd_without_weight_transform arm cpu strategy"""
layout = attrs.data_layout
data = inputs[0]
strategy = _op.OpStrategy()
interleaved_compute = topi.arm_cpu.compute_conv2d_NHWC_quantized_interleaved_without_transform
native_compute = topi.arm_cpu.compute_conv2d_NHWC_quantized_native_without_transform
if layout == "NHWC" and data.dtype in ["int8", "uint8"]:
strategy.add_implementation(
wrap_compute_conv2d_gemm(native_compute),
wrap_topi_schedule(
topi.arm_cpu.schedule_conv2d_NHWC_quantized_native_without_transform
),
name="conv2d_NHWC_quantized_native_without_transform.arm_cpu",
)
strategy.add_implementation(
wrap_compute_conv2d_gemm(interleaved_compute),
wrap_topi_schedule(
topi.arm_cpu.schedule_conv2d_NHWC_quantized_interleaved_without_transform
),
name="conv2d_NHWC_quantized_interleaved_without_transform.arm_cpu",
)
else:
raise RuntimeError(
"Unsupported conv2d_NHWC_quantized_without_transform layout {0}"
"with datatype {1}".format(layout, data.dtype)
)
return strategy
@conv2d_transpose_strategy.register("arm_cpu")
def conv2d_transpose_strategy_arm_cpu(attrs, inputs, out_type, target):
"""conv2d_transpose arm cpu strategy"""
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCHW", "only support nchw for now"
assert dilation == (1, 1), "not support dilate now"
assert groups == 1, "only support groups == 1 for now"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d_transpose(topi.arm_cpu.conv2d_transpose_nchw),
wrap_topi_schedule(topi.arm_cpu.schedule_conv2d_transpose_nchw),
name="conv2d_tranpose_nchw.arm_cpu",
)
return strategy
@bitserial_conv2d_strategy.register("arm_cpu")
def bitserial_conv2d_strategy_arm_cpu(attrs, inputs, out_type, target):
"""bitserial_conv2d x86 strategy"""
strategy = _op.OpStrategy()
layout = attrs.data_layout
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.x86.bitserial_conv2d_nchw),
wrap_topi_schedule(topi.x86.schedule_bitserial_conv2d_nchw),
name="bitserial_conv2d_nchw.arm_cpu",
)
elif layout == "NHWC":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.arm_cpu.bitserial_conv2d_nhwc),
wrap_topi_schedule(topi.arm_cpu.schedule_bitserial_conv2d_nhwc),
name="bitserial_conv2d_nhwc.arm_cpu",
)
else:
raise ValueError("Data layout {} not supported.".format(layout))
return strategy
@bitserial_dense_strategy.register("arm_cpu")
def schedule_bitserial_dense_arm_cpu(attrs, inputs, out_type, target):
"""bitserial_dense arm cpu strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_bitserial_dense(topi.arm_cpu.bitserial_dense),
wrap_topi_schedule(topi.arm_cpu.schedule_bitserial_dense),
name="bitserial_dense.arm_cpu",
)
return strategy
@dense_strategy.register(["arm_cpu"])
def schedule_dense_arm_cpu(attrs, inputs, out_type, target):
"""dense arm cpu strategy"""
strategy = _op.OpStrategy()
data, _ = inputs
if target.features.has_dsp and data.dtype in ["int8", "int16"]:
strategy.add_implementation(
wrap_compute_dense(topi.arm_cpu.dense_dsp),
wrap_topi_schedule(topi.arm_cpu.schedule_dense_dsp),
name="dense_dsp.arm_cpu",
)
else:
logger.warning("dense is not optimized for arm cpu.")
strategy.add_implementation(
wrap_compute_dense(
topi.nn.dense,
need_auto_scheduler_layout=is_auto_scheduler_enabled(),
need_meta_schedule_layout=is_meta_schedule_enabled(),
),
wrap_topi_schedule(topi.generic.schedule_dense),
name="dense.generic",
)
return strategy
@conv1d_strategy.register("arm_cpu")
def conv1d_strategy_arm_cpu(attrs, inputs, out_type, target):
"""conv1d strategy"""
strategy = _op.OpStrategy()
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
dilation = get_const_tuple(attrs.dilation)
if dilation[0] < 1:
raise ValueError("dilation should be a positive value")
if kernel_layout == "WOI":
if layout == "NWC" and target.features.has_dsp:
strategy.add_implementation(
wrap_compute_conv1d(topi.arm_cpu.conv1d_nwc_dsp),
wrap_topi_schedule(topi.arm_cpu.schedule_conv1d_nwc_dsp),
name="conv1d_dsp.arm_cpu",
)
else:
raise RuntimeError(
"Unsupported kernel layout {} for conv1d {} for arm cpu.".format(
kernel_layout, layout
)
)
elif layout == "NCW":
logger.warning("conv1d with layout %s is not optimized for arm cpu.", layout)
strategy.add_implementation(
wrap_compute_conv1d(topi.nn.conv1d_ncw),
wrap_topi_schedule(topi.generic.schedule_conv1d_ncw),
name="conv1d_ncw.generic",
)
elif layout == "NWC":
logger.warning("conv1d with layout %s is not optimized for arm cpu.", layout)
strategy.add_implementation(
wrap_compute_conv1d(topi.nn.conv1d_nwc),
wrap_topi_schedule(topi.generic.schedule_conv1d_nwc),
name="conv1d_nwc.generic",
)
else:
raise RuntimeError(
"Unsupported kernel layout {} for conv1d {} for arm cpu.".format(kernel_layout, layout)
)
return strategy
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/strategy/bifrost.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of bifrost operator strategy."""
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
import re
from tvm import topi
from .generic import *
from .. import op as _op
@conv2d_strategy.register("bifrost")
def conv2d_strategy_bifrost(attrs, inputs, out_type, target):
"""conv2d mali(bifrost) strategy"""
strategy = _op.OpStrategy()
data, kernel = inputs
dilation_h, dilation_w = attrs.get_int_tuple("dilation")
stride_h, stride_w = attrs.get_int_tuple("strides")
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if groups == 1:
if layout == "NCHW":
if kernel_layout == "OIHW":
strategy.add_implementation(
wrap_compute_conv2d(topi.bifrost.conv2d_nchw_spatial_pack),
wrap_topi_schedule(topi.bifrost.schedule_conv2d_nchw_spatial_pack),
name="conv2d_nchw_spatial_pack.bifrost",
)
_, _, kh, kw = get_const_tuple(kernel.shape)
if (
kh == 3
and kw == 3
and stride_h == 1
and stride_w == 1
and dilation_h == 1
and dilation_w == 1
):
strategy.add_implementation(
wrap_compute_conv2d(topi.bifrost.conv2d_nchw_winograd),
wrap_topi_schedule(topi.bifrost.schedule_conv2d_nchw_winograd),
name="conv2d_nchw_winograd.bifrost",
plevel=5,
)
elif re.match(r"OIHW\d*o", kernel_layout):
strategy.add_implementation(
wrap_compute_conv2d(topi.bifrost.conv2d_nchw_spatial_pack),
wrap_topi_schedule(topi.bifrost.schedule_conv2d_nchw_spatial_pack),
name="conv2d_nchw_spatial_pack.bifrost",
)
elif layout == "NHWC":
assert kernel_layout == "HWIO"
# For now just reuse general Mali strategy.
strategy.add_implementation(
wrap_compute_conv2d(topi.mali.conv2d_nhwc_spatial_pack),
wrap_topi_schedule(topi.mali.schedule_conv2d_nhwc_spatial_pack),
name="conv2d_nhwc_spatial_pack.bifrost",
)
else:
raise RuntimeError("Unsupported conv2d layout {} for Mali(Bifrost)".format(layout))
elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.bifrost.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.bifrost",
)
elif layout == "NHWC":
assert kernel_layout == "HWOI"
# For now just reuse general Mali strategy.
strategy.add_implementation(
wrap_compute_conv2d(topi.mali.depthwise_conv2d_nhwc),
wrap_topi_schedule(topi.mali.schedule_depthwise_conv2d_nhwc),
name="depthwise_conv2d_nchw.bifrost",
)
else:
raise RuntimeError(
"Unsupported depthwise_conv2d layout {} for Mali(Bifrost)".format(layout)
)
else: # group_conv2d
raise RuntimeError("group_conv2d is not supported for Mali(Bifrost)")
return strategy
@conv2d_winograd_without_weight_transform_strategy.register("bifrost")
def conv2d_winograd_without_weight_transform_strategy_bifrost(attrs, inputs, out_type, target):
"""conv2d_winograd_without_weight_transform mali(bifrost) strategy"""
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
layout = attrs.data_layout
strides = attrs.get_int_tuple("strides")
assert dilation == (1, 1), "Do not support dilate now"
assert strides == (1, 1), "Do not support strides now"
assert groups == 1, "Do not support arbitrary group number"
strategy = _op.OpStrategy()
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_conv2d(topi.bifrost.conv2d_nchw_winograd),
wrap_topi_schedule(topi.bifrost.schedule_conv2d_nchw_winograd),
name="conv2d_nchw_winograd.bifrost",
)
else:
raise RuntimeError(
"Unsupported conv2d_winograd_without_weight_transform layout {}".format(layout)
)
return strategy
@dense_strategy.register("bifrost")
def dense_strategy_bifrost(attrs, inputs, out_type, target):
"""dense mali(bifrost) strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_dense(topi.bifrost.dense),
wrap_topi_schedule(topi.bifrost.schedule_dense),
name="dense.bifrost",
)
return strategy
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/strategy/cuda.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of CUDA/GPU operator strategy."""
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
from tvm import topi
from tvm.auto_scheduler import is_auto_scheduler_enabled
from tvm.contrib import nvcc
from tvm.contrib.thrust import can_use_thrust
from tvm.meta_schedule import is_meta_schedule_enabled
from tvm.te import SpecializedCondition
from ....target import Target
from ....tir import IntImm
from .. import op as _op
from .generic import *
@schedule_injective.register(["cuda", "gpu"])
def schedule_injective_cuda(attrs, outs, target):
"""schedule injective ops for cuda"""
with target:
return topi.cuda.schedule_injective(outs)
@schedule_reduce.register(["cuda", "gpu"])
def schedule_reduce_cuda(attrs, outs, target):
"""schedule reduction ops for cuda"""
with target:
return topi.cuda.schedule_reduce(outs)
@concatenate_strategy.register(["cuda", "gpu"])
def concatenate_strategy_cuda(attrs, inputs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_concat(topi.transform.concatenate),
wrap_topi_schedule(topi.cuda.schedule_injective),
name="concatenate.cuda",
)
return strategy
@schedule_pool.register(["cuda", "gpu"])
def schedule_pool_cuda(attrs, outs, target):
"""schedule pooling ops for cuda"""
with target:
return topi.cuda.schedule_pool(outs, attrs.layout)
@schedule_pool_grad.register(["cuda", "gpu"])
def schedule_pool_grad_cuda(attrs, outs, target):
"""schedule pooling gradient ops for cuda"""
with target:
return topi.cuda.schedule_pool_grad(outs)
@schedule_adaptive_pool.register(["cuda", "gpu"])
def schedule_adaptive_pool_cuda(attrs, outs, target):
"""schedule adaptive pooling ops for cuda"""
with target:
return topi.cuda.schedule_adaptive_pool(outs, attrs.layout)
@softmax_strategy.register(["cuda", "gpu"])
def softmax_strategy_cuda(attrs, inputs, out_type, target):
"""softmax cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.softmax),
wrap_topi_schedule(topi.cuda.schedule_softmax),
name="softmax.cuda",
)
if target.kind.name == "cuda" and "cudnn" in target.libs:
strategy.add_implementation(
wrap_compute_softmax(topi.cuda.softmax_cudnn),
wrap_topi_schedule(topi.cuda.schedule_softmax_cudnn),
name="softmax.cudnn",
plevel=15,
)
return strategy
@fast_softmax_strategy.register(["cuda", "gpu"])
def fast_softmax_strategy_cuda(attrs, inputs, out_type, target):
"""fast_softmax cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.fast_softmax),
wrap_topi_schedule(topi.cuda.schedule_softmax),
name="fast_softmax.cuda",
)
return strategy
@log_softmax_strategy.register(["cuda", "gpu"])
def log_softmax_strategy_cuda(attrs, inputs, out_type, target):
"""log_softmax cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.log_softmax),
wrap_topi_schedule(topi.cuda.schedule_softmax),
name="log_softmax.cuda",
)
if target.kind.name == "cuda" and "cudnn" in target.libs:
strategy.add_implementation(
wrap_compute_softmax(topi.cuda.log_softmax_cudnn),
wrap_topi_schedule(topi.cuda.schedule_log_softmax_cudnn),
name="log_softmax.cudnn",
plevel=15,
)
return strategy
@schedule_lrn.register(["cuda", "gpu"])
def schedule_lrn_cuda(attrs, outs, target):
"""schedule LRN for cuda"""
with target:
return topi.cuda.schedule_lrn(outs)
@conv2d_strategy.register(["cuda", "gpu"])
def conv2d_strategy_cuda(attrs, inputs, out_type, target):
"""conv2d cuda strategy"""
strategy = _op.OpStrategy()
data, kernel = inputs
stride_h, stride_w = attrs.get_int_tuple("strides")
dilation_h, dilation_w = attrs.get_int_tuple("dilation")
padding = attrs.get_int_tuple("padding")
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if groups == 1:
if layout == "NCHW":
assert kernel_layout == "OIHW"
if (
(target.kind.name in ["cuda", "vulkan", "rocm"])
and data.dtype in ("int8", "uint8")
and kernel.dtype in ("int8", "uint8")
):
assert data.dtype == kernel.dtype
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_nchw_int8),
wrap_topi_schedule(topi.cuda.schedule_conv2d_nchw_int8),
name="conv2d_nchw_int8.cuda",
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_nchw),
wrap_topi_schedule(topi.cuda.schedule_conv2d_nchw),
name="conv2d_nchw.cuda",
)
N, _, H, W = get_const_tuple(data.shape)
CO, CI, KH, KW = get_const_tuple(kernel.shape)
(_, _, judge_winograd_auto_scheduler) = judge_winograd(
N,
H,
W,
KH,
KW,
CI,
CO,
padding,
stride_h,
stride_w,
dilation_h,
dilation_w,
data.dtype,
kernel.dtype,
pre_flag=False,
)
if is_meta_schedule_enabled() and judge_winograd_auto_scheduler:
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_winograd_nchw),
naive_schedule, # this implementation should never be picked by autotvm
name="conv2d_nchw_winograd.cuda",
plevel=15,
)
elif (
(2 < KH < 8 and 2 < KW < 8 and KH == KW)
and (stride_h == 1 and stride_w == 1)
and (dilation_h == 1 and dilation_w == 1)
):
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_nchw_winograd),
wrap_topi_schedule(topi.cuda.schedule_conv2d_nchw_winograd),
name="conv2d_nchw_winograd.cuda",
plevel=5,
)
elif layout == "HWCN":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_hwcn),
wrap_topi_schedule(topi.cuda.schedule_conv2d_hwcn),
name="conv2d_hwcn.cuda",
)
elif layout == "NHWC" and kernel_layout == "HWIO":
strategy.add_implementation(
wrap_compute_conv2d(topi.gpu.conv2d_nhwc),
wrap_topi_schedule(topi.gpu.schedule_conv2d_nhwc),
name="conv2d_nhwc.gpu",
)
N, H, W, _ = get_const_tuple(data.shape)
KH, KW, CI, CO = get_const_tuple(kernel.shape)
# Winograd shape related judgment
(
judge_winograd_tensorcore,
judge_winograd_autotvm,
judge_winograd_auto_scheduler,
) = judge_winograd(
N,
H,
W,
KH,
KW,
CI,
CO,
padding,
stride_h,
stride_w,
dilation_h,
dilation_w,
data.dtype,
kernel.dtype,
pre_flag=False,
)
if judge_winograd_autotvm:
if (
target.kind.name == "cuda"
and nvcc.have_tensorcore(target=target)
and judge_winograd_tensorcore
):
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_nhwc_winograd_tensorcore),
wrap_topi_schedule(topi.cuda.schedule_conv2d_nhwc_winograd_tensorcore),
name="conv2d_nhwc_winograd_tensorcore.cuda",
plevel=5,
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_nhwc_winograd_direct),
wrap_topi_schedule(topi.cuda.schedule_conv2d_nhwc_winograd_direct),
name="conv2d_nhwc_winograd_direct.cuda",
plevel=5,
)
if (
target.kind.name == "cuda"
and nvcc.have_tensorcore(target=target)
and (
(N % 16 == 0 and CI % 16 == 0 and CO % 16 == 0)
or (N % 8 == 0 and CI % 16 == 0 and CO % 32 == 0)
or (N % 32 == 0 and CI % 16 == 0 and CO % 8 == 0)
)
):
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_nhwc_tensorcore),
wrap_topi_schedule(topi.cuda.schedule_conv2d_nhwc_tensorcore),
name="conv2d_nhwc_tensorcore.cuda",
plevel=20,
)
# register auto-scheduler implementations
if is_auto_scheduler_enabled() and judge_winograd_auto_scheduler:
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_winograd_nhwc),
naive_schedule, # this implementation should never be picked by autotvm
name="conv2d_nhwc.winograd",
plevel=15,
)
# register meta-schedule implementations
if is_meta_schedule_enabled() and judge_winograd_auto_scheduler:
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_winograd_nhwc),
naive_schedule, # this implementation should never be picked by autotvm
name="conv2d_nhwc.winograd",
plevel=15,
)
elif layout == "HWNC":
assert kernel_layout in ["HWOI", "HWOI16o16i", "HWOI8o32i", "HWOI32o16i"]
_, _, N, in_channels = get_const_tuple(data.shape)
pre_computed = len(kernel.shape) == 6
if pre_computed:
_, _, oc_chunk, _, oc_block_factor, _ = get_const_tuple(kernel.shape)
out_channels = oc_chunk * oc_block_factor
else:
_, _, out_channels, _ = get_const_tuple(kernel.shape)
tensorcore_dtypes = ["int4", "uint4", "int8", "uint8"]
if (
target.kind.name == "cuda"
and nvcc.have_tensorcore(target=target)
and kernel.dtype in tensorcore_dtypes
and (
(
data.dtype in ["int4", "uint4"]
and N % 8 == 0
and in_channels % 32 == 0
and out_channels % 8 == 0
)
or (
data.dtype in ["int8", "uint8"]
and N % 8 == 0
and in_channels % 16 == 0
and out_channels % 32 == 0
)
)
):
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_hwnc_tensorcore),
wrap_topi_schedule(topi.cuda.schedule_conv2d_hwnc_tensorcore),
name="conv2d_hwnc_tensorcore_direct.cuda",
plevel=20,
)
else:
raise RuntimeError(
"Unsupported shape for conv2d HWNC.\
Need to satisfy tensor core schedule."
)
elif (
(target.kind.name in ["cuda", "vulkan", "rocm"])
and layout == "NCHW4c"
and data.dtype in ["int8", "uint8"]
):
assert kernel_layout == "OIHW4o4i"
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_NCHWc_int8, need_data_layout=True),
wrap_topi_schedule(topi.cuda.schedule_conv2d_NCHWc_int8),
name="conv2d_NCHWc_int8.cuda",
)
elif is_auto_scheduler_enabled() or is_meta_schedule_enabled():
strategy.add_implementation(
wrap_compute_conv2d(
topi.nn.conv, need_data_layout=True, need_kernel_layout=True, has_groups=True
),
naive_schedule,
name="conv2d.cuda",
plevel=15,
)
elif target.kind.name == "cuda" and "cudnn" not in target.libs:
# No TVM native kernel applicable
raise RuntimeError("Unsupported conv2d layout {} for CUDA".format(layout))
if (
target.kind.name == "cuda"
and "cudnn" in target.libs
and layout in ["NCHW", "NHWC"]
and padding[0] == padding[2]
and padding[1] == padding[3]
and not (data.dtype in ["uint8", "int8"] or kernel.dtype in ["uint8", "int8"])
):
# add cudnn implementation
if layout == "NHWC":
assert kernel_layout == "OHWI"
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_cudnn, need_data_layout=True, has_groups=True),
wrap_topi_schedule(topi.cuda.schedule_conv2d_cudnn),
name="conv2d_cudnn.cuda",
plevel=25,
)
elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups) and (
layout == "NCHW" or "cudnn" not in target.libs
): # cuDNN requires a different kernel layout for NHWC inputs.
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.cuda.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.cuda",
)
elif layout == "NHWC":
assert kernel_layout == "HWOI"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc),
wrap_topi_schedule(topi.cuda.schedule_depthwise_conv2d_nhwc),
name="depthwise_conv2d_nhwc.cuda",
)
else:
raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout))
else: # group_conv2d
# add cudnn implementation, if any
cudnn_impl = False
if target.kind.name == "cuda" and "cudnn" in target.libs:
if (
layout in ["NCHW", "NHWC"]
and padding[0] == padding[2]
and padding[1] == padding[3]
and not (data.dtype in ["uint8", "int8"] or kernel.dtype in ["uint8", "int8"])
):
strategy.add_implementation(
wrap_compute_conv2d(
topi.cuda.conv2d_cudnn, need_data_layout=True, has_groups=True
),
wrap_topi_schedule(topi.cuda.schedule_conv2d_cudnn),
name="conv2d_cudnn.cuda",
plevel=25,
)
cudnn_impl = True
if layout == "NCHW":
assert kernel_layout == "OIHW"
_, channels, _, _ = get_const_tuple(data.shape)
out_channels, in_channels, _, _ = get_const_tuple(kernel.shape)
oc_chunk = out_channels // 4
ic_chunk = in_channels // 4
if (
(target.kind.name in ["cuda", "vulkan", "rocm"])
and data.dtype in ["int8", "uint8"]
and kernel.dtype in ["int8", "uint8"]
and channels % groups == 0
and out_channels % groups == 0
and channels % 4 == 0
and out_channels % 4 == 0
and groups <= oc_chunk
and groups <= ic_chunk
):
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.group_conv2d_nchw_int8, has_groups=True),
wrap_topi_schedule(topi.cuda.schedule_group_conv2d_nchw_int8),
name="group_conv2d_nchw_int8.cuda",
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.group_conv2d_nchw, has_groups=True),
wrap_topi_schedule(topi.cuda.schedule_group_conv2d_nchw),
name="group_conv2d_nchw.cuda",
)
elif layout == "NCHW4c" and data.dtype in ["int8", "uint8"]:
assert kernel_layout == "OIHW4o4i"
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.group_conv2d_NCHWc_int8, has_groups=True),
wrap_topi_schedule(topi.cuda.schedule_group_conv2d_NCHWc_int8),
name="group_conv2d_NCHWc_int8.cuda",
)
elif not cudnn_impl:
raise RuntimeError("Unsupported group_conv2d layout {}".format(layout))
return strategy
def judge_winograd(
N,
H,
W,
KH,
KW,
CI,
CO,
padding,
stride_h,
stride_w,
dilation_h,
dilation_w,
data_dtype,
kernel_dtype,
pre_flag,
):
"""Winograd judgement about tensorcore and shape"""
if H % 8 == 0:
tile_size = 4
else:
tile_size = 2
if pre_flag:
alpha = KH
KH = KW = alpha + 1 - tile_size
pt, pl, pb, pr = topi.nn.get_pad_tuple(padding, (KH, KW))
OH = (H + pt + pb - KH) // stride_h + 1
OW = (W + pl + pr - KW) // stride_w + 1
nH, nW = (OH + tile_size - 1) // tile_size, (OW + tile_size - 1) // tile_size
if not isinstance(N, int):
return False, False, False
P = N * nH * nW
judge_winograd_tensorcore = (
(P % 16 == 0 and CI % 16 == 0 and CO % 16 == 0)
or (P % 8 == 0 and CI % 16 == 0 and CO % 32 == 0)
or (P % 32 == 0 and CI % 16 == 0 and CO % 8 == 0)
)
judge_winograd_autotvm = (
2 < KH < 8
and 2 < KW < 8
and KH == KW
and stride_h == 1
and stride_w == 1
and dilation_h == 1
and dilation_w == 1
)
judge_winograd_auto_scheduler = (
("float" in data_dtype and "float" in kernel_dtype)
and (KH == 3 and KW == 3)
and (stride_h == 1 and stride_w == 1)
and (dilation_h == 1 and dilation_w == 1)
)
return judge_winograd_tensorcore, judge_winograd_autotvm, judge_winograd_auto_scheduler
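def _judge_winograd_example():
    """Illustrative sketch with assumed values (not part of the original
    file): a ResNet-style NHWC float16 workload, N=8, H=W=56, 3x3 kernel,
    stride 1, padding 1, CI=CO=64."""
    tensorcore_ok, autotvm_ok, auto_scheduler_ok = judge_winograd(
        8, 56, 56, 3, 3, 64, 64, (1, 1, 1, 1), 1, 1, 1, 1,
        "float16", "float16", pre_flag=False,
    )
    # H % 8 == 0 -> tile_size = 4; OH = OW = 56; nH = nW = 14;
    # P = 8 * 14 * 14 = 1568 is divisible by 16, so all three judgements
    # come back True for this shape.
    return tensorcore_ok, autotvm_ok, auto_scheduler_ok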
@conv2d_winograd_without_weight_transform_strategy.register(["cuda", "gpu"])
def conv2d_winograd_without_weight_transform_strategy_cuda(attrs, inputs, out_type, target):
"""conv2d_winograd_without_weight_transform cuda strategy"""
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
layout = attrs.data_layout
data, kernel = inputs
stride_h, stride_w = attrs.get_int_tuple("strides")
padding = attrs.get_int_tuple("padding")
assert dilation == (1, 1), "Do not support dilate now"
assert groups == 1, "Do not support arbitrary group number"
strategy = _op.OpStrategy()
if layout == "NCHW":
if is_meta_schedule_enabled():
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_winograd_nchw_without_weight_transform),
naive_schedule, # this implementation should never be picked by autotvm
name="conv2d_nchw_winograd_without_weight_transform",
plevel=15,
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_nchw_winograd_without_weight_transform),
wrap_topi_schedule(
topi.cuda.schedule_conv2d_nchw_winograd_without_weight_transform
),
name="conv2d_nchw_winograd_without_weight_transform.cuda",
)
elif layout == "NHWC":
N, H, W, _ = get_const_tuple(data.shape)
alpha, _, CI, CO = get_const_tuple(kernel.shape)
dilation_h, dilation_w = dilation
judge_winograd_tensorcore, _, _ = judge_winograd(
N,
H,
W,
alpha,
alpha,
CI,
CO,
padding,
stride_h,
stride_w,
dilation_h,
dilation_w,
data.dtype,
kernel.dtype,
pre_flag=True,
)
if (
target.kind.name == "cuda"
and nvcc.have_tensorcore(target=target)
and judge_winograd_tensorcore
):
strategy.add_implementation(
wrap_compute_conv2d(
topi.cuda.conv2d_nhwc_winograd_tensorcore_without_weight_transform
),
wrap_topi_schedule(
topi.cuda.schedule_conv2d_nhwc_winograd_tensorcore_without_weight_transform
),
name="conv2d_nhwc_winograd_tensorcore_without_weight_transform.cuda",
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.cuda.conv2d_nhwc_winograd_direct_without_weight_transform),
wrap_topi_schedule(
topi.cuda.schedule_conv2d_nhwc_winograd_direct_without_weight_transform
),
name="conv2d_nhwc_winograd_direct_without_weight_transform.cuda",
)
if is_auto_scheduler_enabled():
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_winograd_nhwc_without_weight_transform),
naive_schedule, # this implementation should never be picked by autotvm
name="conv2d_nhwc_winograd_without_weight_transform",
plevel=15,
)
if is_meta_schedule_enabled():
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_winograd_nhwc_without_weight_transform),
naive_schedule, # this implementation should never be picked by autotvm
name="conv2d_nhwc_winograd_without_weight_transform",
plevel=15,
)
else:
raise RuntimeError(
"Unsupported conv2d_winograd_without_weight_transform layout {}".format(layout)
)
return strategy
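# Hedged note (grounded in judge_winograd above; the numbers are an
# illustration): in this strategy the kernel has already been transformed to
# winograd form, so its leading extent is alpha and pre_flag=True makes
# judge_winograd recover the original kernel size via
# KH = KW = alpha + 1 - tile_size (e.g. alpha = 6, tile_size = 4 -> KH = 3).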
@deformable_conv2d_strategy.register(["cuda", "gpu"])
def deformable_conv2d_strategy_cuda(attrs, inputs, out_type, target):
"""deformable_conv2d cuda strategy"""
layout = attrs.data_layout
strategy = _op.OpStrategy()
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_deformable_conv2d(topi.cuda.deformable_conv2d_nchw),
wrap_topi_schedule(topi.cuda.schedule_deformable_conv2d_nchw),
name="deformable_conv2d_nchw.cuda",
)
elif layout == "NHWC":
# This implementation should never be picked by autotvm
strategy.add_implementation(
wrap_compute_deformable_conv2d(topi.nn.deformable_conv2d_nhwc),
naive_schedule,
name="deformable_conv2d_nhwc.cuda",
)
else:
raise RuntimeError("Layout %s is not supported in deformable conv2d on CUDA" % layout)
return strategy
@conv2d_backward_weight_strategy.register(["cuda"])
def conv2d_backward_weight_strategy_cuda(attrs, inputs, out_type, target):
"""conv2d_backward_weight cuda strategy"""
strategy = _op.OpStrategy()
if target.kind.name == "cuda" and "cudnn" in target.libs:
strategy.add_implementation(
wrap_compute_conv2d_backward_weight(topi.cuda.conv2d_backward_weight_cudnn),
wrap_topi_schedule(topi.generic.schedule_extern),
name="conv2d_backward_weight_strategy.cudnn",
plevel=15,
)
else:
raise RuntimeError(
"conv2d_backward_weight on cuda is currently only supported with cudnn. "
"Please run Legalize pass to decompose this op into supported ops."
)
return strategy
@conv2d_transpose_strategy.register(["cuda", "gpu"])
def conv2d_transpose_strategy_cuda(attrs, inputs, out_type, target):
"""conv2d_transpose cuda strategy"""
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
    assert dilation == (1, 1), "Dilation is not supported yet"
strategy = _op.OpStrategy()
num_strategies = 0
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_conv2d_transpose(topi.cuda.conv2d_transpose_nchw, has_groups=True),
wrap_topi_schedule(topi.cuda.schedule_conv2d_transpose_nchw),
name="conv2d_transpose_nchw.cuda",
)
num_strategies += 1
if (
target.kind.name == "cuda"
and "cudnn" in target.libs
and (
(layout == "NCHW" and attrs.kernel_layout == "IOHW")
or (layout == "NHWC" and attrs.kernel_layout == "IHWO")
)
):
strategy.add_implementation(
wrap_compute_conv2d_transpose(
topi.cuda.conv2d_transpose_cudnn, add_layout=True, has_groups=True
),
wrap_topi_schedule(topi.generic.schedule_extern),
name="conv2d_transpose.cudnn.cuda",
plevel=25,
)
num_strategies += 1
# TODO(masahi): Support conv2d_transpose NHWC for non-cudnn path.
assert num_strategies > 0, "Unsupported conv2d_transpose workload, layout = %s, groups = %d" % (
layout,
groups,
)
return strategy
@conv3d_transpose_strategy.register(["cuda", "gpu"])
def conv3d_transpose_strategy_cuda(attrs, inputs, out_type, target):
"""conv3d_transpose cuda strategy"""
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
    assert layout == "NCDHW", "Only the NCDHW layout is supported for now"
    assert dilation == (1, 1, 1), "Dilation is not supported yet"
    assert groups == 1, "Only groups == 1 is supported for now"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv3d_transpose(topi.cuda.conv3d_transpose_ncdhw),
wrap_topi_schedule(topi.cuda.schedule_conv3d_transpose_ncdhw),
name="conv3d_transpose_ncdhw.cuda",
)
return strategy
@conv3d_strategy.register(["cuda", "gpu"])
def conv3d_strategy_cuda(attrs, inputs, out_type, target):
"""conv3d cuda strategy"""
strategy = _op.OpStrategy()
data, kernel = inputs
layout = attrs.data_layout
_, stride_h, stride_w = attrs.get_int_tuple("strides")
_, dilation_h, dilation_w = attrs.get_int_tuple("dilation")
    assert layout in ["NCDHW", "NDHWC"], "Layout {} is not supported yet".format(layout)
if layout == "NCDHW":
strategy.add_implementation(
wrap_compute_conv3d(topi.cuda.conv3d_ncdhw),
wrap_topi_schedule(topi.cuda.schedule_conv3d_ncdhw),
name="conv3d_ncdhw.cuda",
plevel=10,
)
_, _, _, kh, kw = get_const_tuple(kernel.shape)
if (
2 < kh < 8
and 2 < kw < 8
and kh == kw
and stride_h == 1
and stride_w == 1
and dilation_h == 1
and dilation_w == 1
and attrs["groups"] == 1
):
strategy.add_implementation(
wrap_compute_conv3d(topi.cuda.conv3d_ncdhw_winograd),
wrap_topi_schedule(topi.cuda.schedule_conv3d_ncdhw_winograd),
name="conv3d_ncdhw_winograd.cuda",
plevel=5,
)
    else:  # layout == "NDHWC"
strategy.add_implementation(
wrap_compute_conv3d(topi.cuda.conv3d_ndhwc),
wrap_topi_schedule(topi.cuda.schedule_conv3d_ndhwc),
name="conv3d_ndhwc.cuda",
plevel=10,
)
N, _, _, _, _ = get_const_tuple(data.shape)
_, _, _, CI, CO = get_const_tuple(kernel.shape)
if target.kind.name == "cuda":
if nvcc.have_tensorcore(target=target):
if (
(N % 16 == 0 and CI % 16 == 0 and CO % 16 == 0)
or (N % 8 == 0 and CI % 16 == 0 and CO % 32 == 0)
or (N % 32 == 0 and CI % 16 == 0 and CO % 8 == 0)
                ) and out_type.dtype == "float16":
strategy.add_implementation(
wrap_compute_conv3d(topi.cuda.conv3d_ndhwc_tensorcore),
wrap_topi_schedule(topi.cuda.schedule_conv3d_ndhwc_tensorcore),
name="conv3d_ndhwc_tensorcore.cuda",
plevel=20,
)
if target.kind.name == "cuda" and "cudnn" in target.libs:
strategy.add_implementation(
wrap_compute_conv3d(topi.cuda.conv3d_cudnn, True),
wrap_topi_schedule(topi.cuda.schedule_conv3d_cudnn),
name="conv3d_cudnn.cuda",
plevel=25,
)
return strategy
@conv3d_winograd_without_weight_transform_strategy.register(["cuda", "gpu"])
def conv3d_winograd_without_weight_transform_strategy_cuda(attrs, inputs, out_type, target):
"""conv3d_winograd_without_weight_transform cuda strategy"""
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
layout = attrs.data_layout
    assert dilation == (1, 1, 1), "Dilation is not supported yet"
    assert groups == 1, "Only groups == 1 is supported"
strategy = _op.OpStrategy()
if layout == "NCDHW":
strategy.add_implementation(
wrap_compute_conv3d(topi.cuda.conv3d_ncdhw_winograd_without_weight_transform),
wrap_topi_schedule(topi.cuda.schedule_conv3d_ncdhw_winograd_without_weight_transform),
name="conv3d_ncdhw_winograd_without_weight_transform.cuda",
)
else:
raise RuntimeError(
"Unsupported conv3d_winograd_without_weight_transform layout {}".format(layout)
)
return strategy
@conv1d_strategy.register(["cuda", "gpu"])
def conv1d_strategy_cuda(attrs, inputs, out_type, target):
"""conv1d cuda strategy"""
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
if dilation[0] < 1:
raise ValueError("dilation should be a positive value")
strategy = _op.OpStrategy()
if attrs.groups == 1:
if layout == "NCW":
strategy.add_implementation(
wrap_compute_conv1d(topi.cuda.conv1d_ncw),
wrap_topi_schedule(topi.cuda.schedule_conv1d_ncw),
name="conv1d_ncw.cuda",
)
elif layout == "NWC":
strategy.add_implementation(
wrap_compute_conv1d(topi.cuda.conv1d_nwc),
wrap_topi_schedule(topi.cuda.schedule_conv1d_nwc),
name="conv1d_nwc.cuda",
)
else:
raise ValueError("Unsupported conv1d layout {}".format(layout))
else:
if layout == "NCW":
strategy.add_implementation(
wrap_compute_group_conv1d(topi.cuda.group_conv1d_ncw),
wrap_topi_schedule(topi.cuda.schedule_group_conv1d_ncw),
name="group_conv1d_ncw.cuda",
)
elif layout == "NWC":
strategy.add_implementation(
wrap_compute_group_conv1d(topi.cuda.group_conv1d_nwc),
wrap_topi_schedule(topi.cuda.schedule_group_conv1d_nwc),
name="group_conv1d_nwc.cuda",
)
else:
raise ValueError("Unsupported conv1d layout {}".format(layout))
return strategy
@conv1d_transpose_strategy.register(["cuda", "gpu"])
def conv1d_transpose_strategy_cuda(attrs, inputs, out_type, target):
"""conv1d_transpose cuda strategy"""
strategy = _op.OpStrategy()
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
    assert layout == "NCW", "conv1d_transpose only supports the NCW layout"
    assert dilation == (1,), "conv1d_transpose does not support dilation"
    assert groups == 1, "conv1d_transpose only supports groups == 1"
strategy.add_implementation(
wrap_compute_conv1d_transpose(topi.cuda.conv1d_transpose_ncw),
wrap_topi_schedule(topi.cuda.schedule_conv1d_transpose_ncw),
name="conv1d_transpose_ncw.cuda",
)
return strategy
@matmul_strategy.register(["cuda", "gpu"])
def matmul_strategy_cuda(attrs, inputs, out_type, target):
"""Matmul cuda strategy."""
strategy = _op.OpStrategy()
if is_auto_scheduler_enabled():
strategy.add_implementation(
wrap_compute_matmul(topi.nn.matmul),
naive_schedule,
name="matmul.cuda",
)
elif is_meta_schedule_enabled():
strategy.add_implementation(
wrap_compute_matmul(topi.nn.matmul),
naive_schedule,
name="matmul.cuda",
)
else:
logger.warning(
"Matmul is not optimized for cuda. Recommend to use cublas for better performance."
)
# Temporary use this as a basic schedule
strategy.add_implementation(
wrap_compute_matmul(topi.gpu.matmul_default),
wrap_topi_schedule(topi.gpu.schedule_matmul_default),
name="matmul_default.gpu",
)
if target.kind.name == "cuda" and "cublas" in target.libs:
strategy.add_implementation(
wrap_compute_matmul(topi.cuda.matmul_cublas),
wrap_topi_schedule(topi.cuda.schedule_matmul_cublas),
name="matmul_cublas.cuda",
plevel=25,
)
return strategy
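# Hedged usage note (the target string is standard TVM convention; the helper
# is an illustration, not a TVM API): the cuBLAS implementation above is only
# registered when "cublas" appears in the target's libs, where its plevel=25
# outranks the fallback schedule.
def _example_cublas_matmul_target():
    """Hypothetical helper: build a CUDA target that enables the cuBLAS impl."""
    import tvm
    target = tvm.target.Target("cuda -libs=cublas")
    assert "cublas" in target.libs
    return target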
@dense_strategy.register(["cuda", "gpu"])
def dense_strategy_cuda(attrs, inputs, out_type, target):
"""dense cuda strategy"""
strategy = _op.OpStrategy()
data, weights = inputs
b, i = get_const_tuple(data.shape)
o, _ = get_const_tuple(weights.shape)
if (
target.kind.name in ["cuda", "vulkan", "rocm"]
and data.dtype == "int8"
and weights.dtype == "int8"
and out_type.dtype == "int32"
):
strategy.add_implementation(
wrap_compute_dense(topi.cuda.dense_int8),
wrap_topi_schedule(topi.cuda.schedule_dense_int8),
name="dense_int8.cuda",
)
else:
strategy.add_implementation(
wrap_compute_dense(topi.gpu.dense_small_batch),
wrap_topi_schedule(topi.gpu.schedule_dense_small_batch),
name="dense_small_batch.gpu",
)
with SpecializedCondition(b >= 32):
strategy.add_implementation(
wrap_compute_dense(topi.gpu.dense_large_batch),
wrap_topi_schedule(topi.gpu.schedule_dense_large_batch),
name="dense_large_batch.gpu",
plevel=5,
)
if target.kind.name == "cuda":
if nvcc.have_tensorcore(target=target):
if (
(
data.dtype in ["float16", "int8", "uint8"]
and (
(i % 16 == 0 and b % 16 == 0 and o % 16 == 0)
or (i % 16 == 0 and b % 8 == 0 and o % 32 == 0)
or (i % 16 == 0 and b % 32 == 0 and o % 8 == 0)
)
)
or (data.dtype in ["int4", "uint4"] and i % 32 == 0 and b % 8 == 0 and o % 8 == 0)
or (data.dtype in ["int1", "uint1"] and i % 128 == 0 and b % 8 == 0 and o % 8 == 0)
):
strategy.add_implementation(
wrap_compute_dense(topi.cuda.dense_tensorcore),
wrap_topi_schedule(topi.cuda.schedule_dense_tensorcore),
name="dense_tensorcore.cuda",
plevel=20,
)
if target.kind.name == "cuda" and "cublas" in target.libs:
strategy.add_implementation(
wrap_compute_dense(topi.cuda.dense_cublas),
wrap_topi_schedule(topi.cuda.schedule_dense_cublas),
name="dense_cublas.cuda",
plevel=25,
)
return strategy
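# Hedged, illustrative predicate (assumed shapes; mirrors the float16/int8
# divisibility rule above, it is not a TVM API): dense_tensorcore is only
# added when batch/in/out extents line up with the wmma fragment shapes.
def _example_dense_tensorcore_eligible(b=32, i=64, o=128):
    """Returns True for shapes the float16 tensorcore dense path accepts."""
    return (
        (i % 16 == 0 and b % 16 == 0 and o % 16 == 0)
        or (i % 16 == 0 and b % 8 == 0 and o % 32 == 0)
        or (i % 16 == 0 and b % 32 == 0 and o % 8 == 0)
    )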
@batch_matmul_strategy.register(["cuda", "gpu"])
def batch_matmul_strategy_cuda(attrs, inputs, out_type, target):
"""batch_matmul cuda strategy"""
strategy = _op.OpStrategy()
x, y = inputs
if (
x.dtype == "int8"
and y.dtype == "int8"
and out_type.dtype == "int32"
and not attrs["transpose_a"]
and attrs["transpose_b"]
):
strategy.add_implementation(
wrap_compute_batch_matmul(topi.cuda.batch_matmul_int8, need_out_dtype=True),
wrap_topi_schedule(topi.cuda.schedule_batch_matmul_int8),
name="batch_matmul_int8.cuda",
plevel=10,
)
else:
strategy.add_implementation(
wrap_compute_batch_matmul(topi.cuda.batch_matmul, need_out_dtype=True),
wrap_topi_schedule(topi.cuda.schedule_batch_matmul),
name="batch_matmul.cuda",
plevel=10,
)
if target.kind.name == "cuda" and "cublas" in target.libs:
strategy.add_implementation(
wrap_compute_batch_matmul(topi.cuda.batch_matmul_cublas, need_out_dtype=True),
wrap_topi_schedule(topi.generic.schedule_extern),
name="batch_matmul_cublas.cuda",
plevel=30,
)
if (
target.kind.name == "cuda"
and nvcc.have_tensorcore(target=target)
and not attrs["transpose_a"]
and attrs["transpose_b"]
):
x, y = inputs
_, M, K = get_const_tuple(x.shape)
_, N, K = get_const_tuple(y.shape)
if (
x.dtype in ["float16", "int8", "uint8"]
and (
(M % 8 == 0 and K % 16 == 0 and N % 32 == 0)
or (M % 16 == 0 and K % 16 == 0 and N % 16 == 0)
or (M % 32 == 0 and K % 16 == 0 and N % 8 == 0)
)
) or (x.dtype in ["int4", "uint4"] and K % 32 == 0 and M % 8 == 0 and N % 8 == 0):
strategy.add_implementation(
wrap_compute_batch_matmul(topi.cuda.batch_matmul_tensorcore, need_out_dtype=True),
wrap_topi_schedule(topi.cuda.schedule_batch_matmul_tensorcore),
name="batch_matmul_tensorcore.cuda",
plevel=20,
)
return strategy
@sparse_dense_strategy.register(["cuda", "gpu"])
def sparse_dense_strategy_cuda(attrs, inputs, out_type, target):
"""sparse dense cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_dense(topi.cuda.sparse_dense),
wrap_topi_schedule(topi.cuda.schedule_sparse_dense),
name="sparse_dense.cuda",
plevel=10,
)
return strategy
@sparse_reshape_strategy.register(["cuda", "gpu"])
def sparse_reshape_strategy_cuda(attrs, inputs, out_type, target):
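    """sparse_reshape cuda strategy"""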
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_reshape(topi.cuda.sparse_reshape),
wrap_topi_schedule(topi.generic.schedule_extern),
name="sparse_reshape.cuda",
)
return strategy
@sparse_dense_padded_strategy.register(["cuda", "gpu", "rocm"])
def sparse_dense_padded_strategy_cuda(attrs, inputs, out_type, target):
"""sparse dense cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_dense(topi.cuda.sparse_dense_padded),
wrap_topi_schedule(topi.cuda.schedule_sparse_dense_padded),
name="sparse_dense_padded.cuda",
plevel=10,
)
return strategy
@scatter_strategy.register(["cuda", "gpu"])
def scatter_cuda(attrs, inputs, out_type, target):
"""scatter cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter(topi.cuda.scatter),
wrap_topi_schedule(topi.cuda.schedule_scatter),
name="scatter.cuda",
plevel=10,
)
rank = len(inputs[0].shape)
with SpecializedCondition(rank == 1):
if can_use_thrust(target, "tvm.contrib.thrust.stable_sort_by_key"):
strategy.add_implementation(
wrap_compute_scatter(topi.cuda.scatter_via_sort),
wrap_topi_schedule(topi.cuda.schedule_scatter_via_sort),
name="scatter_via_sort.cuda",
plevel=9, # use the sequential version by default
)
return strategy
@scatter_add_strategy.register(["cuda", "gpu"])
def scatter_add_cuda(attrs, inputs, out_type, target):
"""scatter_add cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter(topi.cuda.scatter_add),
wrap_topi_schedule(topi.generic.schedule_extern),
name="scatter_add.cuda",
plevel=10,
)
return strategy
@scatter_nd_strategy.register(["cuda", "gpu"])
def scatter_nd_cuda(attrs, inputs, out_type, target):
"""scatter_nd cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter_nd(topi.cuda.scatter_nd),
wrap_topi_schedule(topi.generic.schedule_extern),
name="scatter_nd.cuda",
plevel=10,
)
return strategy
@sort_strategy.register(["cuda", "gpu"])
def sort_strategy_cuda(attrs, inputs, out_type, target):
"""sort cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sort(topi.cuda.sort),
wrap_topi_schedule(topi.cuda.schedule_sort),
name="sort.cuda",
)
if can_use_thrust(target, "tvm.contrib.thrust.sort"):
strategy.add_implementation(
wrap_compute_sort(topi.cuda.sort_thrust),
wrap_topi_schedule(topi.cuda.schedule_sort),
name="sort_thrust.cuda",
plevel=15,
)
return strategy
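# Hedged usage note (assumes a thrust-enabled TVM build; the helper is an
# illustration, not a TVM API): the plevel=15 sort_thrust implementation above
# (and the matching argsort/topk ones below) only becomes eligible when the
# target lists thrust in its libs and the contrib packed functions were
# compiled in, which is what can_use_thrust checks.
def _example_thrust_sort_target():
    """Hypothetical helper: a CUDA target whose libs enable the thrust path."""
    import tvm
    return tvm.target.Target("cuda -libs=thrust")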
@argsort_strategy.register(["cuda", "gpu"])
def argsort_strategy_cuda(attrs, inputs, out_type, target):
"""argsort cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_argsort(topi.cuda.argsort),
wrap_topi_schedule(topi.cuda.schedule_argsort),
name="argsort.cuda",
)
if can_use_thrust(target, "tvm.contrib.thrust.sort"):
strategy.add_implementation(
wrap_compute_argsort(topi.cuda.argsort_thrust),
wrap_topi_schedule(topi.cuda.schedule_argsort),
name="argsort_thrust.cuda",
plevel=15,
)
return strategy
@topk_strategy.register(["cuda", "gpu"])
def topk_strategy_cuda(attrs, inputs, out_type, target):
"""topk cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_topk(topi.cuda.topk),
wrap_topi_schedule(topi.cuda.schedule_topk),
name="topk.cuda",
)
if can_use_thrust(target, "tvm.contrib.thrust.sort"):
strategy.add_implementation(
wrap_compute_topk(topi.cuda.topk_thrust),
wrap_topi_schedule(topi.cuda.schedule_topk),
name="topk_thrust.cuda",
plevel=15,
)
return strategy
@searchsorted_strategy.register(["cuda", "gpu"])
def searchsorted_strategy_cuda(attrs, inputs, out_type, target):
"""searchsorted cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_searchsorted(topi.cuda.searchsorted),
wrap_topi_schedule(topi.cuda.schedule_extern),
name="searchsorted.cuda",
)
return strategy
@multibox_prior_strategy.register(["cuda", "gpu"])
def multibox_prior_strategy_cuda(attrs, inputs, out_type, target):
"""multibox_prior cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_multibox_prior(topi.cuda.multibox_prior),
wrap_topi_schedule(topi.cuda.schedule_multibox_prior),
name="multibox_prior.cuda",
)
return strategy
@multibox_transform_loc_strategy.register(["cuda", "gpu"])
def multibox_transform_loc_strategy_cuda(attrs, inputs, out_type, target):
"""multibox_transform_loc cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_multibox_transform_loc(topi.cuda.multibox_transform_loc),
wrap_topi_schedule(topi.cuda.schedule_multibox_transform_loc),
name="multibox_transform_loc.cuda",
)
return strategy
@get_valid_counts_strategy.register(["cuda", "gpu"])
def get_valid_counts_strategy_cuda(attrs, inputs, out_type, target):
"""get_valid_counts cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_get_valid_counts(topi.cuda.get_valid_counts),
wrap_topi_schedule(topi.cuda.schedule_get_valid_counts),
name="get_valid_counts.cuda",
)
return strategy
@nms_strategy.register(["cuda", "gpu"])
def nms_strategy_cuda(attrs, inputs, out_type, target):
"""nms cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_nms(topi.cuda.non_max_suppression),
wrap_topi_schedule(topi.cuda.schedule_nms),
name="nms.cuda",
)
return strategy
@all_class_nms_strategy.register(["cuda", "gpu"])
def all_class_nms_strategy_cuda(attrs, inputs, out_type, target):
"""all class nms cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_all_class_nms(topi.cuda.all_class_non_max_suppression),
wrap_topi_schedule(topi.cuda.schedule_nms),
name="all_class_nms.cuda",
)
return strategy
@roi_align_strategy.register(["cuda", "gpu"])
def roi_align_strategy_cuda(attrs, inputs, out_type, target):
"""roi_align cuda strategy"""
strategy = _op.OpStrategy()
layout = attrs.layout
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_roi_align(topi.vision.rcnn.roi_align_nchw),
wrap_topi_schedule(topi.cuda.schedule_roi_align),
name="roi_align_nchw.cuda",
)
else:
assert layout == "NHWC", "layout must be NCHW or NHWC."
strategy.add_implementation(
wrap_compute_roi_align(topi.vision.rcnn.roi_align_nhwc),
wrap_topi_schedule(topi.cuda.schedule_roi_align),
name="roi_align_nhwc.cuda",
)
return strategy
@schedule_roi_pool.register(["cuda", "gpu"])
def schedule_roi_pool_cuda(attrs, outs, target):
"""schedule roi_pool for cuda"""
with target:
return topi.cuda.schedule_roi_pool(outs)
@proposal_strategy.register(["cuda", "gpu"])
def proposal_strategy_cuda(attrs, inputs, out_type, target):
"""proposal cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_proposal(topi.cuda.proposal),
wrap_topi_schedule(topi.cuda.schedule_proposal),
name="proposal.cuda",
)
return strategy
@correlation_strategy.register(["cuda", "gpu"])
def correlation_strategy_cuda(attrs, inputs, out_type, target):
"""correlation cuda strategy"""
layout = attrs.layout
assert layout == "NCHW", "Only support NCHW layout"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_correlation(topi.cuda.correlation_nchw),
wrap_topi_schedule(topi.cuda.schedule_correlation_nchw),
name="correlation.cuda",
)
return strategy
@argwhere_strategy.register(["cuda", "gpu"])
def argwhere_strategy_cuda(attrs, inputs, out_type, target):
"""argwhere cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_argwhere(topi.cuda.argwhere),
wrap_topi_schedule(topi.cuda.schedule_argwhere),
name="argwhere.cuda",
)
return strategy
@cumsum_strategy.register(["cuda", "gpu"])
def cumsum_strategy_cuda(attrs, inputs, out_type, target):
"""cumsum cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scanop(topi.cuda.cumsum),
wrap_topi_schedule(topi.cuda.schedule_scan),
name="cumsum.cuda",
)
return strategy
@cumprod_strategy.register(["cuda", "gpu"])
def cumprod_strategy_cuda(attrs, inputs, out_type, target):
"""cumprod cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scanop(topi.cuda.cumprod),
wrap_topi_schedule(topi.cuda.schedule_scan),
name="cumprod.cuda",
)
return strategy
@unique_strategy.register(["cuda", "gpu"])
def unique_strategy_cuda(attrs, inputs, out_type, target):
"""unique cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_unique(topi.cuda.unique),
wrap_topi_schedule(topi.cuda.schedule_scan),
name="unique.cuda",
)
return strategy
@schedule_transpose.register(["cuda", "gpu", "rocm"])
def schedule_transpose_cuda(attrs, outs, target):
"""
    Transpose cuda strategy.
    Dispatches to an optimized schedule if the transpose is standalone (not fused).
"""
warp_size = int(Target.current(allow_none=False).thread_warp_size)
if (
isinstance(outs[0].op.input_tensors[0].op, te.PlaceholderOp)
and len(outs[0].shape) == 2
and (attrs.axes is None or (len(attrs.axes) == 2 and attrs.axes == [1, 0]))
and isinstance(outs[0].shape[0], (int, IntImm))
and outs[0].shape[0] >= warp_size
and isinstance(outs[0].shape[1], (int, IntImm))
and outs[0].shape[1] >= warp_size
):
return topi.cuda.schedule_transpose(outs)
return schedule_injective(attrs, outs, target)
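# Hedged, illustrative predicate (assumed shapes; mirrors the dispatch check
# above without the te/attrs plumbing, it is not a TVM API): both extents of a
# standalone 2D transpose must reach the warp size to take the fast kernel.
def _example_transpose_uses_fast_schedule(shape=(64, 128), axes=(1, 0), warp_size=32):
    """Returns True when the optimized transpose schedule would be chosen."""
    return (
        len(shape) == 2
        and list(axes) == [1, 0]
        and shape[0] >= warp_size
        and shape[1] >= warp_size
    )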
@invert_permutation_strategy.register(["cuda", "gpu"])
def invert_permutation_strategy_cuda(attrs, inputs, out_type, target):
"""invert_permutation cuda strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_invert_permutation(topi.cuda.invert_permutation),
wrap_topi_schedule(topi.cuda.vision._default_schedule),
name="invert_permutation.cuda",
)
return strategy
@einsum_strategy.register(["cuda", "gpu"])
def einsum_strategy_cuda(attrs, inputs, out_type, target):
"""einsum cuda strategy"""
strategy = _op.OpStrategy()
# TODO: Add cuda-specific op implementation for einsum
strategy.add_implementation(
wrap_compute_einsum(topi.einsum),
wrap_topi_schedule(topi.generic.schedule_extern),
name="einsum.cuda",
)
return strategy
@stft_strategy.register(["cuda", "gpu"])
def stft_strategy_cuda(attrs, inputs, out_type, target):
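    """stft cuda strategy"""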
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_stft(topi.cuda.stft),
wrap_topi_schedule(topi.generic.schedule_extern),
name="stft.cuda",
)
return strategy
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/strategy/generic.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of generic operator strategy."""
# pylint: disable=invalid-name,unused-argument
import logging
import re
from tvm import _ffi, ir, te, topi
from tvm.target import generic_func, override_native_generic_func
from tvm.topi.utils import (
get_const_float,
get_const_int,
get_const_tuple,
get_float_tuple,
)
from .. import op as _op
logger = logging.getLogger("strategy")
def naive_schedule(_, outs, target):
"""Return the naive default schedule.
    This function acts as a placeholder for op implementations that use the auto-scheduler.
    Implementations using this function should only be used along with the auto-scheduler.
"""
if "gpu" in target.keys:
# For GPU, we at least need thread binding to make a valid schedule.
# So the naive schedule cannot be compiled.
logger.debug(
"Cannot compile for GPU targets if no tuned schedule is found. "
"Please see the warning messages above for more information about the failed workloads."
)
return te.create_schedule(outs[-1].op)
def wrap_topi_schedule(topi_schedule):
"""Wrap TOPI schedule which doesn't use attrs"""
def wrapper(attrs, outs, target):
with target:
return topi_schedule(outs)
return wrapper
def wrap_topi_compute(topi_compute):
"""Wrap TOPI compute which doesn't use attrs"""
def wrapper(attrs, inputs, out_type):
return [topi_compute(*inputs)]
return wrapper
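# Hedged sketch (the helper is an illustration, not a TVM API): the two
# wrappers above adapt a plain TOPI compute/schedule pair to the
# (attrs, ...) calling convention that OpStrategy.add_implementation expects.
def _example_wrapped_pair():
    """Hypothetical pairing of a TOPI compute and schedule via the wrappers."""
    fcompute = wrap_topi_compute(topi.nn.relu)  # (attrs, inputs, out_type) -> [tensor]
    fschedule = wrap_topi_schedule(topi.generic.schedule_injective)  # (attrs, outs, target)
    return fcompute, fschedule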
def get_conv2d_in_channels(data_shape, data_layout):
"""Get conv2d input channels"""
data_shape = get_const_tuple(data_shape)
if len(data_shape) == 4:
idx = data_layout.find("C")
assert idx >= 0, "Invalid conv2d data layout {}".format(data_layout)
return data_shape[idx]
if re.match(r"NCHW\d*c", data_layout):
# NCHW[8]c
return data_shape[1] * data_shape[4]
raise ValueError("Unknown conv2d data layout {}".format(data_layout))
def get_conv2d_out_channels(kernel_shape, kernel_layout):
"""Get conv2d output channels"""
kernel_shape = get_const_tuple(kernel_shape)
if len(kernel_shape) == 4:
idx = kernel_layout.find("O")
assert idx >= 0, "Invalid conv2d kernel layout {}".format(kernel_layout)
return kernel_shape[idx]
if re.match(r"OIHW\d*i\d*o", kernel_layout):
return kernel_shape[0] * kernel_shape[5]
if re.match(r"OIHW\d*o", kernel_layout):
return kernel_shape[0] * kernel_shape[4]
raise ValueError("Unknown conv2d kernel layout {}".format(kernel_layout))
def is_depthwise_conv2d(data_shape, data_layout, kernel_shape, kernel_layout, groups):
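    """Check whether a conv2d workload is depthwise, i.e. input channels,
    output channels, and groups are all equal."""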
ic = get_conv2d_in_channels(data_shape, data_layout)
oc = get_conv2d_out_channels(kernel_shape, kernel_layout)
return ic == oc == groups
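# Hedged, illustrative check (the shapes are assumptions): a conv2d whose
# groups equal both channel counts is classified as depthwise by the helper.
def _example_depthwise_classification():
    """Hypothetical NCHW/OIHW workload that is depthwise (32 == 32 == 32)."""
    data_shape = (1, 32, 56, 56)  # NCHW, C = 32
    kernel_shape = (32, 1, 3, 3)  # OIHW, O = 32 (channel multiplier 1)
    return is_depthwise_conv2d(data_shape, "NCHW", kernel_shape, "OIHW", 32)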
@generic_func
def schedule_injective(attrs, outs, target):
"""Schedule injective ops"""
with target:
return topi.generic.schedule_injective(outs)
@generic_func
def schedule_reduce(attrs, outs, target):
"""Schedule reduction ops"""
with target:
return topi.generic.schedule_reduce(outs)
_op._schedule_injective = schedule_injective
_op._schedule_reduce = schedule_reduce
# concatenate
@generic_func
def schedule_concatenate(attrs, outs, target):
"""Schedule concatenate op"""
with target:
return topi.generic.schedule_injective(outs)
# pool
@generic_func
def schedule_pool(attrs, outs, target):
"""Schedule pooling ops"""
with target:
return topi.generic.schedule_pool(outs, attrs.layout)
# pool_grad
@generic_func
def schedule_pool_grad(attrs, outs, target):
"""Schedule pooling gradient ops"""
with target:
return topi.generic.schedule_pool_grad(outs)
# adaptive pool
@generic_func
def schedule_adaptive_pool(attrs, outs, target):
"""Schedule adaptive pooling ops"""
with target:
return topi.generic.schedule_adaptive_pool(outs)
# softmax
def wrap_compute_softmax(topi_compute):
"""Wrap softmax topi compute"""
def _compute_softmax(attrs, inputs, out_type):
axis = attrs.get_int("axis")
return [topi_compute(inputs[0], axis)]
return _compute_softmax
@override_native_generic_func("softmax_strategy")
def softmax_strategy(attrs, inputs, out_type, target):
"""softmax generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.softmax),
wrap_topi_schedule(topi.generic.schedule_softmax),
name="softmax.generic",
)
return strategy
@override_native_generic_func("fast_softmax_strategy")
def fast_softmax_strategy(attrs, inputs, out_type, target):
"""fast softmax generic strategy"""
# NOTE: This op does not have an optimized manual schedule,
# so it should only be used together with auto-scheduler.
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.fast_softmax),
wrap_topi_schedule(topi.generic.schedule_fast_softmax),
name="fast_softmax.generic",
)
return strategy
@override_native_generic_func("log_softmax_strategy")
def log_softmax_strategy(attrs, inputs, out_type, target):
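    """log_softmax generic strategy"""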
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.log_softmax),
wrap_topi_schedule(topi.generic.schedule_softmax),
name="log_softmax.generic",
)
return strategy
# lrn
@generic_func
def schedule_lrn(attrs, outs, target):
"""Schedule LRN op"""
with target:
return topi.generic.schedule_lrn(outs)
# pad
@generic_func
def schedule_pad(attrs, outs, target):
"""Schedule PAD op"""
with target:
return schedule_injective(attrs, outs, target)
# bitpack
@generic_func
def schedule_bitpack(attrs, outs, target):
"""Schedule bitpack"""
with target:
return topi.generic.schedule_bitpack(outs)
get_auto_scheduler_rewritten_layout = _ffi.get_global_func(
"relay.attrs.get_auto_scheduler_rewritten_layout"
)
get_meta_schedule_original_shape = _ffi.get_global_func(
"relay.attrs.get_meta_schedule_original_shape"
)
# conv2d
def wrap_compute_conv2d(
topi_compute,
*,
need_data_layout=False,
need_kernel_layout=False,
need_out_layout=False,
has_groups=False,
need_auto_scheduler_layout=False,
need_meta_schedule_layout=False,
):
"""Wrap conv2d topi compute"""
def _compute_conv2d(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
data_layout = attrs.get_str("data_layout")
kernel_layout = attrs.get_str("kernel_layout")
out_layout = attrs.get_str("out_layout")
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
args = [inputs[0], inputs[1], strides, padding, dilation]
if has_groups:
args.append(attrs.groups)
if need_data_layout:
args.append(data_layout)
if need_kernel_layout:
args.append(kernel_layout)
if need_out_layout:
args.append(out_layout)
args.append(out_dtype)
if need_auto_scheduler_layout:
args.append(get_auto_scheduler_rewritten_layout(attrs))
elif need_meta_schedule_layout:
args.append("")
args.append(get_meta_schedule_original_shape(attrs))
return [topi_compute(*args)]
return _compute_conv2d
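# Hedged sketch (illustration only; the flags and computes are the ones
# defined/imported above): each keyword flag appends one more positional
# argument to the tail handed to the TOPI compute, just before out_dtype.
def _example_conv2d_wrappers():
    """Hypothetical wrapper instances showing the flag-driven argument tails."""
    plain = wrap_compute_conv2d(topi.nn.conv2d_nchw)
    # -> conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype)
    grouped = wrap_compute_conv2d(topi.nn.group_conv2d_nchw, has_groups=True)
    # -> group_conv2d_nchw(data, kernel, strides, padding, dilation, groups, out_dtype)
    return plain, grouped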
@override_native_generic_func("conv2d_strategy")
def conv2d_strategy(attrs, inputs, out_type, target):
"""conv2d generic strategy"""
logger.warning("conv2d is not optimized for this platform.")
strategy = _op.OpStrategy()
data, kernel = inputs
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
(dilation_h, dilation_w) = dilation
if dilation_h < 1 or dilation_w < 1:
        raise ValueError("dilation should be a positive value")
if groups == 1:
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_conv2d_nchw),
name="conv2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_conv2d_nhwc),
name="conv2d_nhwc.generic",
)
elif layout == "HWCN":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_hwcn),
wrap_topi_schedule(topi.generic.schedule_conv2d_hwcn),
name="conv2d_hwcn.generic",
)
else:
raise RuntimeError("Unsupported conv2d layout {}".format(layout))
elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWOI"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nhwc),
name="depthwise_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout))
else: # group_conv2d
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.group_conv2d_nchw, has_groups=True),
wrap_topi_schedule(topi.generic.schedule_group_conv2d_nchw),
name="group_conv2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.group_conv2d_nhwc, has_groups=True),
wrap_topi_schedule(topi.generic.schedule_group_conv2d_nhwc),
name="group_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported group_conv2d layout {}".format(layout))
return strategy
# conv2d_NCHWc
@override_native_generic_func("conv2d_NCHWc_strategy")
def conv2d_NCHWc_strategy(attrs, inputs, out_type, target):
"""conv2d_NCHWc generic strategy"""
logger.warning("conv2d_NCHWc is not optimized for this platform.")
strategy = _op.OpStrategy()
if inputs[0].dtype == "int8" or inputs[0].dtype == "uint8":
strategy.add_implementation(
wrap_compute_conv2d(
topi.nn.conv2d_NCHWc_int8, need_data_layout=True, need_out_layout=True
),
wrap_topi_schedule(topi.generic.schedule_conv2d_NCHWc_int8),
name="conv2d_NCHWc_int8.generic",
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_NCHWc, need_data_layout=True, need_out_layout=True),
wrap_topi_schedule(topi.generic.schedule_conv2d_NCHWc),
name="conv2d_NCHWc.generic",
)
return strategy
# depthwise_conv2d_NCHWc
@override_native_generic_func("depthwise_conv2d_NCHWc_strategy")
def depthwise_conv2d_NCHWc_strategy(attrs, inputs, out_type, target):
"""depthwise_conv2d generic strategy"""
logger.warning("depthwise_conv2d_NCHWc is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d(
topi.nn.depthwise_conv2d_NCHWc, need_data_layout=True, need_out_layout=True
),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_NCHWc),
name="depthwise_conv2d_NCHWc.generic",
)
return strategy
# conv2d_winograd_without_weight_transform
@override_native_generic_func("conv2d_winograd_without_weight_transform_strategy")
def conv2d_winograd_without_weight_transform_strategy(attrs, inputs, out_type, target):
"""conv2d_winograd_without_weight_transform generic strategy"""
    raise ValueError("No generic implementation for conv2d_winograd_without_weight_transform")
# conv2d_gemm_without_weight_transform
@override_native_generic_func("conv2d_gemm_without_weight_transform_strategy")
def conv2d_gemm_without_weight_transform_strategy(attrs, inputs, out_type, target):
"""conv2d_gemm_without_weight_transform generic strategy"""
    raise ValueError("No generic implementation for conv2d_gemm_without_weight_transform")
# conv2d_winograd_weight_transform
@generic_func
def schedule_conv2d_winograd_weight_transform(attrs, outs, target):
"""Schedule conv2d_winograd_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_weight_transform(outs)
# conv2d_winograd_nnpack_weight_transform
@generic_func
def schedule_conv2d_winograd_nnpack_weight_transform(attrs, outs, target):
"""Schedule conv2d_winograd_nnpack_weight_transform"""
with target:
return topi.generic.schedule_conv2d_winograd_nnpack_weight_transform(outs)
# conv2d_gemm_weight_transform
@generic_func
def schedule_conv2d_gemm_weight_transform(attrs, outs, target):
"""Schedule conv2d_gemm_weight_transform"""
with target:
return topi.generic.schedule_conv2d_gemm_weight_transform(outs)
# deformable_conv2d
def wrap_compute_deformable_conv2d(topi_compute):
"""wrap deformable_conv2d topi compute"""
def _compute_deformable_conv2d(attrs, inputs, out_dtype):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
deformable_groups = attrs.deformable_groups
groups = attrs.groups
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
out = topi_compute(
inputs[0],
inputs[1],
inputs[2],
strides,
padding,
dilation,
deformable_groups,
groups,
out_dtype,
)
return [out]
return _compute_deformable_conv2d
@override_native_generic_func("deformable_conv2d_strategy")
def deformable_conv2d_strategy(attrs, inputs, out_type, target):
"""deformable_conv2d generic strategy"""
layout = attrs.data_layout
strategy = _op.OpStrategy()
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_deformable_conv2d(topi.nn.deformable_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_deformable_conv2d_nchw),
name="deformable_conv2d_nchw.generic",
)
elif layout == "NHWC":
# This implementation should never be picked by autotvm
strategy.add_implementation(
wrap_compute_deformable_conv2d(topi.nn.deformable_conv2d_nhwc),
naive_schedule,
name="deformable_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Layout %s is not supported in deformable conv2d" % layout)
return strategy
# conv2d_transpose
def wrap_compute_conv2d_transpose(topi_compute, has_groups=False, add_layout=False):
"""wrap conv2d_transpose topi compute"""
def compute_conv2d_transpose(attrs, inputs, out_dtype):
"""Compute definition of conv2d_transpose"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
output_padding = get_const_tuple(attrs.output_padding)
args = [inputs[0], inputs[1], strides, padding, out_dtype, output_padding]
if add_layout:
args.append(attrs.data_layout)
if has_groups:
args.append(attrs.groups)
out = topi_compute(*args)
return [out]
return compute_conv2d_transpose
@override_native_generic_func("conv2d_transpose_strategy")
def conv2d_transpose_strategy(attrs, inputs, out_type, target):
"""conv2d_transpose generic strategy"""
logger.warning("conv2d_transpose is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
    assert layout == "NCHW", "Only the NCHW layout is supported for now"
    assert dilation == (1, 1), "Dilation is not supported yet"
strategy = _op.OpStrategy()
if groups == 1:
strategy.add_implementation(
wrap_compute_conv2d_transpose(topi.nn.conv2d_transpose_nchw),
wrap_topi_schedule(topi.generic.schedule_conv2d_transpose_nchw),
name="conv2d_transpose_nchw.generic",
)
else: # group_conv2d_transpose
strategy.add_implementation(
wrap_compute_conv2d_transpose(topi.nn.group_conv2d_transpose_nchw, has_groups=True),
wrap_topi_schedule(topi.generic.schedule_group_conv2d_transpose_nchw),
name="group_conv2d_transpose_nchw.generic",
)
return strategy
# conv3d_transpose
def wrap_compute_conv3d_transpose(topi_compute):
"""wrap conv3d_transpose topi compute"""
def compute_conv3d_transpose(attrs, inputs, out_dtype):
"""Compute definition of conv3d_transpose"""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
output_padding = get_const_tuple(attrs.output_padding)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype, output_padding)
return [out]
return compute_conv3d_transpose
@override_native_generic_func("conv3d_transpose_strategy")
def conv3d_transpose_strategy(attrs, inputs, out_type, target):
"""conv3d_transpose generic strategy"""
logger.warning("conv3d_transpose is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
    assert layout == "NCDHW", "Only the NCDHW layout is supported for now"
    assert dilation == (1, 1, 1), "Dilation is not supported yet"
    assert groups == 1, "Only groups == 1 is supported for now"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv3d_transpose(topi.nn.conv3d_transpose_ncdhw),
wrap_topi_schedule(topi.generic.schedule_conv3d_transpose_ncdhw),
name="conv3d_transpose_ncdhw.generic",
)
return strategy
# conv3d
def wrap_compute_conv3d(
topi_compute,
need_layout=False,
need_auto_scheduler_layout=False,
need_meta_schedule_layout=False,
):
"""wrap conv3d topi compute"""
def _compute_conv3d(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
(dilation_d, dilation_h, dilation_w) = dilation
if dilation_d < 1 or dilation_h < 1 or dilation_w < 1:
            raise ValueError("Dilation should be a positive value")
args = [inputs[0], inputs[1], strides, padding, dilation, groups]
if need_layout:
args.append(layout)
args.append(out_dtype)
if need_auto_scheduler_layout:
args.append(get_auto_scheduler_rewritten_layout(attrs))
elif need_meta_schedule_layout:
args.append("")
args.append(get_meta_schedule_original_shape(attrs))
return [topi_compute(*args)]
return _compute_conv3d
@override_native_generic_func("conv3d_strategy")
def conv3d_strategy(attrs, inputs, out_type, target):
"""conv3d generic strategy"""
logger.warning("conv3d is not optimized for this platform.")
strategy = _op.OpStrategy()
layout = attrs.data_layout
if layout == "NCDHW":
strategy.add_implementation(
wrap_compute_conv3d(topi.nn.conv3d_ncdhw),
wrap_topi_schedule(topi.generic.schedule_conv3d_ncdhw),
name="conv3d_ncdhw.generic",
)
elif layout == "NDHWC":
strategy.add_implementation(
wrap_compute_conv3d(topi.nn.conv3d_ndhwc),
wrap_topi_schedule(topi.generic.schedule_conv3d_ndhwc),
name="conv3d_ndhwc.generic",
)
else:
        raise ValueError("Layout {} is not supported yet".format(layout))
return strategy
# conv3d_winograd_without_weight_transform
@override_native_generic_func("conv3d_winograd_without_weight_transform_strategy")
def conv3d_winograd_without_weight_transform_strategy(attrs, inputs, out_type, target):
"""conv3d_winograd_without_weight_transform generic strategy"""
    raise ValueError("No generic implementation for conv3d_winograd_without_weight_transform")
# conv3d_winograd_weight_transform
@generic_func
def schedule_conv3d_winograd_weight_transform(attrs, outs, target):
"""Schedule conv3d_winograd_weight_transform"""
with target:
return topi.generic.schedule_conv3d_winograd_weight_transform(outs)
# conv1d
def wrap_compute_conv1d(topi_compute):
"""wrap conv1d topi compute"""
def _compute_conv1d(attrs, inputs, out_type):
"""Compute definition of conv1d"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
return [topi_compute(inputs[0], inputs[1], strides, padding, dilation, out_dtype)]
return _compute_conv1d
@override_native_generic_func("conv1d_strategy")
def conv1d_strategy(attrs, inputs, out_type, target):
"""conv1d generic strategy"""
logger.warning("conv1d is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
if dilation[0] < 1:
raise ValueError("dilation should be a positive value")
strategy = _op.OpStrategy()
if layout == "NCW":
strategy.add_implementation(
wrap_compute_conv1d(topi.nn.conv1d_ncw),
wrap_topi_schedule(topi.generic.schedule_conv1d_ncw),
name="conv1d_ncw.generic",
)
elif layout == "NWC":
strategy.add_implementation(
wrap_compute_conv1d(topi.nn.conv1d_nwc),
wrap_topi_schedule(topi.generic.schedule_conv1d_nwc),
name="conv1d_nwc.generic",
)
else:
raise ValueError("Unsupported conv1d layout {}".format(layout))
return strategy
def wrap_compute_group_conv1d(topi_compute):
"""wrap conv1d topi compute"""
def _compute_group_conv1d(attrs, inputs, out_type):
"""Compute definition of conv1d"""
strides = get_const_tuple(attrs.strides)
padding = get_const_tuple(attrs.padding)
dilation = get_const_tuple(attrs.dilation)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
return [
topi_compute(inputs[0], inputs[1], strides, padding, dilation, attrs.groups, out_dtype)
]
return _compute_group_conv1d
@override_native_generic_func("group_conv1d_strategy")
def group_conv1d_strategy(attrs, inputs, out_type, target):
"""group_conv1d generic strategy"""
logger.warning("group_conv1d is not optimized for this platform.")
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
if dilation[0] < 1:
raise ValueError("dilation should be a positive value")
strategy = _op.OpStrategy()
if layout == "NCW":
strategy.add_implementation(
            wrap_compute_group_conv1d(topi.nn.group_conv1d_ncw),
wrap_topi_schedule(topi.generic.schedule_group_conv1d_ncw),
name="group_conv1d_ncw.generic",
)
elif layout == "NWC":
strategy.add_implementation(
            wrap_compute_group_conv1d(topi.nn.group_conv1d_nwc),
wrap_topi_schedule(topi.generic.schedule_group_conv1d_nwc),
name="group_conv1d_nwc.generic",
)
else:
raise ValueError("Unsupported conv1d layout {}".format(layout))
return strategy
# conv1d_transpose
def wrap_compute_conv1d_transpose(topi_compute):
"""wrap conv1d_transpose topi compute"""
    def _compute_conv1d_transpose(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
output_padding = get_const_tuple(attrs.output_padding)
out = topi_compute(inputs[0], inputs[1], strides, padding, out_dtype, output_padding)
return [out]
    return _compute_conv1d_transpose
@override_native_generic_func("conv1d_transpose_strategy")
def conv1d_transpose_strategy(attrs, inputs, out_type, target):
"""conv1d_transpose generic strategy"""
logger.warning("conv1d_transpose is not optimized for this platform.")
strategy = _op.OpStrategy()
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCW", "conv1d_transpose ncw only supported"
assert dilation == (1,), "conv1d_transpose dilation is not supported"
assert groups == 1, "conv1d_transpose groups == 1 only supported"
strategy.add_implementation(
wrap_compute_conv1d_transpose(topi.nn.conv1d_transpose_ncw),
wrap_topi_schedule(topi.generic.schedule_conv1d_transpose_ncw),
name="conv1d_transpose_ncw.generic",
)
return strategy
# dilation2d
def wrap_compute_dilation2d(topi_compute, need_data_layout=False):
"""Wrap dilation2d topi compute"""
def _compute_dilation2d(attrs, inputs, out_type):
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilations = get_const_tuple(attrs.dilations)
data_layout = attrs.get_str("data_layout")
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
args = [inputs[0], inputs[1], strides, padding, dilations]
if need_data_layout:
args.append(data_layout)
args.append(out_dtype)
return [topi_compute(*args)]
return _compute_dilation2d
@override_native_generic_func("dilation2d_strategy")
def dilation2d_strategy(attrs, inputs, out_type, target):
"""dilation2d_strategy generic strategy"""
    logger.warning("dilation2d is not optimized for this platform.")
strategy = _op.OpStrategy()
dilations = get_const_tuple(attrs.dilations)
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
assert layout in ["NCHW", "NHWC"]
(dilation_h, dilation_w) = dilations
if dilation_h < 1 or dilation_w < 1:
        raise ValueError("dilation should be a positive value")
if layout == "NCHW":
assert kernel_layout == "IHW"
strategy.add_implementation(
wrap_compute_dilation2d(topi.image.dilation2d_nchw),
wrap_topi_schedule(topi.generic.schedule_dilation2d_nchw),
name="dilation2d_nchw.generic",
)
elif layout == "NHWC":
assert kernel_layout == "HWI"
strategy.add_implementation(
wrap_compute_dilation2d(topi.image.dilation2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_dilation2d_nhwc),
name="dilation2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported dilation2d layout {}".format(layout))
return strategy
def copy_if_identical(tensor_a, tensor_b):
"""
    When two inputs to batch_matmul or dense are the same tensor, e.g. batch_matmul(x, x),
compilation fails because TE thinks there is only one input tensor x, and doing
cache_read(x) on the same tensor twice results in an error.
To prevent such errors, we make the second tensor be the copy of the first one
when two input tensors are identical.
"""
if tensor_a == tensor_b:
return te.compute(tensor_a.shape, lambda *ind: tensor_a[ind])
return tensor_b
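# Hedged, runnable sketch (the placeholder shape is an assumption): feeding
# the same tensor twice reproduces the aliasing case described above, and the
# helper returns a distinct compute node for the second operand.
def _example_copy_if_identical():
    """Hypothetical demonstration that x is duplicated when passed twice."""
    x = te.placeholder((2, 8, 8), name="x")
    y = copy_if_identical(x, x)
    assert y is not x  # y is a fresh te.compute copy of x
    return y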
# matmul
def wrap_compute_matmul(
topi_compute,
need_auto_scheduler_layout=False,
need_meta_schedule_layout=False,
):
"""wrap matmul topi compute"""
def _compute_matmul(attrs, inputs, out_type):
"""Compute definition of matmul"""
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
args = [
inputs[0],
inputs[1],
None,
out_dtype,
attrs.transpose_a,
attrs.transpose_b,
]
if need_auto_scheduler_layout:
args.append(get_auto_scheduler_rewritten_layout(attrs))
elif need_meta_schedule_layout:
args.append("")
args.append(get_meta_schedule_original_shape(attrs))
args[1] = copy_if_identical(inputs[0], inputs[1])
return [topi_compute(*args)]
return _compute_matmul
@override_native_generic_func("matmul_strategy")
def matmul_strategy(attrs, inputs, out_type, target):
"""matmul generic strategy"""
logger.warning("matmul is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_matmul(topi.nn.matmul),
wrap_topi_schedule(topi.generic.schedule_matmul),
name="matmul.generic",
)
return strategy
# dense
def wrap_compute_dense(
topi_compute,
need_auto_scheduler_layout=False,
need_meta_schedule_layout=False,
):
"""wrap dense topi compute"""
def _compute_dense(attrs, inputs, out_type):
"""Compute definition of dense"""
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
args = [inputs[0], inputs[1], None, out_dtype]
if need_auto_scheduler_layout:
args.append(get_auto_scheduler_rewritten_layout(attrs))
elif need_meta_schedule_layout:
args.append("")
args.append(get_meta_schedule_original_shape(attrs))
args[1] = copy_if_identical(inputs[0], inputs[1])
return [topi_compute(*args)]
return _compute_dense
@override_native_generic_func("dense_strategy")
def dense_strategy(attrs, inputs, out_type, target):
"""dense generic strategy"""
logger.warning("dense is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_dense(topi.nn.dense),
wrap_topi_schedule(topi.generic.schedule_dense),
name="dense.generic",
)
return strategy
@override_native_generic_func("dense_pack_strategy")
def dense_pack_strategy(attrs, inputs, out_type, target):
"""dense_pack generic strategy"""
logger.warning("dense_pack is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_dense(topi.nn.dense_pack),
wrap_topi_schedule(topi.generic.schedule_dense),
name="dense_pack.generic",
)
return strategy
# batch_matmul
def wrap_compute_batch_matmul(
topi_compute,
*,
need_auto_scheduler_layout=False,
need_meta_schedule_layout=False,
need_out_dtype=False,
):
"""wrap batch_matmul topi compute"""
def _compute_batch_matmul(attrs, inputs, out_type):
args = [inputs[0], inputs[1], out_type.shape]
args.append(out_type.dtype if need_out_dtype else None)
args.append(attrs.transpose_a)
args.append(attrs.transpose_b)
if need_auto_scheduler_layout:
args.append(get_auto_scheduler_rewritten_layout(attrs))
elif need_meta_schedule_layout:
args.append("")
args.append(get_meta_schedule_original_shape(attrs))
args[1] = copy_if_identical(inputs[0], inputs[1])
return [topi_compute(*args)]
return _compute_batch_matmul
@override_native_generic_func("batch_matmul_strategy")
def batch_matmul_strategy(attrs, inputs, out_type, target):
"""batch_matmul generic strategy"""
logger.warning("batch_matmul is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_batch_matmul(topi.nn.batch_matmul),
wrap_topi_schedule(topi.generic.schedule_batch_matmul),
name="batch_matmul.generic",
)
return strategy
# batch_norm
def wrap_compute_batch_norm(topi_compute):
"""wrap batch_norm topi compute"""
def _compute_batch_norm(attrs, inputs, out_type):
return topi_compute(*inputs, attrs.axis, attrs.epsilon, attrs.center, attrs.scale)
return _compute_batch_norm
@override_native_generic_func("batch_norm_strategy")
def batch_norm_strategy(attrs, inputs, out_type, target):
"""batch_norm generic strategy"""
logger.warning("batch_norm is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_batch_norm(topi.nn.batch_norm),
wrap_topi_schedule(topi.generic.schedule_batch_norm),
name="batch_norm.generic",
)
return strategy
# sparse dense
def wrap_compute_sparse_dense(topi_compute):
"""wrap sparse dense topi compute"""
def _compute_sparse_dense(attrs, inputs, out_type):
return [topi_compute(inputs[0], inputs[1], inputs[2], inputs[3], attrs["sparse_lhs"])]
return _compute_sparse_dense
@override_native_generic_func("sparse_dense_strategy")
def sparse_dense_strategy(attrs, inputs, out_type, target):
"""sparse dense generic strategy"""
logger.warning("sparse dense is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_dense(topi.nn.sparse_dense),
wrap_topi_schedule(topi.generic.schedule_sparse_dense),
name="sparse_dense.generic",
)
return strategy
@override_native_generic_func("sparse_dense_padded_strategy")
def sparse_dense_padded_strategy(attrs, inputs, out_type, target):
"""sparse dense padded generic strategy"""
raise NotImplementedError("sparse_dense_padded is only implemented for cuda")
# sparse_add
def wrap_compute_sparse_add(topi_compute):
"""wrap sparse add topi compute"""
def _compute_sparse_add(attrs, inputs, out_type):
return [topi_compute(inputs[0], inputs[1], inputs[2], inputs[3])]
return _compute_sparse_add
@override_native_generic_func("sparse_add_strategy")
def sparse_add_strategy(attrs, inputs, out_type, target):
"""sparse add generic strategy"""
logger.warning("sparse add is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_add(topi.nn.sparse_add),
wrap_topi_schedule(topi.generic.schedule_extern),
name="sparse_add.generic",
)
return strategy
# sparse_transpose
@generic_func
def schedule_sparse_transpose(attrs, outs, target):
"""schedule sparse_transpose"""
with target:
return topi.generic.schedule_sparse_transpose(outs)
# sparse conv2d
def wrap_compute_sparse_conv2d(topi_compute):
"""wrap sparse conv2d topi compute"""
def _compute_sparse_conv2d(attrs, inputs, out_type):
return [topi_compute(inputs[0], inputs[1], inputs[2], inputs[3], attrs["layout"])]
return _compute_sparse_conv2d
@override_native_generic_func("sparse_conv2d_strategy")
def sparse_conv2d_strategy(attrs, inputs, out_type, target):
"""sparse conv2d generic strategy"""
logger.warning("sparse conv2d is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_conv2d(topi.nn.sparse_conv2d),
wrap_topi_schedule(topi.generic.schedule_sparse_conv2d),
name="sparse_conv2d.generic",
)
return strategy
# sort
def wrap_compute_sort(topi_compute):
"""Wrap sort topi compute"""
def _compute_sort(attrs, inputs, _):
axis = get_const_int(attrs.axis)
is_ascend = bool(get_const_int(attrs.is_ascend))
return [topi_compute(inputs[0], axis=axis, is_ascend=is_ascend)]
return _compute_sort
@override_native_generic_func("sort_strategy")
def sort_strategy(attrs, inputs, out_type, target):
"""sort generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sort(topi.sort),
wrap_topi_schedule(topi.generic.schedule_sort),
name="sort.generic",
)
return strategy
# argsort
def wrap_compute_argsort(topi_compute):
"""Wrap argsort topi compute"""
def _compute_argsort(attrs, inputs, _):
axis = get_const_int(attrs.axis)
is_ascend = bool(get_const_int(attrs.is_ascend))
dtype = attrs.dtype
return [topi_compute(inputs[0], axis=axis, is_ascend=is_ascend, dtype=dtype)]
return _compute_argsort
@override_native_generic_func("argsort_strategy")
def argsort_strategy(attrs, inputs, out_type, target):
"""argsort generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_argsort(topi.argsort),
wrap_topi_schedule(topi.generic.schedule_argsort),
name="argsort.generic",
)
return strategy
# topk
def wrap_compute_topk(topi_compute):
"""Wrap topk compute"""
def _compute_topk(attrs, inputs, out_type):
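        # k may be a compile-time constant (attrs.k) or supplied at runtime as the
        # second input tensor; the dynamic form is used only when attrs.k is unset.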
if attrs.k is not None:
k = attrs.k
else:
k = inputs[1]
axis = get_const_int(attrs.axis)
ret_type = attrs.ret_type
is_ascend = bool(get_const_int(attrs.is_ascend))
dtype = attrs.dtype
out = topi_compute(inputs[0], k, axis, ret_type, is_ascend, dtype)
out = out if isinstance(out, list) else [out]
return out
return _compute_topk
@override_native_generic_func("topk_strategy")
def topk_strategy(attrs, inputs, out_type, target):
"""topk generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_topk(topi.topk),
wrap_topi_schedule(topi.generic.schedule_topk),
name="topk.generic",
)
return strategy
# searchsorted
def wrap_compute_searchsorted(topi_compute):
"""Wrap searchsorted compute"""
def _compute_searchsorted(attrs, inputs, out_type):
right = attrs.right
dtype = attrs.dtype
return [topi_compute(inputs[0], inputs[1], right, dtype)]
return _compute_searchsorted
# searchsorted_strategy
@override_native_generic_func("searchsorted_strategy")
def searchsorted_strategy(attrs, inputs, out_type, target):
"""searchsorted generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_searchsorted(topi.searchsorted),
wrap_topi_schedule(topi.generic.schedule_extern),
name="searchsorted.generic",
)
return strategy
# multibox_prior
def wrap_compute_multibox_prior(topi_compute):
"""Wrap multibox_prior compute"""
def _compute_multibox_prior(attrs, inputs, _):
"""Compute definition of multibox_prior"""
sizes = get_float_tuple(attrs.sizes)
ratios = get_float_tuple(attrs.ratios)
steps = get_float_tuple(attrs.steps)
offsets = get_float_tuple(attrs.offsets)
clip = bool(get_const_int(attrs.clip))
return [topi_compute(inputs[0], sizes, ratios, steps, offsets, clip)]
return _compute_multibox_prior
@override_native_generic_func("multibox_prior_strategy")
def multibox_prior_strategy(attrs, inputs, out_type, target):
"""multibox_prior generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_multibox_prior(topi.vision.ssd.multibox_prior),
wrap_topi_schedule(topi.generic.schedule_multibox_prior),
name="multibox_prior.generic",
)
return strategy
# multibox_transform_loc
def wrap_compute_multibox_transform_loc(topi_compute):
"""Wrap multibox_transform_loc compute"""
def _compute_multibox_transform_loc(attrs, inputs, _):
"""Compute definition of multibox_detection"""
clip = bool(get_const_int(attrs.clip))
threshold = get_const_float(attrs.threshold)
variances = get_float_tuple(attrs.variances)
return topi_compute(inputs[0], inputs[1], inputs[2], clip, threshold, variances)
return _compute_multibox_transform_loc
@override_native_generic_func("multibox_transform_loc_strategy")
def multibox_transform_loc_strategy(attrs, inputs, out_type, target):
"""schedule multibox_transform_loc"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_multibox_transform_loc(topi.vision.ssd.multibox_transform_loc),
wrap_topi_schedule(topi.generic.schedule_multibox_transform_loc),
name="multibox_transform_loc.generic",
)
return strategy
# get_valid_counts
def wrap_compute_get_valid_counts(topi_compute):
"""wrap get_valid_counts topi compute"""
def _compute_get_valid_counts(attrs, inputs, out_type):
score_threshold = inputs[1]
id_index = get_const_int(attrs.id_index)
score_index = get_const_int(attrs.score_index)
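        # A static score_threshold attribute takes precedence; the dynamic tensor
        # input (inputs[1]) is only used when the attribute is unset.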
if attrs.score_threshold is not None:
score_threshold = get_const_float(attrs.score_threshold)
return topi_compute(inputs[0], score_threshold, id_index, score_index)
return _compute_get_valid_counts
@override_native_generic_func("get_valid_counts_strategy")
def get_valid_counts_strategy(attrs, inputs, out_type, target):
"""get_valid_counts generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_get_valid_counts(topi.vision.get_valid_counts),
wrap_topi_schedule(topi.generic.schedule_get_valid_counts),
name="get_valid_counts.generic",
)
return strategy
# non-maximum suppression
def wrap_compute_nms(topi_compute):
"""wrap nms topi compute"""
def _compute_nms(attrs, inputs, out_type):
max_output_size = inputs[3]
iou_threshold = inputs[4]
return_indices = bool(get_const_int(attrs.return_indices))
force_suppress = bool(get_const_int(attrs.force_suppress))
top_k = get_const_int(attrs.top_k)
coord_start = get_const_int(attrs.coord_start)
score_index = get_const_int(attrs.score_index)
id_index = get_const_int(attrs.id_index)
invalid_to_bottom = bool(get_const_int(attrs.invalid_to_bottom))
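        # With return_indices the TOPI op already yields a list of outputs
        # (e.g. selected indices plus a valid count), so it is returned as-is;
        # the single tensor produced otherwise is wrapped in a list.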
if return_indices:
return topi_compute(
inputs[0],
inputs[1],
inputs[2],
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
score_index,
id_index,
return_indices,
invalid_to_bottom,
)
return [
topi_compute(
inputs[0],
inputs[1],
inputs[2],
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
score_index,
id_index,
return_indices,
invalid_to_bottom,
)
]
return _compute_nms
@override_native_generic_func("non_max_suppression_strategy")
def nms_strategy(attrs, inputs, out_type, target):
"""nms generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_nms(topi.vision.non_max_suppression),
wrap_topi_schedule(topi.generic.schedule_nms),
name="nms.generic",
)
return strategy
def wrap_compute_all_class_nms(topi_compute):
"""wrap all class nms topi compute"""
def _compute_nms(attrs, inputs, out_type):
max_output_size = inputs[2]
iou_threshold = inputs[3]
score_threshold = inputs[4]
output_format = attrs.output_format
return topi_compute(
inputs[0],
inputs[1],
max_output_size,
iou_threshold,
score_threshold,
output_format,
)
return _compute_nms
@override_native_generic_func("all_class_non_max_suppression_strategy")
def all_class_nms_strategy(attrs, inputs, out_type, target):
"""all class nms generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_all_class_nms(topi.vision.all_class_non_max_suppression),
wrap_topi_schedule(topi.generic.schedule_nms),
name="all_class_nms.generic",
)
return strategy
# roi_align
def wrap_compute_roi_align(topi_compute):
"""wrap roi_align topi compute"""
def _compute_roi_align(attrs, inputs, out_type):
pooled_size = get_const_tuple(attrs.pooled_size)
mode = bytes(attrs.mode, "utf-8")
return [
topi_compute(
inputs[0],
inputs[1],
pooled_size=pooled_size,
spatial_scale=attrs.spatial_scale,
sample_ratio=attrs.sample_ratio,
mode=mode,
)
]
return _compute_roi_align
@override_native_generic_func("roi_align_strategy")
def roi_align_strategy(attrs, inputs, out_type, target):
"""roi_align generic strategy"""
strategy = _op.OpStrategy()
layout = attrs.layout
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_roi_align(topi.vision.rcnn.roi_align_nchw),
wrap_topi_schedule(topi.generic.schedule_roi_align),
name="roi_align.generic",
)
else:
assert layout == "NHWC", "layout must be NCHW or NHWC."
strategy.add_implementation(
wrap_compute_roi_align(topi.vision.rcnn.roi_align_nhwc),
wrap_topi_schedule(topi.generic.schedule_roi_align),
name="roi_align.generic",
)
return strategy
# sparse_fill_empty_rows
@override_native_generic_func("sparse_fill_empty_rows_strategy")
def sparse_fill_empty_rows_strategy(attrs, outs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_fill_empty_rows(topi.sparse_fill_empty_rows),
wrap_topi_schedule(topi.generic.schedule_sparse_fill_empty_rows),
name="sparse_fill_empty_rows.generic",
)
return strategy
def wrap_compute_sparse_fill_empty_rows(topi_compute):
"""Wrap sparse_fill_empty_rows compute"""
def _compute_sparse_fill_empty_rows(attrs, inputs, output_type):
return topi_compute(
inputs[0],
inputs[1],
inputs[2],
inputs[3],
output_type.fields[0].shape,
output_type.fields[1].shape,
output_type.fields[2].shape,
)
return _compute_sparse_fill_empty_rows
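# sparse_fill_empty_rows and sparse_reshape return tuples, so the target shapes of
# their outputs are read off the checked TupleType (output_type.fields) instead of
# being recomputed in the wrappers.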
# sparse_reshape
@override_native_generic_func("sparse_reshape_strategy")
def sparse_reshape_strategy(attrs, outs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_reshape(topi.sparse_reshape),
wrap_topi_schedule(topi.generic.schedule_extern),
name="sparse_reshape.generic",
)
return strategy
def wrap_compute_sparse_reshape(topi_compute):
"""Wrap sparse_reshape compute"""
def _compute_sparse_reshape(attrs, inputs, output_type):
return topi_compute(
inputs[0],
inputs[1],
inputs[2],
output_type.fields[0].shape,
output_type.fields[1].shape,
)
return _compute_sparse_reshape
# stft
@override_native_generic_func("stft_strategy")
def stft_strategy(attrs, outs, out_type, target):
"""stft generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_stft(topi.stft),
wrap_topi_schedule(topi.generic.schedule_extern),
name="stft.generic",
)
return strategy
def wrap_compute_stft(topi_compute):
"""Wrap stft compute"""
def _compute_stft(attrs, inputs, output_type):
return [
topi_compute(
inputs[0],
attrs.n_fft,
attrs.hop_length,
attrs.win_length,
inputs[1],
attrs.normalized,
attrs.onesided,
output_type.shape,
)
]
return _compute_stft
# trilu
@override_native_generic_func("trilu_strategy")
def trilu_strategy(attrs, outs, out_type, target):
"""trilu generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_trilu(topi.trilu),
wrap_topi_schedule(topi.generic.schedule_extern),
name="trilu.generic",
)
return strategy
def wrap_compute_trilu(topi_compute):
"""Wrap trilu compute"""
def _compute_trilu(attrs, inputs, output_type):
return [
topi_compute(
inputs[0],
inputs[1],
attrs.upper,
)
]
return _compute_trilu
# roi_pool
@generic_func
def schedule_roi_pool(attrs, outs, target):
"""schedule roi_pool"""
with target:
return topi.generic.schedule_roi_pool(outs)
# proposal
def wrap_compute_proposal(topi_compute):
"""wrap proposal topi compute"""
def _compute_proposal(attrs, inputs, out_type):
scales = get_float_tuple(attrs.scales)
ratios = get_float_tuple(attrs.ratios)
feature_stride = attrs.feature_stride
threshold = attrs.threshold
rpn_pre_nms_top_n = attrs.rpn_pre_nms_top_n
rpn_post_nms_top_n = attrs.rpn_post_nms_top_n
rpn_min_size = attrs.rpn_min_size
iou_loss = bool(get_const_int(attrs.iou_loss))
return [
topi_compute(
inputs[0],
inputs[1],
inputs[2],
scales,
ratios,
feature_stride,
threshold,
rpn_pre_nms_top_n,
rpn_post_nms_top_n,
rpn_min_size,
iou_loss,
)
]
return _compute_proposal
@override_native_generic_func("proposal_strategy")
def proposal_strategy(attrs, inputs, out_type, target):
"""proposal generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_proposal(topi.vision.rcnn.proposal),
wrap_topi_schedule(topi.generic.schedule_proposal),
name="proposal.generic",
)
return strategy
# scatter
@override_native_generic_func("scatter_strategy")
def scatter_strategy(attrs, outs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter(topi.scatter),
wrap_topi_schedule(topi.generic.schedule_scatter),
name="scatter.generic",
)
return strategy
def wrap_compute_scatter(topi_compute):
"""Wrap scatter topi compute"""
def _compute_scatter(attrs, inputs, _):
return [topi_compute(inputs[0], inputs[1], inputs[2], attrs.axis)]
return _compute_scatter
@override_native_generic_func("scatter_add_strategy")
def scatter_add_strategy(attrs, outs, out_type, target):
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter(topi.scatter_add),
wrap_topi_schedule(topi.generic.schedule_scatter),
name="scatter_add.generic",
)
return strategy
# scatter_nd
@override_native_generic_func("scatter_nd_strategy")
def scatter_nd_strategy(attrs, inputs, out_type, target):
"""scatter_nd generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter_nd(topi.scatter_nd),
wrap_topi_schedule(topi.generic.schedule_extern),
name="scatter_nd.generic",
)
return strategy
def wrap_compute_scatter_nd(topi_compute):
"""Wrap scatter_nd topi compute"""
def _compute_scatter_nd(attrs, inputs, _):
return [topi_compute(inputs[0], inputs[1], inputs[2], attrs.mode)]
return _compute_scatter_nd
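# Note: attrs.mode selects the scatter_nd combination rule (e.g. "update" overwrites
# the destination elements, "add" accumulates into them).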
# bitserial_conv2d
def wrap_compute_bitserial_conv2d(topi_compute):
"""wrap bitserial_conv2d topi compute"""
def compute_bitserial_conv2d(attrs, inputs, out_dtype):
"""Compute definition for bitserial conv2d."""
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
activation_bits = attrs.activation_bits
weight_bits = attrs.weight_bits
pack_dtype = attrs.pack_dtype
out_dtype = attrs.out_dtype
unipolar = attrs.unipolar
return [
topi_compute(
inputs[0],
inputs[1],
strides,
padding,
activation_bits,
weight_bits,
pack_dtype,
out_dtype,
unipolar,
)
]
return compute_bitserial_conv2d
@override_native_generic_func("bitserial_conv2d_strategy")
def bitserial_conv2d_strategy(attrs, inputs, out_type, target):
"""bitserial_conv2d generic strategy"""
logger.warning("bitserial_conv2d is not optimized for this platform.")
strategy = _op.OpStrategy()
layout = attrs.data_layout
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_bitserial_conv2d_nchw),
name="bitserial_conv2d_nchw.generic",
)
elif layout == "NHWC":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_bitserial_conv2d_nhwc),
name="bitserial_conv2d_nhwc.generic",
)
else:
raise ValueError("Data layout {} not supported.".format(layout))
return strategy
# bitserial_dense
def wrap_compute_bitserial_dense(topi_compute):
"""wrap bitserial_dense topi compute"""
def compute_bitserial_dense(attrs, inputs, out_type):
"""Compute definition of bitserial dense"""
data_bits = attrs.data_bits
weight_bits = attrs.weight_bits
pack_dtype = attrs.pack_dtype
out_dtype = attrs.out_dtype
out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype
unipolar = attrs.unipolar
return [
topi_compute(
inputs[0], inputs[1], data_bits, weight_bits, pack_dtype, out_dtype, unipolar
)
]
return compute_bitserial_dense
@override_native_generic_func("bitserial_dense_strategy")
def bitserial_dense_strategy(attrs, inputs, out_type, target):
"""bitserial_dense generic strategy"""
logger.warning("bitserial_dense is not optimized for this platform.")
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_bitserial_dense(topi.nn.bitserial_dense),
wrap_topi_schedule(topi.generic.schedule_bitserial_dense),
name="bitserial_dense.generic",
)
return strategy
# correlation
def wrap_compute_correlation(topi_compute):
"""wrap correlation topi compute"""
def _compute_correlation(attrs, inputs, out_type):
kernel_size = attrs.kernel_size
max_displacement = attrs.max_displacement
stride1 = attrs.stride1
stride2 = attrs.stride2
padding = get_const_tuple(attrs.padding)
is_multiply = attrs.is_multiply
return [
topi_compute(
inputs[0],
inputs[1],
kernel_size,
max_displacement,
stride1,
stride2,
padding,
is_multiply,
)
]
return _compute_correlation
@override_native_generic_func("correlation_strategy")
def correlation_strategy(attrs, inputs, out_type, target):
"""correlation generic strategy"""
logger.warning("correlation is not optimized for this platform.")
layout = attrs.layout
assert layout == "NCHW", "Only support NCHW layout"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_correlation(topi.nn.correlation_nchw),
wrap_topi_schedule(topi.generic.schedule_correlation_nchw),
name="correlation.generic",
)
return strategy
# argwhere
def wrap_compute_argwhere(topi_compute):
"""wrap argwhere topi compute"""
def _compute_argwhere(attrs, inputs, out_type):
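        # argwhere's output row count is data-dependent, so any dimension of the
        # checked type that is not a compile-time constant is replaced by a fresh
        # "any_dim" variable before handing the type to the TOPI compute.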
output_shape = []
for s in out_type.shape:
if hasattr(s, "value"):
output_shape.append(s)
else:
output_shape.append(te.var("any_dim", "int32"))
new_output_type = ir.TensorType(output_shape, "int32")
return [topi_compute(new_output_type, inputs[0])]
return _compute_argwhere
@override_native_generic_func("argwhere_strategy")
def argwhere_strategy(attrs, inputs, out_type, target):
"""argwhere generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_argwhere(topi.argwhere),
wrap_topi_schedule(topi.generic.schedule_argwhere),
name="argwhere.generic",
)
return strategy
# threefry_generate
def wrap_compute_threefry_generate(topi_compute):
"""Wrap threefry_generate topi compute"""
def _compute_threefry_generate(attrs, inputs, _):
return topi_compute(inputs[0], attrs.out_shape)
return _compute_threefry_generate
@override_native_generic_func("threefry_generate_strategy")
def threefry_generate_strategy(attrs, inputs, out_type, target):
"""threefry_generate generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_threefry_generate(topi.random.threefry_generate),
wrap_topi_schedule(topi.generic.schedule_extern),
name="threefry_generate.generic",
)
return strategy
# threefry_split
def wrap_compute_threefry_split(topi_compute):
"""Wrap threefry_split topi compute"""
def _compute_threefry_split(attrs, inputs, _):
return topi_compute(inputs[0])
return _compute_threefry_split
@override_native_generic_func("threefry_split_strategy")
def threefry_split_strategy(attrs, inputs, out_type, target):
"""threefry_split generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_threefry_split(topi.random.threefry_split),
wrap_topi_schedule(topi.generic.schedule_extern),
name="threefry_split.generic",
)
return strategy
# uniform
def wrap_compute_uniform(topi_compute):
"""Wrap uniform topi compute"""
def _compute_uniform(attrs, inputs, _):
return list(topi_compute(inputs[0], inputs[1], inputs[2], attrs.out_shape, attrs.out_dtype))
return _compute_uniform
@override_native_generic_func("uniform_strategy")
def uniform_strategy(attrs, inputs, out_type, target):
"""uniform generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_uniform(topi.random.uniform),
wrap_topi_schedule(topi.generic.schedule_extern),
name="uniform.generic",
)
return strategy
# multinomial
def wrap_compute_multinomial(topi_compute):
"""Wrap multinomial topi compute"""
def _compute_multinomial(attrs, inputs, _):
return list(topi_compute(inputs[0], inputs[1], attrs.num_samples))
return _compute_multinomial
# sliding_window
def wrap_compute_sliding_window():
"""Wrap sliding_window topi compute"""
def _compute_sliding_window(attrs, inputs, _):
return [topi.sliding_window(inputs[0], attrs.axis, attrs.window_shape, attrs.strides)]
return _compute_sliding_window
@override_native_generic_func("sliding_window_strategy")
def sliding_window_strategy(attrs, inputs, out_type, target):
"""sliding_window generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sliding_window(),
wrap_topi_schedule(topi.generic.schedule_extern),
name="sliding_window.generic",
)
return strategy
@override_native_generic_func("normal_strategy")
def normal_strategy(attrs, inputs, out_type, target):
"""normal generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_uniform(topi.random.normal),
wrap_topi_schedule(topi.generic.schedule_extern),
name="normal.generic",
)
return strategy
@override_native_generic_func("multinomial_strategy")
def multinomial_strategy(attrs, inputs, out_type, target):
"""multinomial generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_multinomial(topi.random.multinomial),
wrap_topi_schedule(topi.generic.schedule_extern),
name="multinomial.generic",
)
return strategy
def wrap_compute_scanop(topi_compute):
"""Wrap scanop style topi compute"""
def _compute_scanop(attrs, inputs, _):
return [topi_compute(inputs[0], attrs.axis, attrs.dtype, attrs.exclusive)]
return _compute_scanop
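# cumsum and cumprod below share this wrapper; only the TOPI compute differs, while
# the scan axis, output dtype and exclusive flag are forwarded identically.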
def wrap_compute_concat(topi_compute):
"""Wrap concatenate topi compute"""
def _compute_concat(attrs, inputs, _):
return [topi_compute(inputs, attrs.axis)]
return _compute_concat
@override_native_generic_func("cumsum_strategy")
def cumsum_strategy(attrs, inputs, out_type, target):
"""cumsum generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scanop(topi.cumsum),
wrap_topi_schedule(topi.generic.schedule_extern),
name="cumsum.generic",
)
return strategy
@override_native_generic_func("concat_strategy")
def concatenate_strategy(attrs, inputs, out_type, target):
"""concatenate generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_concat(topi.concatenate),
wrap_topi_schedule(topi.generic.schedule_injective),
name="concatenate",
)
return strategy
@override_native_generic_func("cumprod_strategy")
def cumprod_strategy(attrs, inputs, out_type, target):
"""cumprod generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scanop(topi.cumprod),
wrap_topi_schedule(topi.generic.schedule_extern),
name="cumprod.generic",
)
return strategy
def wrap_compute_unique(topi_compute):
"""Wrap unique topi compute"""
def _compute_unique(attrs, inputs, _):
return topi_compute(inputs[0], attrs.sorted, attrs.return_counts)
return _compute_unique
@override_native_generic_func("unique_strategy")
def unique_strategy(attrs, inputs, out_type, target):
"""unique generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_unique(topi.unique),
wrap_topi_schedule(topi.generic.schedule_unique),
name="unique.generic",
)
return strategy
@generic_func
def schedule_transpose(attrs, outs, target):
"""schedule transpose"""
with target:
return schedule_injective(attrs, outs, target)
# invert_permutation
def wrap_compute_invert_permutation(topi_compute):
"""wrap invert_permutation topi compute"""
def _compute_invert_permutation(attrs, inputs, out_type):
return [topi_compute(inputs[0])]
return _compute_invert_permutation
@override_native_generic_func("invert_permutation_strategy")
def invert_permutation_strategy(attrs, inputs, out_type, target):
"""invert_permutation generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_invert_permutation(topi.invert_permutation),
wrap_topi_schedule(topi.generic.schedule_injective),
name="invert_permutation.generic",
)
return strategy
def wrap_compute_einsum(topi_compute):
"""Wrap einsum topi compute"""
def _compute_einsum(attrs, inputs, _):
return [topi_compute(attrs.equation, *inputs)]
return _compute_einsum
@override_native_generic_func("einsum_strategy")
def einsum_strategy(attrs, inputs, out_type, target):
"""einsum generic strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_einsum(topi.einsum),
wrap_topi_schedule(topi.generic.schedule_einsum),
name="einsum.generic",
)
return strategy
# conv2d_backward_weight
def wrap_compute_conv2d_backward_weight(topi_compute):
"""wrap conv2d_backward_weight topi compute"""
def _compute_conv2d_backward_weight(attrs, inputs, out_dtype):
kernel_size = get_const_tuple(attrs.kernel_size)
padding = get_const_tuple(attrs.padding)
strides = get_const_tuple(attrs.strides)
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
out_dtype = attrs.out_dtype
layout = attrs.data_layout
out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype
out = topi_compute(
inputs[0],
inputs[1],
kernel_size,
padding,
strides,
dilation,
groups,
layout,
out_dtype,
)
return [out]
return _compute_conv2d_backward_weight
@override_native_generic_func("conv2d_backward_weight_strategy")
def conv2d_backward_weight_strategy(attrs, inputs, out_type, target):
"""wgrad generic strategy"""
raise RuntimeError(
"conv2d_backward_weight is currently only supported with cudnn. "
"Please run Legalize pass to decompose this op into supported ops."
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/strategy/hexagon.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of Hexagon operator strategy."""
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
from tvm import topi
from .generic import *
from .. import op as _op
# --- Op strategy registration
@batch_matmul_strategy.register("hexagon")
def batch_matmul_strategy_hexagon(attrs, inputs, out_type, target):
"""batch_matmul strategy for Hexagon"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_batch_matmul(topi.nn.batch_matmul, need_out_dtype=True),
wrap_topi_schedule(topi.hexagon.schedule_batch_matmul),
name="batch_matmul.hexagon",
)
return strategy
@concatenate_strategy.register("hexagon")
def concatenate_strategy_hexagon(attrs, inputs, out_type, target):
"""concatenate strategy for Hexagon"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_concat(topi.concatenate),
wrap_topi_schedule(topi.hexagon.schedule_injective),
name="concatenate.hexagon",
)
return strategy
@conv2d_strategy.register("hexagon")
def conv2d_strategy_hexagon(attrs, inputs, out_type, target):
"""Conv2d strategy for Hexagon"""
strategy = _op.OpStrategy()
data_layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
groups = attrs.groups
data, kernel = inputs
    layout = data_layout
if groups == 1:
if data_layout == "NHWC" and kernel_layout == "HWIO":
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nhwc),
wrap_topi_schedule(topi.hexagon.schedule_conv2d_nhwc),
name="conv2d_nhwc.hexagon",
)
elif data_layout == "NCHW" and kernel_layout == "OIHW":
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nchw),
wrap_topi_schedule(topi.hexagon.schedule_conv2d_nchw),
name="conv2d_nchw.hexagon",
)
else:
raise RuntimeError(
f"Unsupported layouts: data_layout:{data_layout}, kernel_layout:{kernel_layout}, "
f"groups:{attrs.groups}"
)
elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.hexagon.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.hexagon",
)
elif layout == "NHWC":
assert kernel_layout == "HWOI"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc),
wrap_topi_schedule(topi.hexagon.schedule_depthwise_conv2d_nhwc),
name="depthwise_conv2d_nhwc.hexagon",
)
else:
raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout))
else: # group_conv2d
raise RuntimeError(f"Unsupported group_conv2d layout {layout}")
return strategy
@dense_strategy.register("hexagon")
def dense_strategy_hexagon(attrs, inputs, out_type, target):
"""Dense strategy for Hexagon"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_dense(topi.nn.dense),
wrap_topi_schedule(topi.hexagon.schedule_dense),
name="dense.hexagon",
)
return strategy
@softmax_strategy.register("hexagon")
def softmax_strategy_hexagon(attrs, inputs, out_type, target):
"""Softmax strategy for Hexagon"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.softmax),
wrap_topi_schedule(topi.hexagon.schedule_softmax),
name="softmax.hexagon",
)
return strategy
@conv2d_transpose_strategy.register("hexagon")
def conv2d_transpose_strategy_hexagon(attrs, inputs, out_type, target):
"""conv2d_transpose hexagon strategy"""
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
    assert layout == "NCHW", "only NCHW layout is supported for now"
    assert dilation == (1, 1), "dilation is not supported yet"
strategy = _op.OpStrategy()
if groups == 1:
strategy.add_implementation(
wrap_compute_conv2d_transpose(topi.nn.conv2d_transpose_nchw),
wrap_topi_schedule(topi.hexagon.schedule_conv2d_transpose_nchw),
name="conv2d_transpose_nchw.generic",
)
else:
raise RuntimeError("Unsupported conv2d_transpose layout {}".format(layout))
return strategy
# --- Op schedule registration
@schedule_adaptive_pool.register("hexagon")
def schedule_adaptive_pool_hexagon(attrs, outs, target):
"""Schedule adaptive pool ops for Hexagon"""
with target:
return topi.hexagon.schedule_adaptive_pool(outs)
@schedule_injective.register("hexagon")
def schedule_injective_hexagon(attrs, outs, target):
"""Schedule injective ops for Hexagon"""
with target:
return topi.hexagon.schedule_injective(outs)
@schedule_concatenate.register("hexagon")
def schedule_concatenate_hexagon(attrs, outs, target):
"""Schedule concatenate ops for Hexagon"""
with target:
return topi.hexagon.schedule_injective(outs)
@schedule_pad.register("hexagon")
def schedule_pad_hexagon(attrs, outs, target):
"""Schedule pad ops for Hexagon"""
with target:
return topi.hexagon.schedule_pad(outs)
@schedule_pool.register("hexagon")
def schedule_pool_hexagon(attrs, outs, target):
"""Schedule pool ops for Hexagon"""
with target:
return topi.hexagon.schedule_pool(outs)
@schedule_reduce.register("hexagon")
def schedule_reduce_hexagon(attrs, outs, target):
"""Schedule reduction ops for Hexagon"""
with target:
return topi.hexagon.schedule_reduce(outs)
@conv2d_NCHWc_strategy.register("hexagon")
def conv2d_NCHWc_strategy_hexagon(attrs, inputs, out_type, target):
"""conv2d_NCHWc_ hexagon strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d(
topi.hexagon.conv2d_NCHWc_int8, need_data_layout=True, need_out_layout=True
),
wrap_topi_schedule(topi.hexagon.schedule_conv2d_NCHWc_int8),
name="conv2d_NCHWc_int8.hexagon",
)
return strategy
@dense_pack_strategy.register("hexagon")
def dense_pack_strategy_hexagon(attrs, inputs, out_type, target):
"""dense_pack hexagon strategy"""
strategy = _op.OpStrategy()
if (
inputs[0].dtype == "uint8"
and inputs[1].dtype == "uint8"
and out_type.dtype == "int32"
and attrs["weight_layout"] == "NC32n4c"
):
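        # u8 x u8 -> i32 with weights pre-packed as NC32n4c maps onto Hexagon's
        # vrmpy instruction; plevel=12 ranks it above the default (10) so it wins
        # whenever these conditions hold.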
strategy.add_implementation(
wrap_compute_dense(topi.hexagon.dense.dense_u8u8i32_vrmpy_compute),
wrap_topi_schedule(topi.hexagon.dense.dense_u8u8i32_vrmpy_schedule),
name="dense_uint8.hexagon",
plevel=12,
)
return strategy
@fast_softmax_strategy.register("hexagon")
def fast_softmax_strategy_hexagon(attrs, inputs, out_type, target):
"""fast_softmax hexagon strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.fast_softmax),
wrap_topi_schedule(topi.hexagon.schedule_softmax),
name="fast_softmax.hexagon",
)
return strategy
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/strategy/hls.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of HLS operator strategy."""
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
from tvm import topi
from .generic import *
from .. import op as _op
@schedule_injective.register("hls")
def schedule_injective_hls(attrs, outs, target):
"""schedule injective ops for hls"""
with target:
return topi.hls.schedule_injective(outs)
@schedule_reduce.register("hls")
def schedule_reduce_hls(attrs, outs, target):
"""schedule reduction ops for hls"""
with target:
return topi.hls.schedule_reduce(outs)
@schedule_concatenate.register("hls")
def schedule_concatenate_hls(attrs, outs, target):
"""schedule concatenate for hls"""
with target:
return topi.hls.schedule_injective(outs)
@schedule_pool.register("hls")
def schedule_pool_hls(attrs, outs, target):
"""schedule pooling ops for hls"""
with target:
return topi.hls.schedule_pool(outs, attrs.layout)
@schedule_adaptive_pool.register("hls")
def schedule_adaptive_pool_hls(attrs, outs, target):
"""schedule adaptive pooling ops for hls"""
with target:
return topi.hls.schedule_adaptive_pool(outs)
@softmax_strategy.register("hls")
def softmax_strategy_hls(attrs, inputs, out_type, target):
"""softmax hls strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.softmax),
wrap_topi_schedule(topi.hls.schedule_softmax),
name="softmax.hls",
)
return strategy
@log_softmax_strategy.register("hls")
def log_softmax_strategy_hls(attrs, inputs, out_type, target):
"""log_softmax hls strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.log_softmax),
wrap_topi_schedule(topi.hls.schedule_softmax),
name="log_softmax.hls",
)
return strategy
@conv2d_strategy.register("hls")
def conv2d_strategy_hls(attrs, inputs, out_type, target):
"""conv2d hls strategy"""
strategy = _op.OpStrategy()
data, kernel = inputs
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
(dilation_h, dilation_w) = dilation
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if groups == 1:
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nchw),
wrap_topi_schedule(topi.hls.schedule_conv2d_nchw),
name="conv2d_nchw.hls",
)
elif layout == "NHWC":
assert kernel_layout == "HWIO"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_nhwc),
wrap_topi_schedule(topi.hls.schedule_conv2d_nhwc),
name="conv2d_nhwc.hls",
)
else:
raise RuntimeError("Unsupported conv2d layout {}".format(layout))
elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.hls.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.hls",
)
elif layout == "NHWC":
assert kernel_layout == "HWOI"
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc),
wrap_topi_schedule(topi.hls.schedule_depthwise_conv2d_nhwc),
name="depthwise_nhwc.hls",
)
else:
raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout))
else: # group_conv2d
raise RuntimeError("group_conv2d is not supported for hls")
return strategy
@conv2d_NCHWc_strategy.register("hls")
def conv2d_NCHWc_strategy_hls(attrs, inputs, out_type, target):
"""conv2d_NCHWc hls strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_NCHWc, need_data_layout=True, need_out_layout=True),
wrap_topi_schedule(topi.hls.schedule_conv2d_NCHWc),
name="conv2d_NCHWc.hls",
)
return strategy
@conv2d_transpose_strategy.register("hls")
def conv2d_transpose_strategy_hls(attrs, inputs, out_type, target):
"""conv2d_transpose hls strategy"""
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
    assert layout == "NCHW", "only NCHW layout is supported for now"
    assert dilation == (1, 1), "dilation is not supported yet"
    assert groups == 1, "only groups == 1 is supported for now"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d_transpose(topi.nn.conv2d_transpose_nchw),
wrap_topi_schedule(topi.hls.schedule_conv2d_transpose_nchw),
name="conv2d_transpose_nchw.hls",
)
return strategy
@dense_strategy.register("hls")
def dense_strategy_hls(attrs, inputs, out_type, target):
"""dense hls strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_dense(topi.nn.dense),
wrap_topi_schedule(topi.hls.schedule_dense),
name="dense.hls",
)
return strategy
@bitserial_conv2d_strategy.register("hls")
def bitserial_conv2d_strategy_hls(attrs, inputs, out_type, target):
"""bitserial_conv2d hls strategy"""
strategy = _op.OpStrategy()
layout = attrs.data_layout
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nchw),
wrap_topi_schedule(topi.hls.schedule_bitserial_conv2d_nchw),
name="bitserial_conv2d_nchw.hls",
)
elif layout == "NHWC":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.nn.bitserial_conv2d_nhwc),
wrap_topi_schedule(topi.hls.schedule_bitserial_conv2d_nhwc),
name="bitserial_conv2d_nhwc.hls",
)
else:
raise ValueError("Data layout {} not supported.".format(layout))
return strategy
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/strategy/intel_graphics.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of x86 operator strategy."""
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
from tvm import topi
from .generic import *
from .. import op as _op
@conv2d_strategy.register("intel_graphics")
def conv2d_strategy_intel_graphics(attrs, inputs, out_type, target):
"""conv2d intel graphics strategy"""
strategy = _op.OpStrategy()
data, kernel = inputs
dilation_h, dilation_w = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if groups == 1:
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.intel_graphics.conv2d_nchw),
wrap_topi_schedule(topi.intel_graphics.schedule_conv2d_nchw),
name="conv2d_nchw.intel_graphics",
)
# conv2d_NCHWc won't work without alter op layout pass
# TODO(@Laurawly): fix this
strategy.add_implementation(
wrap_compute_conv2d(
topi.intel_graphics.conv2d_NCHWc, need_data_layout=True, need_out_layout=True
),
wrap_topi_schedule(topi.intel_graphics.schedule_conv2d_NCHWc),
name="conv2d_NCHWc.intel_graphics",
plevel=5,
)
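            # plevel=5 ranks this below the default (10), so conv2d_nchw above is
            # preferred until AlterOpLayout rewrites the graph into NCHWc form.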
else:
raise RuntimeError("Unsupported conv2d layout {} for intel graphics".format(layout))
elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.intel_graphics.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.intel_graphics.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.intel_graphics",
)
else:
raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout))
else: # group_conv2d
raise RuntimeError("group_conv2d is not supported for intel graphics")
return strategy
@conv2d_NCHWc_strategy.register("intel_graphics")
def conv2d_NCHWc_strategy_intel_graphics(attrs, inputs, out_type, target):
"""conv2d_NCHWc intel_graphics strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d(
topi.intel_graphics.conv2d_NCHWc, need_data_layout=True, need_out_layout=True
),
wrap_topi_schedule(topi.intel_graphics.schedule_conv2d_NCHWc),
name="conv2d_NCHWc.intel_graphics",
)
return strategy
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/strategy/mali.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of mali operator strategy."""
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
import re
from tvm import topi
from tvm.auto_scheduler import is_auto_scheduler_enabled
from tvm.meta_schedule import is_meta_schedule_enabled
from .. import op as _op
from .generic import *
@conv2d_strategy.register("mali")
def conv2d_strategy_mali(attrs, inputs, out_type, target):
"""conv2d mali strategy"""
strategy = _op.OpStrategy()
data, kernel = inputs
dilation_h, dilation_w = attrs.get_int_tuple("dilation")
stride_h, stride_w = attrs.get_int_tuple("strides")
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
if groups == 1:
if layout == "NCHW":
if kernel_layout == "OIHW":
strategy.add_implementation(
wrap_compute_conv2d(topi.mali.conv2d_nchw_spatial_pack),
wrap_topi_schedule(topi.mali.schedule_conv2d_nchw_spatial_pack),
name="conv2d_nchw_spatial_pack.mali",
)
# check if winograd algorithm is applicable
_, _, kh, kw = get_const_tuple(kernel.shape)
if (
kh == 3
and kw == 3
and stride_h == 1
and stride_w == 1
and dilation_h == 1
and dilation_w == 1
):
strategy.add_implementation(
wrap_compute_conv2d(topi.mali.conv2d_nchw_winograd),
wrap_topi_schedule(topi.mali.schedule_conv2d_nchw_winograd),
name="conv2d_nchw_winograd.mali",
plevel=5,
)
elif re.match(r"OIHW\d*o", kernel_layout):
strategy.add_implementation(
wrap_compute_conv2d(topi.mali.conv2d_nchw_spatial_pack),
wrap_topi_schedule(topi.mali.schedule_conv2d_nchw_spatial_pack),
name="conv2d_nchw_spatial_pack.mali",
)
else:
raise RuntimeError(
"Unsupported weight layout {} for conv2d NCHW".format(kernel_layout)
)
elif layout == "NHWC":
assert kernel_layout == "HWIO"
need_auto_scheduler_layout = is_auto_scheduler_enabled()
need_meta_schedule_layout = is_meta_schedule_enabled()
if need_auto_scheduler_layout or need_meta_schedule_layout:
strategy.add_implementation(
wrap_compute_conv2d(
topi.nn.conv2d_nhwc,
need_auto_scheduler_layout=need_auto_scheduler_layout,
need_meta_schedule_layout=need_meta_schedule_layout,
),
naive_schedule,
name="conv2d_nhwc.mali",
)
is_winograd_applicable = False
if len(kernel.shape) == 4:
kernel_h, kernel_w, _, _ = get_const_tuple(kernel.shape)
is_winograd_applicable = (
"float" in data.dtype
and "float" in kernel.dtype
and kernel_h == 3
and kernel_w == 3
and stride_h == 1
and stride_w == 1
and dilation_h == 1
and dilation_w == 1
)
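                # Winograd is restricted to 3x3, stride-1, dilation-1 float convolutions.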
if is_winograd_applicable:
if need_meta_schedule_layout:
strategy.add_implementation(
wrap_compute_conv2d(
topi.nn.conv2d_winograd_nhwc,
need_auto_scheduler_layout=False,
need_meta_schedule_layout=True,
),
naive_schedule, # this implementation should never be picked by autotvm
name="conv2d_nhwc.winograd",
plevel=15,
)
elif need_auto_scheduler_layout:
strategy.add_implementation(
wrap_compute_conv2d(
topi.nn.conv2d_winograd_nhwc,
need_auto_scheduler_layout=True,
need_meta_schedule_layout=False,
),
naive_schedule, # this implementation should never be picked by autotvm
name="conv2d_nhwc.winograd",
plevel=15,
)
else:
raise RuntimeError("Both AutoScheduler and MetaSchedule are not enabled")
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.mali.conv2d_nhwc_spatial_pack),
wrap_topi_schedule(topi.mali.schedule_conv2d_nhwc_spatial_pack),
name="conv2d_nhwc_spatial_pack.mali",
)
else:
raise RuntimeError("Unsupported conv2d layout {} for mali".format(layout))
elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.mali.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.mali.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.mali",
)
elif layout == "NHWC":
assert kernel_layout == "HWOI"
if is_auto_scheduler_enabled():
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc),
naive_schedule,
name="depthwise_conv2d_nhwc.mali",
)
elif is_meta_schedule_enabled():
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc),
naive_schedule,
name="depthwise_conv2d_nhwc.mali",
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.mali.depthwise_conv2d_nhwc),
wrap_topi_schedule(topi.mali.schedule_depthwise_conv2d_nhwc),
name="depthwise_conv2d_nhwc.mali",
)
else:
raise RuntimeError("Unsupported depthwise_conv2d layout {} for mali".format(layout))
else: # group_conv2d
raise RuntimeError("group_conv2d is not supported for mali")
return strategy
@conv2d_winograd_without_weight_transform_strategy.register("mali")
def conv2d_winograd_without_weight_transform_strategy_mali(attrs, inputs, out_type, target):
"""conv2d_winograd_without_weight_transform mali strategy"""
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
layout = attrs.data_layout
strides = attrs.get_int_tuple("strides")
kernel = inputs[1]
    assert dilation == (1, 1), "dilation is not supported yet"
    assert strides == (1, 1), "strides other than (1, 1) are not supported yet"
    assert groups == 1, "arbitrary group numbers are not supported"
strategy = _op.OpStrategy()
if layout == "NCHW":
        assert len(kernel.shape) == 5, "Kernel must be packed into 5 dimensions"
strategy.add_implementation(
wrap_compute_conv2d(topi.mali.conv2d_nchw_winograd),
wrap_topi_schedule(topi.mali.schedule_conv2d_nchw_winograd),
name="conv2d_nchw_winograd.mali",
)
elif layout == "NHWC":
need_auto_scheduler_layout = is_auto_scheduler_enabled()
need_meta_schedule_layout = is_meta_schedule_enabled()
if need_auto_scheduler_layout or need_meta_schedule_layout:
strategy.add_implementation(
wrap_compute_conv2d(
topi.nn.conv2d_winograd_nhwc_without_weight_transform,
need_auto_scheduler_layout=need_auto_scheduler_layout,
need_meta_schedule_layout=need_meta_schedule_layout,
),
naive_schedule, # this implementation should never be picked by autotvm
name="conv2d_nhwc_winograd_without_weight_transform",
plevel=15,
)
else:
raise RuntimeError(
"Winograd conv2d NHWC is not enabled for mali without auto_scheduler."
)
else:
raise RuntimeError(
"Unsupported conv2d_winograd_without_weight_transform layout {}".format(layout)
)
return strategy
@dense_strategy.register("mali")
def dense_strategy_mali(attrs, inputs, out_type, target):
"""dense mali strategy"""
strategy = _op.OpStrategy()
if is_auto_scheduler_enabled():
strategy.add_implementation(
wrap_compute_dense(topi.nn.dense, need_auto_scheduler_layout=True),
naive_schedule,
name="dense.mali",
)
elif is_meta_schedule_enabled():
strategy.add_implementation(
wrap_compute_dense(topi.nn.dense, need_meta_schedule_layout=True),
naive_schedule,
name="dense.mali",
)
else:
strategy.add_implementation(
wrap_compute_dense(topi.mali.dense),
wrap_topi_schedule(topi.mali.schedule_dense),
name="dense.mali",
)
return strategy
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/strategy/rocm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of ROCm operator strategy."""
# pylint: disable=invalid-name,unused-argument,unused-wildcard-import,wildcard-import
from tvm import topi
from tvm.te import SpecializedCondition
from tvm.contrib.thrust import can_use_rocthrust
from tvm.contrib import miopen
from .generic import *
from .. import op as _op
from .cuda import batch_matmul_strategy_cuda, conv2d_strategy_cuda, dense_strategy_cuda
@conv2d_strategy.register("rocm")
def conv2d_strategy_rocm(attrs, inputs, out_type, target):
"""conv2d rocm strategy"""
groups = attrs.groups
layout = attrs.data_layout
padding = attrs.get_int_tuple("padding")
strategy = conv2d_strategy_cuda(attrs, inputs, out_type, target)
# add miopen implementation
if (
"miopen" in target.libs
and groups == 1
and layout == "NCHW"
and padding[0] == padding[2]
and padding[1] == padding[3]
):
strategy.add_implementation(
wrap_compute_conv2d(topi.rocm.conv2d_nchw_miopen, need_data_layout=True),
wrap_topi_schedule(topi.rocm.schedule_conv2d_nchw_miopen),
name="conv2d_nchw_miopen.rocm",
plevel=50,
)
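        # plevel=50 outranks every implementation inherited from the CUDA strategy,
        # so MIOpen is preferred whenever its layout and padding constraints hold.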
return strategy
@dense_strategy.register("rocm")
def dense_strategy_rocm(attrs, inputs, out_type, target):
"""Dense strategy for ROCM"""
    assert len(inputs[0].shape) == 2 and len(inputs[1].shape) == 2, "Only 2-dim dense is supported"
strategy = dense_strategy_cuda(attrs, inputs, out_type, target)
if target.kind.name == "rocm" and "rocblas" in target.libs:
assert out_type.dtype == inputs[0].dtype, "Mixed precision not supported."
strategy.add_implementation(
wrap_compute_dense(topi.rocm.dense_rocblas),
wrap_topi_schedule(topi.rocm.schedule_dense_rocblas),
name="dense_rocblas.rocm",
plevel=15,
)
return strategy
@batch_matmul_strategy.register("rocm")
def batch_matmul_strategy_rocm(attrs, inputs, out_type, target):
"""Batch matmul strategy for ROCM"""
strategy = batch_matmul_strategy_cuda(attrs, inputs, out_type, target)
if target.kind.name == "rocm" and "rocblas" in target.libs:
assert out_type.dtype == inputs[0].dtype, "Mixed precision not supported."
strategy.add_implementation(
wrap_compute_batch_matmul(topi.rocm.batch_matmul_rocblas),
wrap_topi_schedule(topi.rocm.schedule_batch_matmul_rocblas),
name="batch_matmul_rocblas.rocm",
plevel=12,
)
return strategy
@argsort_strategy.register(["rocm"])
def argsort_strategy_rocm(attrs, inputs, out_type, target):
"""argsort rocm strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_argsort(topi.cuda.argsort),
wrap_topi_schedule(topi.cuda.schedule_argsort),
name="argsort.rocm",
)
if can_use_rocthrust(target, "tvm.contrib.thrust.sort"):
strategy.add_implementation(
wrap_compute_argsort(topi.cuda.argsort_thrust),
wrap_topi_schedule(topi.cuda.schedule_argsort),
name="argsort_thrust.rocm",
plevel=15,
)
return strategy
@scatter_strategy.register(["rocm"])
def scatter_strategy_rocm(attrs, inputs, out_type, target):
"""scatter rocm strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter(topi.cuda.scatter),
wrap_topi_schedule(topi.cuda.schedule_scatter),
name="scatter.rocm",
plevel=10,
)
rank = len(inputs[0].shape)
with SpecializedCondition(rank == 1):
if can_use_rocthrust(target, "tvm.contrib.thrust.stable_sort_by_key"):
strategy.add_implementation(
wrap_compute_scatter(topi.cuda.scatter_via_sort),
wrap_topi_schedule(topi.cuda.schedule_scatter_via_sort),
name="scatter_via_sort.rocm",
plevel=9, # use the sequential version by default
)
return strategy
@sort_strategy.register(["rocm"])
def sort_strategy_rocm(attrs, inputs, out_type, target):
"""sort rocm strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sort(topi.cuda.sort),
wrap_topi_schedule(topi.cuda.schedule_sort),
name="sort.rocm",
)
if can_use_rocthrust(target, "tvm.contrib.thrust.sort"):
strategy.add_implementation(
wrap_compute_sort(topi.cuda.sort_thrust),
wrap_topi_schedule(topi.cuda.schedule_sort),
name="sort_thrust.cuda",
plevel=15,
)
return strategy
@topk_strategy.register(["rocm"])
def topk_strategy_rocm(attrs, inputs, out_type, target):
"""topk rocm strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_topk(topi.cuda.topk),
wrap_topi_schedule(topi.cuda.schedule_topk),
name="topk.rocm",
)
if can_use_rocthrust(target, "tvm.contrib.thrust.sort"):
strategy.add_implementation(
wrap_compute_topk(topi.cuda.topk_thrust),
wrap_topi_schedule(topi.cuda.schedule_topk),
name="topk_thrust.rocm",
plevel=15,
)
return strategy
@softmax_strategy.register(["rocm"])
def softmax_strategy_rocm(attrs, inputs, out_type, target):
"""rocm strategy for softmax"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.softmax),
wrap_topi_schedule(topi.cuda.schedule_softmax),
name="softmax.rocm",
)
if "miopen" in target.libs:
strategy.add_implementation(
wrap_compute_softmax(miopen.softmax),
wrap_topi_schedule(topi.generic.schedule_extern),
name="softmax.miopen",
plevel=15,
)
return strategy
@log_softmax_strategy.register(["rocm"])
def log_softmax_strategy_rocm(attrs, inputs, out_type, target):
"""rocm strategy for log softmax"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.log_softmax),
wrap_topi_schedule(topi.cuda.schedule_softmax),
name="log_softmax.rocm",
)
if "miopen" in target.libs:
strategy.add_implementation(
wrap_compute_softmax(miopen.log_softmax),
wrap_topi_schedule(topi.generic.schedule_extern),
name="log_softmax.miopen",
plevel=15,
)
return strategy
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/strategy/x86.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of x86 operator strategy."""
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
import logging
import re
from tvm import tir, topi
from tvm.auto_scheduler import is_auto_scheduler_enabled
from tvm.meta_schedule import is_meta_schedule_enabled
from tvm.relay.ty import is_dynamic
from tvm.target import Target
from tvm.te import SpecializedCondition
from tvm.topi.x86.utils import target_has_vnni
from .. import op as _op
from .generic import *
logger = logging.getLogger("strategy")
_NCHWc_matcher = re.compile("^NCHW[0-9]+c$")
_OIHWio_matcher = re.compile("^OIHW[0-9]+i[0-9]+o$")
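# e.g. _NCHWc_matcher matches "NCHW8c"/"NCHW16c" and _OIHWio_matcher matches
# "OIHW8i8o", while the plain "NCHW"/"OIHW" layouts do not match either pattern.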
@schedule_injective.register("cpu")
def schedule_injective_cpu(attrs, outs, target):
"""schedule injective ops for x86"""
with target:
return topi.x86.schedule_injective(outs)
@schedule_reduce.register("cpu")
def schedule_reduce_cpu(attrs, outs, target):
"""schedule reduction ops for x86"""
with target:
return topi.x86.schedule_reduce(outs)
@schedule_pool.register("cpu")
def schedule_pool_cpu(attrs, outs, target):
"""schedule pooling ops for x86"""
with target:
return topi.x86.schedule_pool(outs, attrs.layout)
@schedule_adaptive_pool.register("cpu")
def schedule_adaptive_pool_cpu(attrs, outs, target):
"""schedule adaptive pooling ops for x86"""
with target:
return topi.x86.schedule_adaptive_pool(outs)
@softmax_strategy.register("cpu")
def softmax_strategy_cpu(attrs, inputs, out_type, target):
"""softmax x86 strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.softmax),
wrap_topi_schedule(topi.x86.schedule_softmax),
name="softmax.x86",
)
return strategy
@fast_softmax_strategy.register("cpu")
def fast_softmax_strategy_cpu(attrs, inputs, out_type, target):
"""fast_softmax x86 strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.fast_softmax),
wrap_topi_schedule(topi.x86.schedule_softmax),
name="fast_softmax.x86",
)
return strategy
@log_softmax_strategy.register("cpu")
def log_softmax_strategy_cpu(attrs, inputs, out_type, target):
"""log_softmax x86 strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.log_softmax),
wrap_topi_schedule(topi.x86.schedule_softmax),
name="log_softmax.x86",
)
return strategy
@conv2d_strategy.register("cpu")
def conv2d_strategy_cpu(attrs, inputs, out_type, target):
"""conv2d x86 strategy"""
strategy = _op.OpStrategy()
data, kernel = inputs
stride_h, stride_w = get_const_tuple(attrs.strides)
dilation_h, dilation_w = get_const_tuple(attrs.dilation)
groups = attrs.groups
layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
if dilation_h < 1 or dilation_w < 1:
raise ValueError("dilation should be positive value")
need_auto_scheduler_layout = is_auto_scheduler_enabled()
need_meta_schedule_layout = is_meta_schedule_enabled()
if groups == 1:
if layout == "NCHW":
assert kernel_layout == "OIHW"
if topi.x86.is_int8_hw_support(data.dtype, kernel.dtype):
strategy.add_implementation(
wrap_compute_conv2d(topi.x86.conv2d_nchw_int8),
wrap_topi_schedule(topi.x86.schedule_conv2d_nchw_int8),
name="conv2d_nchw_int8.x86",
)
elif "dnnl" in target.libs:
strategy.add_implementation(
wrap_compute_conv2d(topi.x86.conv2d_nchw_dnnl),
wrap_topi_schedule(topi.x86.schedule_conv2d_nchw_dnnl),
name="conv2d_nchw_dnnl.x86",
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.x86.conv2d_nchw),
wrap_topi_schedule(topi.x86.schedule_conv2d_nchw),
name="conv2d_nchw.x86",
)
elif _NCHWc_matcher.match(layout): # check if layout is NCHWxc
assert _OIHWio_matcher.match(kernel_layout) # check if kernel is OIHWio
return conv2d_NCHWc_strategy_cpu(attrs, inputs, out_type, target)
elif layout == "NHWC":
assert kernel_layout == "HWIO"
if (not need_auto_scheduler_layout) and (not need_meta_schedule_layout):
logger.warning("conv2d NHWC layout is not optimized for x86 with autotvm.")
if "dnnl" in target.libs:
strategy.add_implementation(
wrap_compute_conv2d(topi.x86.conv2d_nhwc_dnnl),
wrap_topi_schedule(topi.x86.schedule_conv2d_nhwc_dnnl),
name="conv2d_nhwc_dnnl.x86",
)
else:
strategy.add_implementation(
wrap_compute_conv2d(
topi.nn.conv2d_nhwc,
need_auto_scheduler_layout=need_auto_scheduler_layout,
need_meta_schedule_layout=need_meta_schedule_layout,
),
wrap_topi_schedule(topi.x86.schedule_conv2d_nhwc),
name="conv2d_nhwc.x86",
)
judge_winograd_auto_scheduler = False
if len(kernel.shape) == 4:
kernel_h, kernel_w, _, co = get_const_tuple(kernel.shape)
judge_winograd_auto_scheduler = (
"float" in data.dtype
and "float" in kernel.dtype
and kernel_h == 3
and kernel_w == 3
and stride_h == 1
and stride_w == 1
and dilation_h == 1
and dilation_w == 1
and 64 < co < 512
# The last condition of co is based on our profiling of resnet workloads
# on skylake avx512 machines. We found winograd is faster than direct
# only when co is within this range
)
# register auto-scheduler implementations
if (
need_auto_scheduler_layout or need_meta_schedule_layout
) and judge_winograd_auto_scheduler:
strategy.add_implementation(
wrap_compute_conv2d(
topi.nn.conv2d_winograd_nhwc,
need_auto_scheduler_layout=need_auto_scheduler_layout,
need_meta_schedule_layout=need_meta_schedule_layout,
),
naive_schedule, # this implementation should never be picked by autotvm
name="conv2d_nhwc.winograd",
plevel=15,
)
elif layout == "HWCN":
assert kernel_layout == "HWIO"
            if (not need_auto_scheduler_layout) and (not need_meta_schedule_layout):
logger.warning("conv2d HWCN layout is not optimized for x86 with autotvm.")
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.conv2d_hwcn),
wrap_topi_schedule(topi.generic.schedule_conv2d_hwcn),
name="conv2d_hwcn.generic",
)
else:
raise RuntimeError("Unsupported conv2d layout {} for x86".format(layout))
elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
if layout == "NCHW":
assert kernel_layout == "OIHW"
channel_multiplier = get_const_tuple(inputs[1].shape)[1]
if channel_multiplier == 1 and dilation_h == 1 and dilation_w == 1:
strategy.add_implementation(
wrap_compute_conv2d(topi.x86.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.x86.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.x86",
)
else:
logger.warning(
"For x86 target, depthwise_conv2d with channel "
"multiplier greater than 1 is not optimized"
)
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nchw),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nchw),
name="depthwise_conv2d_nchw.generic",
)
elif _NCHWc_matcher.match(layout): # check if layout is NCHWxc
assert _OIHWio_matcher.match(kernel_layout) # check if kernel is OIHWio
return depthwise_conv2d_NCHWc_strategy_cpu(attrs, inputs, out_type, target)
elif layout == "NHWC":
assert kernel_layout == "HWOI"
if (not need_auto_scheduler_layout) and (not need_meta_schedule_layout):
logger.warning(
"depthwise_conv2d NHWC layout is not optimized for x86 with autotvm."
)
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc),
wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nhwc),
name="depthwise_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout))
else: # group_conv2d
if layout == "NCHW":
assert kernel_layout == "OIHW"
strategy.add_implementation(
wrap_compute_conv2d(topi.x86.group_conv2d_nchw, has_groups=True),
wrap_topi_schedule(topi.x86.schedule_group_conv2d_nchw),
name="group_conv2d_nchw.x86",
)
elif layout == "NHWC":
assert kernel_layout == "HWIO"
if (not need_auto_scheduler_layout) and (not need_meta_schedule_layout):
logger.warning("group_conv2d is not optimized for x86 with autotvm.")
strategy.add_implementation(
wrap_compute_conv2d(topi.nn.group_conv2d_nhwc, has_groups=True),
wrap_topi_schedule(topi.generic.schedule_group_conv2d_nhwc),
name="group_conv2d_nhwc.generic",
)
else:
raise RuntimeError("Unsupported group_conv2d layout {}".format(layout))
return strategy
@conv2d_NCHWc_strategy.register("cpu")
def conv2d_NCHWc_strategy_cpu(attrs, inputs, out_type, target):
"""conv2d_NCHWc x86 strategy"""
strategy = _op.OpStrategy()
data, kernel = inputs
if topi.x86.is_int8_hw_support(data.dtype, kernel.dtype):
strategy.add_implementation(
wrap_compute_conv2d(
topi.x86.conv2d_NCHWc_int8, need_data_layout=True, need_out_layout=True
),
wrap_topi_schedule(topi.x86.schedule_conv2d_NCHWc_int8),
name="conv2d_NCHWc_int8.x86",
)
else:
strategy.add_implementation(
wrap_compute_conv2d(topi.x86.conv2d_NCHWc, need_data_layout=True, need_out_layout=True),
wrap_topi_schedule(topi.x86.schedule_conv2d_NCHWc),
name="conv2d_NCHWc.x86",
)
return strategy
@depthwise_conv2d_NCHWc_strategy.register("cpu")
def depthwise_conv2d_NCHWc_strategy_cpu(attrs, inputs, out_type, target):
"""depthwise_conv2d x86 strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv2d(
topi.x86.depthwise_conv2d_NCHWc, need_data_layout=True, need_out_layout=True
),
wrap_topi_schedule(topi.x86.schedule_depthwise_conv2d_NCHWc),
name="depthwise_conv2d_NCHWc.x86",
)
return strategy
@conv2d_transpose_strategy.register("cpu")
def conv2d_transpose_strategy_cpu(attrs, inputs, out_type, target):
"""conv2d_transpose x86 strategy"""
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCHW", "only support nchw for now"
assert dilation == (1, 1), "not support dilate now"
strategy = _op.OpStrategy()
if groups == 1:
strategy.add_implementation(
wrap_compute_conv2d_transpose(topi.x86.conv2d_transpose_nchw),
wrap_topi_schedule(topi.x86.schedule_conv2d_transpose_nchw),
name="conv2d_transpose_nchw.x86",
)
else:
strategy.add_implementation(
wrap_compute_conv2d_transpose(topi.nn.group_conv2d_transpose_nchw, has_groups=True),
wrap_topi_schedule(topi.generic.schedule_group_conv2d_transpose_nchw),
name="group_conv2d_transpose_nchw.x86",
)
return strategy
@conv3d_transpose_strategy.register("cpu")
def conv3d_transpose_strategy_cpu(attrs, inputs, out_type, target):
"""conv3d_transpose x86 strategy"""
layout = attrs.data_layout
dilation = get_const_tuple(attrs.dilation)
groups = attrs.groups
assert layout == "NCDHW", "only support ncdhw for now"
assert dilation == (1, 1, 1), "not support dilate now"
assert groups == 1, "only support groups == 1 for now"
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_conv3d_transpose(topi.x86.conv3d_transpose_ncdhw),
wrap_topi_schedule(topi.x86.schedule_conv3d_transpose_ncdhw),
name="conv3d_transpose_ncdhw.x86",
)
return strategy
@conv3d_strategy.register("cpu")
def conv3d_strategy_cpu(attrs, inputs, out_type, target):
"""conv3d generic strategy"""
strategy = _op.OpStrategy()
layout = attrs.data_layout
need_auto_scheduler_layout = is_auto_scheduler_enabled()
need_meta_schedule_layout = is_meta_schedule_enabled()
if need_auto_scheduler_layout or need_meta_schedule_layout:
# Use auto-scheduler. We should provide clear compute definition without autotvm templates
# or packed layouts.
if layout == "NCDHW":
strategy.add_implementation(
wrap_compute_conv3d(topi.nn.conv3d_ncdhw),
naive_schedule,
name="conv3d_ncdhw.x86",
)
elif layout == "NDHWC":
strategy.add_implementation(
wrap_compute_conv3d(
topi.nn.conv3d_ndhwc,
need_auto_scheduler_layout=need_auto_scheduler_layout,
need_meta_schedule_layout=need_meta_schedule_layout,
),
naive_schedule,
name="conv3d_ndhwc.x86",
)
else:
raise ValueError("Not support this layout {} yet".format(layout))
else:
# Use autotvm templates
if layout == "NCDHW":
strategy.add_implementation(
wrap_compute_conv3d(topi.x86.conv3d_ncdhw),
wrap_topi_schedule(topi.x86.schedule_conv3d_ncdhw),
name="conv3d_ncdhw.x86",
)
elif layout == "NDHWC":
strategy.add_implementation(
wrap_compute_conv3d(topi.x86.conv3d_ndhwc),
wrap_topi_schedule(topi.x86.schedule_conv3d_ndhwc),
name="conv3d_ndhwc.x86",
)
else:
raise ValueError("Not support this layout {} yet".format(layout))
return strategy
@conv1d_strategy.register("cpu")
def conv1d_strategy_cpu(attrs, inputs, out_type, target):
"""conv1d x86 strategy"""
layout = attrs.data_layout
groups = attrs.groups
dilation = get_const_tuple(attrs.dilation)
if dilation[0] < 1:
raise ValueError("dilation should be a positive value")
strategy = _op.OpStrategy()
if groups == 1:
if layout == "NCW":
strategy.add_implementation(
wrap_compute_conv1d(topi.nn.conv1d_ncw),
wrap_topi_schedule(topi.x86.schedule_conv1d_ncw),
name="conv1d_ncw.x86",
)
elif layout == "NWC":
strategy.add_implementation(
wrap_compute_conv1d(topi.nn.conv1d_nwc),
wrap_topi_schedule(topi.x86.schedule_conv1d_nwc),
name="conv1d_nwc.x86",
)
else:
raise ValueError("Unsupported conv1d layout {}".format(layout))
else:
if layout == "NCW":
strategy.add_implementation(
wrap_compute_group_conv1d(topi.nn.group_conv1d_ncw),
wrap_topi_schedule(topi.x86.schedule_group_conv1d_ncw),
name="group_conv1d_ncw.x86",
)
elif layout == "NWC":
strategy.add_implementation(
wrap_compute_group_conv1d(topi.nn.group_conv1d_nwc),
wrap_topi_schedule(topi.x86.schedule_group_conv1d_nwc),
name="group_conv1d_nwc.x86",
)
else:
raise ValueError("Unsupported conv1d layout {}".format(layout))
return strategy
@matmul_strategy.register("cpu")
def matmul_strategy_cpu(attrs, inputs, out_type, target):
"""matmul x86 strategy"""
strategy = _op.OpStrategy()
same_type = inputs[0].dtype == inputs[1].dtype == out_type.dtype
dtype = inputs[0].dtype
u8s8s32 = dtype == "uint8" and inputs[1].dtype == "int8" and out_type.dtype == "int32"
if "cblas" in target.libs:
length_before = len(strategy.specializations) if strategy.specializations else 0
with SpecializedCondition(same_type and dtype in ["float32", "float64"]):
strategy.add_implementation(
wrap_compute_matmul(topi.x86.matmul_cblas),
wrap_topi_schedule(topi.x86.schedule_matmul_cblas),
name="matmul_cblas.x86",
plevel=13,
)
length_after = len(strategy.specializations) if strategy.specializations else 0
if length_before == length_after:
            logger.warning(
                "Currently cblas only supports float32 or float64 data types. Skip."
            )
if "mkl" in target.libs:
length_before = len(strategy.specializations) if strategy.specializations else 0
with SpecializedCondition(same_type and dtype in ["float32", "float64"] or u8s8s32):
strategy.add_implementation(
wrap_compute_matmul(topi.x86.matmul_mkl),
wrap_topi_schedule(topi.x86.schedule_matmul_mkl),
name="matmul_mkl.x86",
plevel=14,
)
length_after = len(strategy.specializations) if strategy.specializations else 0
if length_before == length_after:
            logger.warning(
                "Currently mkl only supports float32, float64, or uint8/int8 "
                "input with int32 output. Skip."
            )
if "dnnl" in target.libs:
length_before = len(strategy.specializations) if strategy.specializations else 0
with SpecializedCondition(same_type and dtype == "float32"):
strategy.add_implementation(
wrap_compute_matmul(topi.x86.matmul_dnnl),
wrap_topi_schedule(topi.x86.schedule_matmul_dnnl),
name="matmul_dnnl.x86",
plevel=15,
)
length_after = len(strategy.specializations) if strategy.specializations else 0
if length_before == length_after:
logger.warning("Currently dnnl only support the data type to be float32. Skip.")
need_auto_scheduler_layout = is_auto_scheduler_enabled()
need_meta_schedule_layout = is_meta_schedule_enabled()
if need_auto_scheduler_layout or need_meta_schedule_layout:
strategy.add_implementation(
wrap_compute_matmul(
topi.nn.matmul,
need_auto_scheduler_layout=need_auto_scheduler_layout,
need_meta_schedule_layout=need_meta_schedule_layout,
),
naive_schedule,
name="matmul.generic",
plevel=11,
)
else:
        # If no cblas/mkl/dnnl strategy was chosen
if not strategy.specializations:
            logger.warning(
                "Matmul is not optimized for x86. "
                "Consider using cblas/mkl/dnnl for better performance."
            )
strategy.add_implementation(
wrap_compute_matmul(topi.nn.matmul),
naive_schedule,
name="matmul.generic",
)
return strategy
@dense_strategy.register("cpu")
def dense_strategy_cpu(attrs, inputs, out_type, target):
"""dense x86 strategy"""
strategy = _op.OpStrategy()
same_type = inputs[0].dtype == inputs[1].dtype == out_type.dtype
dtype = inputs[0].dtype
u8s8s32 = dtype == "uint8" and inputs[1].dtype == "int8" and out_type.dtype == "int32"
strategy.add_implementation(
wrap_compute_dense(topi.x86.dense_nopack),
wrap_topi_schedule(topi.x86.schedule_dense_nopack),
name="dense_nopack.x86",
plevel=5,
)
strategy.add_implementation(
wrap_compute_dense(topi.x86.dense_pack),
wrap_topi_schedule(topi.x86.schedule_dense_pack),
name="dense_pack.x86",
plevel=10,
)
need_auto_scheduler_layout = is_auto_scheduler_enabled()
need_meta_schedule_layout = is_meta_schedule_enabled()
if need_auto_scheduler_layout or need_meta_schedule_layout:
strategy.add_implementation(
wrap_compute_dense(
topi.nn.dense,
need_auto_scheduler_layout=need_auto_scheduler_layout,
need_meta_schedule_layout=need_meta_schedule_layout,
),
naive_schedule,
name="dense.generic",
plevel=11,
)
if "cblas" in target.libs:
with SpecializedCondition(same_type and dtype in ["float32", "float64"]):
strategy.add_implementation(
wrap_compute_dense(topi.x86.dense_cblas),
wrap_topi_schedule(topi.x86.schedule_dense_cblas),
name="dense_cblas.x86",
plevel=13,
)
if "mkl" in target.libs:
with SpecializedCondition(same_type and dtype in ["float32", "float64"] or u8s8s32):
strategy.add_implementation(
wrap_compute_dense(topi.x86.dense_mkl),
wrap_topi_schedule(topi.x86.schedule_dense_mkl),
name="dense_mkl.x86",
plevel=14,
)
if "dnnl" in target.libs:
with SpecializedCondition(same_type and dtype == "float32"):
strategy.add_implementation(
wrap_compute_dense(topi.x86.dense_dnnl),
wrap_topi_schedule(topi.x86.schedule_dense_dnnl),
name="dense_dnnl.x86",
plevel=15,
)
return strategy
@dense_pack_strategy.register("cpu")
def dense_pack_strategy_cpu(attrs, inputs, out_type, target):
"""dense_pack x86 strategy"""
strategy = _op.OpStrategy()
if (
inputs[0].dtype == "uint8"
and inputs[1].dtype == "int8"
and out_type.dtype == "int32"
and attrs["weight_layout"] == "NC16n4c"
):
strategy.add_implementation(
wrap_compute_dense(topi.x86.dense_vnni),
wrap_topi_schedule(topi.x86.schedule_dense_vnni),
name="dense_vnni.x86",
plevel=12,
)
else:
strategy.add_implementation(
wrap_compute_dense(topi.x86.dense_pack),
wrap_topi_schedule(topi.x86.schedule_dense_pack),
name="dense_pack.x86",
plevel=10,
)
return strategy
@batch_matmul_strategy.register("cpu")
def batch_matmul_strategy_cpu(attrs, inputs, out_type, target):
"""batch_matmul x86 strategy"""
strategy = _op.OpStrategy()
mcpu = Target.current().mcpu
need_auto_scheduler_layout = is_auto_scheduler_enabled()
need_meta_schedule_layout = is_meta_schedule_enabled()
if (
not attrs.transpose_a
and attrs.transpose_b
and target_has_vnni(mcpu)
and inputs[0].dtype == "uint8"
and inputs[1].dtype == "int8"
and inputs[1].shape[-2] % 16 == 0
and inputs[1].shape[-1] % 4 == 0
):
strategy.add_implementation(
wrap_compute_batch_matmul(topi.x86.batch_matmul_vnni_compute, need_out_dtype=True),
wrap_topi_schedule(topi.x86.schedule_batch_matmul_vnni),
name="batch_matmul_vnni.x86",
plevel=10,
)
elif is_dynamic(out_type) or need_auto_scheduler_layout or need_meta_schedule_layout:
strategy.add_implementation(
wrap_compute_batch_matmul(
topi.nn.batch_matmul,
need_out_dtype=True,
need_auto_scheduler_layout=need_auto_scheduler_layout,
need_meta_schedule_layout=need_meta_schedule_layout,
),
wrap_topi_schedule(topi.generic.nn.schedule_batch_matmul),
name="batch_matmul.generic",
plevel=10,
)
else:
strategy.add_implementation(
wrap_compute_batch_matmul(topi.x86.batch_matmul, need_out_dtype=True),
wrap_topi_schedule(topi.x86.schedule_batch_matmul),
name="batch_matmul.x86",
plevel=10,
)
if "cblas" in target.libs:
strategy.add_implementation(
wrap_compute_batch_matmul(topi.x86.batch_matmul_cblas),
wrap_topi_schedule(topi.x86.schedule_batch_matmul_cblas),
name="batch_matmul_cblas.x86",
plevel=15,
)
if "mkl" in target.libs:
strategy.add_implementation(
wrap_compute_batch_matmul(topi.x86.batch_matmul_mkl),
wrap_topi_schedule(topi.x86.schedule_batch_matmul_mkl),
name="batch_matmul_mkl.x86",
plevel=15,
)
return strategy
@sparse_dense_strategy.register("cpu")
def sparse_dense_strategy_cpu(attrs, inputs, out_type, target):
"""sparse dense x86 strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sparse_dense(topi.nn.sparse_dense),
wrap_topi_schedule(topi.x86.schedule_sparse_dense),
name="sparse_dense.x86",
plevel=10,
)
return strategy
@sparse_conv2d_strategy.register("cpu")
def sparse_conv2d_strategy_cpu(attrs, inputs, out_type, target):
"""sparse conv2d x86 strategy"""
strategy = _op.OpStrategy()
if attrs["kernel_size"][0] == 1:
strategy.add_implementation(
wrap_compute_sparse_conv2d(topi.nn.sparse_conv2d),
wrap_topi_schedule(topi.generic.schedule_sparse_conv2d),
name="sparse_conv2d.generic",
)
elif attrs["kernel_size"][0] == 3:
if attrs["layout"] == "NHWC":
strategy.add_implementation(
wrap_compute_sparse_conv2d(topi.x86.spconv2d_3x3_nhwc),
wrap_topi_schedule(topi.x86.schedule_spconv2d_3x3_nhwc),
name="conv3x3_spNHWC.x86",
)
elif attrs["layout"] == "NCHW":
            strategy.add_implementation(
                wrap_compute_sparse_conv2d(topi.x86.spconv2d_3x3_nchw),
                wrap_topi_schedule(topi.x86.schedule_spconv2d_3x3_nchw),
                name="conv3x3_spNCHW.x86",
            )
return strategy
@roi_align_strategy.register("cpu")
def roi_align_strategy_cpu(attrs, inputs, out_type, target):
"""roi_align x86 strategy"""
strategy = _op.OpStrategy()
layout = attrs.layout
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_roi_align(topi.x86.roi_align_nchw),
wrap_topi_schedule(topi.generic.schedule_roi_align),
name="roi_align.x86",
)
else:
assert layout == "NHWC", "layout must be NCHW or NHWC."
strategy.add_implementation(
wrap_compute_roi_align(topi.vision.rcnn.roi_align_nhwc),
wrap_topi_schedule(topi.generic.schedule_roi_align),
name="roi_align.x86",
)
return strategy
@bitserial_conv2d_strategy.register("cpu")
def bitserial_conv2d_strategy_cpu(attrs, inputs, out_type, target):
"""bitserial_conv2d x86 strategy"""
strategy = _op.OpStrategy()
layout = attrs.data_layout
if layout == "NCHW":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.x86.bitserial_conv2d_nchw),
wrap_topi_schedule(topi.x86.schedule_bitserial_conv2d_nchw),
name="bitserial_conv2d_nchw.x86",
)
elif layout == "NHWC":
strategy.add_implementation(
wrap_compute_bitserial_conv2d(topi.x86.bitserial_conv2d_nhwc),
wrap_topi_schedule(topi.x86.schedule_bitserial_conv2d_nhwc),
name="bitserial_conv2d_nhwc.x86",
)
else:
raise ValueError("Data layout {} not supported.".format(layout))
return strategy
@bitserial_dense_strategy.register("cpu")
def bitserial_dense_strategy_cpu(attrs, inputs, out_type, target):
"""bitserial_dense x86 strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_bitserial_dense(topi.x86.bitserial_dense),
wrap_topi_schedule(topi.x86.schedule_bitserial_dense),
name="bitserial_dense.x86",
)
return strategy
@scatter_nd_strategy.register("cpu")
def scatter_nd_strategy_cpu(attrs, inputs, out_type, target):
"""scatter_nd x86 strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter_nd(topi.x86.scatter_nd),
wrap_topi_schedule(topi.generic.schedule_extern),
name="scatter_nd.x86",
plevel=10,
)
return strategy
@conv2d_winograd_without_weight_transform_strategy.register("cpu")
def conv2d_winograd_without_weight_transform_strategy_cpu(attrs, inputs, out_type, target):
"""conv2d_winograd_without_weight_transform cpu strategy"""
dilation = attrs.get_int_tuple("dilation")
groups = attrs.get_int("groups")
layout = attrs.data_layout
strides = attrs.get_int_tuple("strides")
assert dilation == (1, 1), "Do not support dilate now"
assert strides == (1, 1), "Do not support strides now"
assert groups == 1, "Do not support arbitrary group number"
strategy = _op.OpStrategy()
need_auto_scheduler_layout = is_auto_scheduler_enabled()
need_meta_schedule_layout = is_meta_schedule_enabled()
if layout == "NHWC":
if need_meta_schedule_layout:
strategy.add_implementation(
wrap_compute_conv2d(
topi.nn.conv2d_winograd_nhwc_without_weight_transform,
need_auto_scheduler_layout=False,
need_meta_schedule_layout=True,
),
naive_schedule,
name="ansor.winograd",
)
elif need_auto_scheduler_layout:
strategy.add_implementation(
wrap_compute_conv2d(
topi.nn.conv2d_winograd_nhwc_without_weight_transform,
need_auto_scheduler_layout=True,
need_meta_schedule_layout=False,
),
naive_schedule,
name="ansor.winograd",
)
else:
raise RuntimeError("Both AutoScheduler and MetaSchedule are not enabled")
else:
raise RuntimeError(
"Unsupported conv2d_winograd_without_weight_transform layout {}".format(layout)
)
return strategy
@concatenate_strategy.register(["cpu"])
def concatenate_strategy_cpu(attrs, inputs, out_type, target):
"""concatenate x86 strategy"""
strategy = _op.OpStrategy()
use_only_old_concat = False
for inpt in inputs:
shape = inpt.shape
for i in shape:
if not isinstance(i, tir.expr.IntImm):
use_only_old_concat = True
break
if use_only_old_concat:
strategy.add_implementation(
wrap_compute_concat(topi.transform.concatenate),
wrap_topi_schedule(topi.x86.injective.schedule_concatenate),
name="concatenate.generic",
)
else:
strategy.add_implementation(
wrap_compute_concat(topi.x86.concatenate),
wrap_topi_schedule(topi.x86.schedule_concatenate_cpu),
name="concatenate.cpu",
)
strategy.add_implementation(
wrap_compute_concat(topi.transform.concatenate),
wrap_topi_schedule(topi.x86.injective.schedule_concatenate),
name="concatenate.generic",
)
return strategy
@batch_norm_strategy.register(["cpu"])
def batch_norm_strategy_cpu(attrs, inputs, out_type, target):
"""batch_norm x86 strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_batch_norm(topi.nn.batch_norm),
wrap_topi_schedule(topi.x86.schedule_batch_norm),
name="batch_norm.cpu",
)
return strategy
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/tensor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Basic tensor operations."""
# pylint: disable=redefined-builtin, unused-argument
from tvm import target
from tvm.runtime import ndarray as _nd
from tvm.runtime import Device as _Device
from tvm.te.hybrid import script
from . import _make
from .dyn import _make as _dyn_make
from ..expr import Tuple, Expr, Constant
from . import op as reg
def _make_virtual_device(device):
if isinstance(device, _Device):
return target.VirtualDevice(device)
if isinstance(device, str):
return target.VirtualDevice(_nd.device(device))
raise ValueError("expecting a Device or device name, but received a %s" % (type(device)))
# We create a wrapper function for each operator in the
# python side to call into the positional _make.OpName function.
#
# We make this decision so that we can:
# - Declare a Python docstring for each function
# - Enable keyword arguments easily
# - Not put too much burden on FFI to support complicated features
# like default value and keyword arguments
def log(data):
"""Compute elementwise log of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.log(data)
def log2(data):
"""Compute elementwise log to the base 2 of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.log2(data)
def log10(data):
"""Compute elementwise log to the base 10 of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.log10(data)
def tan(data):
"""Compute elementwise tan of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.tan(data)
def cos(data):
"""Compute elementwise cos of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.cos(data)
def cosh(data):
"""Compute elementwise cosh of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.cosh(data)
def sin(data):
"""Compute elementwise sin of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.sin(data)
def sinh(data):
"""Compute elementwise sinh of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.sinh(data)
def acos(data):
"""Compute elementwise acos of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.acos(data)
def acosh(data):
"""Compute elementwise acosh of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.acosh(data)
def asin(data):
"""Compute elementwise asin of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.asin(data)
def asinh(data):
"""Compute elementwise asinh of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.asinh(data)
def atan(data):
"""Compute elementwise atan of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.atan(data)
def atanh(data):
"""Compute elementwise atanh of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.atanh(data)
def exp(data):
"""Compute elementwise exp of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.exp(data)
def erf(data):
"""Compute elementwise error function of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.erf(data)
def sqrt(data):
"""Compute elementwise sqrt of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.sqrt(data)
def rsqrt(data):
"""Compute elementwise rsqrt of data.
.. math::
1/sqrt(x)
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.rsqrt(data)
def sigmoid(data):
"""Compute elementwise sigmoid of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.sigmoid(data)
def floor(data):
"""Compute element-wise floor of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.floor(data)
def ceil(data):
"""Compute element-wise ceil of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.ceil(data)
def trunc(data):
"""Compute element-wise trunc of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.trunc(data)
def round(data):
"""Compute element-wise round of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.round(data)
def abs(data):
"""Compute element-wise absolute of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.abs(data)
def sign(data):
"""Compute element-wise absolute of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.sign(data)
def tanh(data):
"""Compute element-wise tanh of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.tanh(data)
def negative(data):
"""Compute element-wise negative of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.negative(data)
def logical_not(data):
"""Compute element-wise logical not of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.logical_not(data)
def bitwise_not(data):
"""Compute element-wise bitwise not of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.bitwise_not(data)
def add(lhs, rhs):
"""Addition with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
Examples
--------
.. code:: python
x = relay.Var("a") # shape is [2, 3]
y = relay.Var("b") # shape is [2, 1]
z = relay.add(x, y) # result shape is [2, 3]
"""
return _make.add(lhs, rhs)
def subtract(lhs, rhs):
"""Subtraction with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.subtract(lhs, rhs)
def multiply(lhs, rhs):
"""Multiplication with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.multiply(lhs, rhs)
def divide(lhs, rhs):
"""Division with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.divide(lhs, rhs)
def floor_divide(lhs, rhs):
"""Floor division with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.floor_divide(lhs, rhs)
def trunc_divide(lhs, rhs):
"""Trunc division with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.trunc_divide(lhs, rhs)
def power(lhs, rhs):
"""Power with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.power(lhs, rhs)
def mod(lhs, rhs):
"""Mod with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.mod(lhs, rhs)
def floor_mod(lhs, rhs):
"""Floor mod with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.floor_mod(lhs, rhs)
def trunc_mod(lhs, rhs):
"""Trunc mod with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.trunc_mod(lhs, rhs)
def logical_and(lhs, rhs):
"""logical AND with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.logical_and(lhs, rhs)
def logical_or(lhs, rhs):
"""logical OR with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.logical_or(lhs, rhs)
def logical_xor(lhs, rhs):
"""logical XOR with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.logical_xor(lhs, rhs)
def bitwise_and(lhs, rhs):
"""bitwise AND with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.bitwise_and(lhs, rhs)
def bitwise_or(lhs, rhs):
"""bitwise OR with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.bitwise_or(lhs, rhs)
def bitwise_xor(lhs, rhs):
"""bitwise XOR with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.bitwise_xor(lhs, rhs)
def equal(lhs, rhs):
"""Broadcasted elementwise test for (lhs == rhs).
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.equal(lhs, rhs)
def not_equal(lhs, rhs):
"""Broadcasted elementwise test for (lhs != rhs).
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.not_equal(lhs, rhs)
def less(lhs, rhs):
"""Broadcasted elementwise test for (lhs < rhs).
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.less(lhs, rhs)
def less_equal(lhs, rhs):
"""Broadcasted elementwise test for (lhs <= rhs).
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.less_equal(lhs, rhs)
def greater(lhs, rhs):
"""Broadcasted elementwise test for (lhs > rhs).
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.greater(lhs, rhs)
def greater_equal(lhs, rhs):
"""Broadcasted elementwise test for (lhs >= rhs).
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.greater_equal(lhs, rhs)
def maximum(lhs, rhs):
"""Maximum with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.maximum(lhs, rhs)
def minimum(lhs, rhs):
"""Minimum with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.minimum(lhs, rhs)
def right_shift(lhs, rhs):
"""Right shift with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.right_shift(lhs, rhs)
def left_shift(lhs, rhs):
"""Left shift with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side input data
rhs : relay.Expr
The right hand side input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.left_shift(lhs, rhs)
def zeros(shape, dtype):
"""Fill array with zeros.
Parameters
----------
shape : tuple of int or relay.Expr
The shape of the target.
dtype : data type
The data type of the target.
Returns
-------
result : relay.Expr
The resulting tensor.
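    Examples
    --------
    .. code:: python
        # Illustrative sketch: static shapes stay static; Constant shapes
        # are unpacked; any other Expr dispatches to the dynamic op.
        relay.zeros((2, 3), "float32")               # static shape
        relay.zeros(relay.const([2, 3]), "float32")  # Constant, unpacked to [2, 3]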
"""
if isinstance(shape, Constant):
shape = list(shape.data.numpy())
if isinstance(shape, Expr):
return _dyn_make.zeros(shape, dtype)
if isinstance(shape, int):
shape = [shape]
if isinstance(shape, (list, tuple)):
shape = list(shape)
return _make.zeros(shape, dtype)
def zeros_like(data):
"""Returns an array of zeros, with same type and shape as the input.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.zeros_like(data)
def ones(shape, dtype):
"""Fill array with ones.
Parameters
----------
shape : tuple of int or relay.Expr
The shape of the target.
dtype : data type
The data type of the target.
Returns
-------
result : relay.Expr
The resulting tensor.
"""
if isinstance(shape, Constant):
shape = list(shape.data.numpy())
if isinstance(shape, Expr):
return _dyn_make.ones(shape, dtype)
if isinstance(shape, int):
shape = [shape]
if isinstance(shape, (list, tuple)):
shape = list(shape)
return _make.ones(shape, dtype)
def ones_like(data):
"""Returns an array of ones, with same type and shape as the input.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.ones_like(data)
def clip(a, a_min, a_max):
"""Clip the elements in `a` between `a_min` and `a_max`.
`a_min` and `a_max` are cast to `a`'s dtype.
Parameters
----------
a : relay.Expr
The input tensor.
a_min : float
The clip minimum.
a_max : float
The clip maximum.
Returns
-------
result : relay.Expr
`a` with elements clipped between `a_min` and `a_max`.
Examples
--------
.. code:: python
x = relay.Constant(tvm.nd.array([0, 1, 5, 3, 4, 2]))
relay.clip(x, 1., 4.)
# [1, 1, 4, 3, 4, 2]
"""
return _make.clip(a, a_min, a_max)
def fixed_point_multiply(data, multiplier, shift):
"""Fixed point multiplication between data and a fixed point
constant expressed as multiplier * 2^(-shift), where multiplier
is a Q-number with 31 fractional bits
Parameters
----------
data : relay.Expr
The input tensor.
multiplier : int
The integer multiplier of the fixed point constant.
shift : int
The integer shift of the fixed point constant.
Returns
-------
result : relay.Expr
The output of the fixed point multiplication
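    Examples
    --------
    A minimal illustrative sketch (values chosen for exposition, not from a
    real quantized model): to scale an int32 tensor by 0.5, note that
    0.5 = 0.5 * 2^0, and 0.5 encoded as a Q31 number is
    round(0.5 * 2**31) = 1073741824, with shift = 0.
    .. code:: python
        x = relay.var("x", shape=(4,), dtype="int32")
        # multiplier = 1073741824 (Q31 encoding of 0.5), shift = 0
        y = relay.fixed_point_multiply(x, 1073741824, 0)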
"""
return _make.fixed_point_multiply(data, multiplier, shift)
def concatenate(data, axis):
"""Concatenate the input tensors along the given axis.
Parameters
----------
data : Union(List[relay.Expr], Tuple[relay.Expr])
A list of tensors.
axis : int
The axis along which the tensors are concatenated.
Returns
-------
result: relay.Expr
The concatenated tensor.
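    Examples
    --------
    .. code:: python
        # Illustrative sketch: join tensors along an existing axis.
        x = relay.var("x", shape=(2, 3))
        y = relay.var("y", shape=(4, 3))
        z = relay.concatenate([x, y], axis=0)  # result shape is (6, 3)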
"""
data = list(data)
if not data:
raise ValueError("relay.concatenate requires data to be non-empty.")
if not isinstance(axis, int):
raise ValueError("For now, we only support integer axis")
return _make.concatenate(Tuple(data), axis)
def einsum(data, equation):
"""Evaluates the Einstein summation convention on data
Parameters
----------
data : Union(List[relay.Expr], Tuple[relay.Expr])
A list of tensors.
equation : str
The einsum expression string.
Returns
-------
result : relay.Expr
The output tensor from the einsum op.
"""
data = list(data)
if not data:
raise ValueError("relay.einsum requires data to be non-empty.")
if not isinstance(equation, str):
raise ValueError("einsum `equation` must be a str")
return _make.einsum(Tuple(data), equation)
def stack(data, axis):
"""Join a sequence of arrays along a new axis.
Parameters
----------
data : Union(List[relay.Expr], relay.Expr)
A list of tensors or a Relay expression that evaluates to a tuple of tensors.
axis : int
The axis in the result array along which the input arrays are stacked.
Returns
-------
ret : relay.Expr
The stacked tensor.
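    Examples
    --------
    .. code:: python
        # Illustrative sketch: unlike concatenate, stack creates a new axis.
        x = relay.var("x", shape=(2, 2))
        y = relay.var("y", shape=(2, 2))
        z = relay.stack([x, y], axis=0)  # result shape is (2, 2, 2)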
"""
if not data:
raise ValueError("relay.stack requires data to be non-empty.")
if not isinstance(axis, int):
raise ValueError("For now, we only support integer axis")
if not isinstance(data, Expr):
data = Tuple(list(data))
return _make.stack(data, axis)
def copy(data):
"""Copy a tensor.
Parameters
----------
data : relay.Expr
The tensor to be copied.
Returns
-------
result: relay.Expr
The copied result.
"""
return _make.copy(data)
@script
def _copy_shape_func_tensor(data_shape):
ndim = data_shape.shape[0]
out = output_tensor((ndim,), "int64")
for i in const_range(ndim):
out[i] = data_shape[i]
return out
@script
def _copy_shape_func_scalar(data_shape):
out = output_tensor((), "int64")
return out
@reg.register_shape_func("copy", False)
def copy_shape_func(attrs, inputs, _):
"""
Shape function for copy op.
"""
input = inputs[0]
if len(input.shape) == 0:
return [_copy_shape_func_scalar(input)]
return [_copy_shape_func_tensor(input)]
def device_copy(data, src_device, dst_device):
"""Copy data from the source device to the destination device. This
    operator helps transfer data between different devices for
heterogeneous execution.
Parameters
----------
data : tvm.relay.Expr
The tensor to be copied.
src_device : Union[:py:class:`Device`, str]
The source device where the data is copied from.
dst_device : Union[:py:class:`Device`, str]
The destination device where the data is copied to.
Returns
-------
result : tvm.relay.Expr
The copied result.
"""
return _make.DeviceCopy(
data, _make_virtual_device(src_device), _make_virtual_device(dst_device)
)
def shape_of(data, dtype="int32"):
"""Get shape of a tensor.
Parameters
----------
data : tvm.relay.Expr
The input tensor.
dtype : str, optional
The target data type.
Returns
-------
result : tvm.relay.Expr
The shape tensor.
"""
return _make.shape_of(data, dtype)
def ndarray_size(data, dtype="int32"):
"""Get number of elements of input tensor.
Parameters
----------
data : tvm.relay.Expr
The input tensor.
dtype : str, optional
The target data type.
Returns
-------
result : tvm.relay.Expr
The number of elements of input tensor.
"""
return _make.ndarray_size(data, dtype)
def isnan(data):
"""Check nan in input data element-wise.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.isnan(data)
def isfinite(data):
"""Compute element-wise finiteness of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.isfinite(data)
def isinf(data):
"""Compute element-wise infiniteness of data.
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.isinf(data)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-outside-toplevel
"""Transform operators."""
from ...tir import expr as _expr
from ..expr import Constant, Expr, Tuple, TupleWrapper, const
from . import _make
from .dyn import _make as _dyn_make
from .tensor import shape_of
def sliding_window(data, axis, window_shape, strides):
"""Slide a window over the data tensor.
Parameters
----------
data : relay.Expr
The input data to the operator.
axis : int
What axis the window begins sliding over. Window will be slid over
this axis and all following axes. The axis value determines the window
shape (and thus, the number of strides): window shape and strides must
both be of length `data.ndim-axis`.
window_shape : List[int]
The window shape to form over the input. Window shape must be of length
`data.ndim-axis`.
strides : List[int]
How to stride the window along each dimension. Strides must be of length
`data.ndim-axis`.
Returns
-------
result : relay.Expr
The resulting tensor.
Examples
--------
.. code-block:: python
# Slide a window of shape (3, 4, 5) over the x tensor, beginning with
# dimension 1, which slides the window over the two subtensors of
# shape (3, 32, 32).
x = relay.var("x", relay.TensorType((2, 3, 32, 32), "float32"))
y = relay.sliding_window(x, 1, [3, 4, 5], [1, 2, 3])
data = np.random.rand(2, 3, 32, 32).astype("float32")
result = create_executor().evaluate(y, {x: relay.const(data)}).numpy()
# The resulting shape still has batch size 2. Each dimension in
# (1, 15, 10) represents the locations where we were able to
# form a window; that is, we were able to place the window
# in one place along the dimension of length 3, 15 places along
# the dimension of length 32 (when striding by 2), and 10 places
# along the second dimension of length 32 (when striding by 3).
# The remaining dimension (3, 4, 5) represent the formed windows.
assert result.shape == (2, 1, 15, 10, 3, 4, 5)
assert np.array_equal(result[0, 0, 0, 0, :, :, :], data[0, :, 0:4, 0:5])
assert np.array_equal(result[1, 0, 7, 3, :, :, :], data[1, :, 14:18, 9:14])
assert np.array_equal(result[1, 0, 14, 9, :, :, :], data[1, :, 28:32, 27:32])
"""
from .. import _ffi_api as _relay_make
return _relay_make.sliding_window(data, axis, window_shape, strides)
def cast(data, dtype):
"""Cast input tensor to data type.
Parameters
----------
data : relay.Expr
The input data to the operator.
dtype: str
The target data type
Returns
-------
result : relay.Expr
The casted result.
"""
from .. import _ffi_api as _relay_make
return _relay_make.cast(data, dtype)
def cast_like(data, dtype_like):
"""Cast input tensor to data type of another tensor.
Parameters
----------
data : relay.Expr
The input data to the operator.
dtype_like: relay.Expr
The tensor to cast to.
Returns
-------
result : relay.Expr
The casted result.
"""
from .. import _ffi_api as _relay_make
return _relay_make.cast_like(data, dtype_like)
def reinterpret(data, dtype):
"""Reinterpret input tensor to data type.
Parameters
----------
data : relay.Expr
The input data to the operator.
dtype: str
The target data type
Returns
-------
result : relay.Expr
The reinterpreted result.
"""
from .. import _make as _relay_make
return _relay_make.reinterpret(data, dtype)
def expand_dims(data, axis, num_newaxis=1):
"""Insert `num_newaxis` axes at the position given by `axis`.
Parameters
----------
data : relay.Expr
The input data to the operator.
axis : Union[int, Expr]
The axis at which the input array is expanded.
Should lie in range `[-data.ndim - 1, data.ndim]`.
If `axis < 0`, it is the first axis inserted;
If `axis >= 0`, it is the last axis inserted in Python's negative indexing.
num_newaxis : int
Number of axes to be inserted. Should be >= 0.
Returns
-------
result : relay.Expr
The reshaped result.
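    Examples
    --------
    .. code-block:: python
        # Illustrative sketch:
        x = relay.var("x", shape=(2, 3))
        relay.expand_dims(x, axis=1)                 # shape (2, 1, 3)
        relay.expand_dims(x, axis=0, num_newaxis=2)  # shape (1, 1, 2, 3)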
"""
if isinstance(axis, int):
return _make.expand_dims(data, axis, num_newaxis)
if isinstance(axis, Expr):
# TODO (AndrewZhaoLuo): investigate performance issues with consecutive
# dynamic expand_dims on non-llvm targets.
return _dyn_make.expand_dims(data, axis, num_newaxis)
raise ValueError(f"Unknown type for axis: {type(axis)}")
def transpose(data, axes=None):
"""Permutes the dimensions of an array.
Parameters
----------
data : relay.Expr
The input data to the operator.
axes : None or List[int]
The target axes order, reverse order if not specified.
Returns
-------
result : relay.Expr
The transposed result.
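    Examples
    --------
    .. code-block:: python
        # Illustrative sketch:
        x = relay.var("x", shape=(2, 3, 4))
        relay.transpose(x)                  # axes reversed, shape (4, 3, 2)
        relay.transpose(x, axes=[1, 0, 2])  # shape (3, 2, 4)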
"""
if axes is not None:
axes = list(axes)
return _make.transpose(data, axes)
def squeeze(data, axis=None):
"""Squeeze axes in the array.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
axis : None or List[int] or Expr
The set of axes to remove.
If axis = None, remove all axis of dimensions 1.
If any specified axis has dimension that does not equal 1, it is an error.
Returns
-------
result : tvm.relay.Expr
The squeezed result.
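    Examples
    --------
    .. code-block:: python
        # Illustrative sketch:
        x = relay.var("x", shape=(1, 2, 1, 3))
        relay.squeeze(x)            # all unit axes removed, shape (2, 3)
        relay.squeeze(x, axis=[0])  # shape (2, 1, 3)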
"""
if isinstance(axis, Constant):
axis = list(axis.data.numpy())
if isinstance(axis, Expr):
return _dyn_make.squeeze(data, axis)
return _make.squeeze(data, axis)
def reshape(data, newshape, allowzero=False):
"""Reshape the input array.
    To give users more convenience without manually doing shape inference,
some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}.
The significance of each is explained below:
``0`` copy this dimension from the input to the output shape.
.. code-block:: python
data.shape = (2,3,4), newshape = (4,0,2), result.shape = (4,3,2)
data.shape = (2,3,4), newshape = (2,0,0), result.shape = (2,3,4)
Note: If the parameter allowzero is manually set to true, it specifies a
special case where 0 actually means a true empty tensor.
``-1`` infers the dimension of the output shape by using the remainder of
    the input dimensions, keeping the size of the new array the same as that of the input array.
At most one dimension of shape can be -1.
.. code-block:: python
data.shape = (2,3,4), newshape = (6,1,-1), result.shape = (6,1,4)
data.shape = (2,3,4), newshape = (3,-1,8), result.shape = (3,1,8)
data.shape = (2,3,4), newshape = (-1,), result.shape = (24,)
``-2`` copy all/remainder of the input dimensions to the output shape.
.. code-block:: python
data.shape = (2,3,4), newshape = (-2,), result.shape = (2,3,4)
data.shape = (2,3,4), newshape = (2,-2), result.shape = (2,3,4)
data.shape = (2,3,4), newshape = (-2,1,1), result.shape = (2,3,4,1,1)
``-3`` use the product of two consecutive dimensions of the input shape
as the output dimension.
.. code-block:: python
data.shape = (2,3,4), newshape = (-3,4), result.shape = (6,4)
data.shape = (2,3,4,5), newshape = (-3,-3), result.shape = (6,20)
data.shape = (2,3,4), newshape = (0,-3), result.shape = (2,12)
data.shape = (2,3,4), newshape = (-3,-2), result.shape = (6,4)
``-4`` split one dimension of the input into two dimensions passed subsequent
to -4 in shape (can contain -1).
.. code-block:: python
data.shape = (2,3,4), newshape = (-4,1,2,-2), result.shape = (1,2,3,4)
data.shape = (2,3,4), newshape = (2,-4,-1,3,-2), result.shape = (2,1,3,4)
Parameters
----------
data : relay.Expr
The input data to the operator.
newshape : Union[int, Tuple[int], List[int]] or relay.Expr
The new shape. Should be compatible with the original shape.
allowzero : Bool, optional
If true, then treat zero as true empty tensor rather than a copy instruction.
Returns
-------
result : relay.Expr
The reshaped result.
"""
if isinstance(newshape, Constant):
newshape = list(newshape.data.numpy())
if isinstance(newshape, Expr):
return _dyn_make.reshape(data, newshape, allowzero)
if isinstance(newshape, int):
newshape = [newshape]
if isinstance(newshape, (tuple, list)):
tempshape = []
for shape in newshape:
if isinstance(shape, _expr.IntImm):
tempshape.append(shape.value)
else:
try:
tempshape.append(int(shape))
except ValueError as err:
raise RuntimeError("Unrecognized shape type: %s" % err)
newshape = tempshape
return _make.reshape(data, list(newshape), allowzero)
def argwhere(condition):
"""Find the indices of elements of a tensor that are
non-zero.
Parameters
----------
condition : relay.Expr
The input condition tensor.
Returns
-------
out : relay.Expr
Tensor with the indices of elements that are non-zero.
Examples
--------
.. code-block:: python
condition = [[True, False], [False, True]]
relay.argwhere(condition) = [[0, 0], [1, 1]]
"""
return _make.argwhere(condition)
def scatter(data, indices, updates, axis):
"""Update data at positions defined by indices with values in updates
Parameters
----------
data : relay.Expr
The input data to the operator.
indices : relay.Expr
The index locations to update.
updates : relay.Expr
The values to update.
axis : int
The axis to scatter on
Returns
-------
ret : relay.Expr
The computed result.
"""
return _make.scatter(data, indices, updates, axis)
def scatter_add(data, indices, updates, axis):
"""Update data by adding values in updates at positions defined by indices
Parameters
----------
data : relay.Expr
The input data to the operator.
indices : relay.Expr
The index locations to update.
updates : relay.Expr
The values to add.
axis : int
The axis to scatter_add on
Returns
-------
ret : relay.Expr
The computed result.
"""
return _make.scatter_add(data, indices, updates, axis)
def scatter_nd(data, indices, updates, mode="update"):
"""Scatter values from an array and update.
See :py:func:`tvm.topi.scatter` for how data is scattered.
Parameters
----------
data : relay.Expr
The input data to the operator.
indices : relay.Expr
The index locations to update.
updates : relay.Expr
The values to update.
mode : string
The accumulation mode for scatter. "update" or "add"
Returns
-------
ret : relay.Expr
The computed result.
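    Examples
    --------
    A minimal illustrative sketch of the indexing convention: the first axis
    of `indices` has one row per indexed dimension of `data`.
    .. code-block:: python
        data = relay.const([0, 0, 0, 0])
        indices = relay.const([[1, 3]])
        updates = relay.const([9, 8])
        relay.scatter_nd(data, indices, updates)  # [0, 9, 0, 8]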
"""
return _make.scatter_nd(data, indices, updates, mode)
def reshape_like(data, shape_like, lhs_begin=0, lhs_end=None, rhs_begin=0, rhs_end=None):
"""Reshapes the input tensor by the size of another tensor.
    For an input tensor with shape ``(d0, d1, ..., d(k-1))``, the `reshape_like` operation reshapes
the input tensor into an output tensor with the same shape as the second input tensor,
in particular reshaping the dimensions of `data` in `[lhs_begin, lhs_end)` using the dimensions
from `shape_like` in `[rhs_begin, rhs_end)`.
.. note::
Sizes for `data` and the output tensor should be compatible.
Parameters
----------
data : relay.Expr
The input data to the operator.
shape_like : relay.Expr
The tensor to reshape data like. Should be compatible with the original shape on the
reshaped dimensions.
lhs_begin : int, optional
The axis of data to begin reshaping. Default is 0.
lhs_end : int or None, optional
The axis of data where reshaping should stop, exclusive. Default is None which reshapes to
the end.
rhs_begin : int, optional
The axis of shape_like where the target shape begins. Default is 0.
rhs_end : int or None, optional
The axis of shape_like where the target shape ends, exclusive. Default is None which extends
to the end.
Returns
-------
ret : relay.Expr
The computed result.
Examples
--------
.. code-block:: python
data.shape == (1, 2, 3, 4)
shape_like.shape == (6, 2, 2, 3)
ret = relay.reshape_like(data, shape_like, lhs_begin=1, rhs_end=3)
ret.shape == (1, 6, 2, 2)
"""
return _make.reshape_like(data, shape_like, lhs_begin, lhs_end, rhs_begin, rhs_end)
def take(data, indices, axis=None, batch_dims=0, mode="clip"):
"""Take elements from an array along an axis.
Parameters
----------
data : relay.Expr
The source array.
indices : relay.Expr
The indices of the values to extract.
axis : int, optional
The axis over which to select values. By default,
the flattened input array is used.
batch_dims : int
The number of batch dimensions. By default is 0.
mode : str, optional
Specifies how out-of-bound indices will behave [clip, wrap, fast].
clip: clip to the range (default).
wrap: wrap around the indices.
fast: no clip or wrap around (user must make sure indices are in-bound).
Returns
-------
ret : relay.Expr
The computed result.
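Examples
--------
A minimal sketch with assumed values:
.. code-block:: python
    data = [[1, 2], [3, 4]]
    relay.take(data, [3, 0]) = [4, 1]          # axis=None indexes the flattened input
    relay.take(data, [1], axis=1) = [[2], [4]]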
"""
return _make.take(data, indices, batch_dims, axis, mode)
def full(fill_value, shape=(), dtype=""):
"""Fill array with scalar value.
Parameters
----------
fill_value : relay.Expr
The value to fill. Must be a scalar.
shape : tuple of int or relay.Expr
The shape of the target.
dtype : data type, optional (defaults to data type of the fill value)
The data type of the target.
Returns
-------
result : relay.Expr
The resulting tensor.
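Examples
--------
A minimal sketch, assuming a constant scalar fill value:
.. code-block:: python
    fill_value = relay.const(2.0)
    relay.full(fill_value, shape=(2, 3)) = [[2., 2., 2.], [2., 2., 2.]]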
"""
if isinstance(shape, Constant):
shape = list(shape.data.numpy())
if isinstance(shape, Expr):
return _dyn_make.full(fill_value, shape, dtype)
if isinstance(shape, int):
shape = [shape]
if isinstance(shape, (list, tuple)):
shape = list(shape)
return _make.full(fill_value, shape, dtype)
def full_like(data, fill_value):
"""Return a scalar value array with the same shape and type as the input array.
Parameters
----------
data : relay.Expr
The input tensor.
fill_value : relay.Expr
The scalar value to fill.
Returns
-------
result : relay.Expr
The resulting tensor.
"""
return _make.full_like(data, fill_value)
def arange(start, stop=None, step=None, dtype="float32"):
"""Return evenly spaced values within a given interval.
.. note::
Similar to ``numpy.arange``, when only one argument is given, it is used
as `stop` instead of `start` while `start` takes default value 0.
Warning: Undefined behavior when dtype is incompatible with start/stop/step.
It could lead to different results compared to numpy, MXNet, pytorch, etc.
Parameters
----------
start : tvm.Expr, optional
Start of interval. The interval includes this value. The default start
value is 0.
stop : tvm.Expr
Stop of interval. The interval does not include this value.
step : tvm.Expr, optional
Spacing between values. The default step size is 1.
dtype : str, optional
The target data type.
Returns
-------
result : relay.Expr
The resulting tensor.
Examples
--------
.. code-block:: python
relay.arange(5) = [0, 1, 2, 3, 4]
relay.arange(1, 5) = [1, 2, 3, 4]
relay.arange(1, 5, 1.5) = [1, 2.5, 4]
"""
if step is None:
step = const(1, dtype)
if stop is None:
stop = start
start = const(0, dtype=dtype)
return _make.arange(start, stop, step, dtype)
def meshgrid(data, indexing="ij"):
"""Create coordinate matrices from coordinate vectors.
.. note::
Similar to ``numpy.meshgrid``.
Parameters
----------
data : Union(List[relay.Expr], Tuple[relay.Expr])
A list of tensors, which must be either scalars or 1-D vectors.
indexing : str
Indexing mode, either "ij" for matrix indexing or "xy" for Cartesian indexing.
Returns
-------
ret : relay.Tuple([relay.Expr, relay.Expr])
The computed result.
Examples
--------
.. code-block:: python
x = [1, 2, 3]
y = [4, 5]
gx, gy = relay.meshgrid([x, y])
gx = [[1., 1.],
[2., 2.],
[3., 3.]]
gy = [[4., 5.],
[4., 5.],
[4., 5.]]
"""
data = list(data)
ret_size = len(data)
return TupleWrapper(_make.meshgrid(Tuple(data), indexing), ret_size)
def repeat(data, repeats, axis):
"""Repeats elements of an array.
By default, repeat flattens the input array into 1-D and then repeats the elements.
Parameters
----------
data : relay.Expr
The input data to the operator.
repeats : int
The number of repetitions for each element.
axis : int
The axis along which to repeat values. Negative numbers are interpreted
as counting from the back. By default, the flattened input array is used,
and a flat output array is returned.
Returns
-------
ret : relay.Expr
The computed result.
Examples
--------
.. code-block:: python
x = [[1, 2], [3, 4]]
relay.repeat(x, repeats=2) = [1., 1., 2., 2., 3., 3., 4., 4.]
relay.repeat(x, repeats=2, axis=1) = [[1., 1., 2., 2.],
[3., 3., 4., 4.]]
"""
return _make.repeat(data, repeats, axis)
def tile(data, reps):
"""Repeats the whole array multiple times.
Parameters
----------
data : relay.Expr
The input data to the operator.
reps : tuple of int or relay.Expr
The number of times repeating the tensor data.
Returns
-------
ret : relay.Expr
The computed result.
Examples
--------
.. code-block:: python
x = [[1, 2], [3, 4]]
relay.tile(x, reps=(2,3)) = [[1., 2., 1., 2., 1., 2.],
[3., 4., 3., 4., 3., 4.],
[1., 2., 1., 2., 1., 2.],
[3., 4., 3., 4., 3., 4.]]
relay.tile(x, reps=(2,)) = [[1., 2., 1., 2.],
[3., 4., 3., 4.]]
Notes
-----
Each dim size of reps must be a positive integer. If reps has length d,
the result will have dimension of max(d, data.ndim); If data.ndim < d,
data is promoted to be d-dimensional by prepending new axes.
If data.ndim >= d, reps is promoted to length data.ndim by prepending 1's to it.
"""
if isinstance(reps, Constant):
reps = list(reps.data.numpy())
if isinstance(reps, Expr):
return _dyn_make.tile(data, reps)
return _make.tile(data, reps)
def reverse(data, axis):
"""Reverses the order of elements along given axis while preserving array shape.
Parameters
----------
data : relay.Expr
The input data to the operator.
axis: int
The axis along which to reverse elements.
Returns
-------
ret : relay.Expr
The computed result.
Examples
--------
.. code-block:: python
x = [[1., 2.], [3., 4.]]
relay.reverse(x, axis=0) = [[3., 4.], [1., 2.]]
relay.reverse(x, axis=1) = [[2., 1.], [4., 3.]]
"""
return _make.reverse(data, axis)
def reverse_sequence(data, seq_lengths, seq_axis=1, batch_axis=0):
"""Reverse the tensor for variable length slices.
Input is first sliced along batch axis and then elements are reversed along seq axis.
Parameters
----------
data : relay.Expr
The tensor to be reversed.
seq_lengths : relay.Expr
A 1-D Tensor of length data.dims[batch_axis].
Must be one of the following types: int32, int64.
If seq_lengths[i] > data.dims[seq_axis], it is rounded down to data.dims[seq_axis].
If seq_lengths[i] < 1, it is rounded up to 1.
seq_axis : int, optional
The axis along which the elements will be reversed. Default is 1.
batch_axis : int, optional
The axis along which the tensor will be sliced. Default is 0.
Returns
-------
ret : relay.Expr
The computed result of same shape and type as of input.
Examples
--------
.. code-block:: python
x = [[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]]
relay.reverse_sequence(x, [1, 2, 3, 4], 0, 1) = [[0, 5, 10, 15],
[4, 1, 6, 11],
[8, 9, 2, 7],
[12, 13, 14, 3]]
relay.reverse_sequence(x, [1, 2, 3, 4], 1, 0) = [[0, 1, 2, 3],
[5, 4, 6, 7],
[10, 9, 8, 11],
[15, 14, 13, 12]]
"""
return _make.reverse_sequence(data, seq_lengths, seq_axis, batch_axis)
def where(condition, x, y):
"""Selecting elements from either x or y depending on the value of the
condition.
.. note::
Shapes of condition, x, and y must be broadcastable to a common shape.
Semantics follow numpy where function
https://numpy.org/doc/stable/reference/generated/numpy.where.html
Parameters
----------
condition : relay.Expr
Where True, yield x, otherwise yield y
x : relay.Expr
The first array or scalar to be selected.
y : relay.Expr
The second array or scalar to be selected.
Returns
-------
result : relay.Expr
The selected array. The output shape is the broadcasted shape from
condition, x, and y.
Examples
--------
.. code-block:: python
x = [[1, 2], [3, 4]]
y = [[5, 6], [7, 8]]
condition = [[0, 1], [-1, 0]]
relay.where(condition, x, y) = [[5, 2], [3, 8]]
condition = [[1], [0]]
relay.where(condition, x, y) = [[1, 2], [7, 8]]
"""
return _make.where(condition, x, y)
def broadcast_to(data, shape):
"""Return a scalar value array with the same type, broadcast to
the provided shape.
Parameters
----------
data : relay.Expr
The input tensor.
shape : tuple of int or relay.Expr
Provide the shape to broadcast to.
Returns
-------
result : relay.Expr
The resulting tensor.
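Examples
--------
A minimal sketch with assumed values:
.. code-block:: python
    x = [1, 2, 3]
    relay.broadcast_to(x, shape=(3, 3)) = [[1, 2, 3], [1, 2, 3], [1, 2, 3]]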
"""
if isinstance(shape, Constant):
shape = list(shape.data.numpy())
if isinstance(shape, Expr):
return _dyn_make.broadcast_to(data, shape)
if isinstance(shape, int):
shape = [shape]
if isinstance(shape, (list, tuple)):
shape = list(shape)
return _make.broadcast_to(data, shape)
def broadcast_to_like(data, broadcast_type):
"""Return a scalar value array with the same shape and type as the input array.
Parameters
----------
data : relay.Expr
The input tensor.
broadcast_type : relay.Expr
Provide the type to broadcast to.
Returns
-------
result : relay.Expr
The resulting tensor.
"""
return _make.broadcast_to_like(data, broadcast_type)
def collapse_sum_like(data, collapse_type):
"""Return a scalar value array with the same shape and type as the input array.
Parameters
----------
data : relay.Expr
The input tensor.
collapse_type : relay.Expr
Provide the type to collapse to.
Returns
-------
result : relay.Expr
The resulting tensor.
"""
return _make.collapse_sum_like(data, collapse_type)
def collapse_sum_to(data, shape):
"""Return a summation of data to the specified shape.
Parameters
----------
data : relay.Expr
The input tensor.
shape : relay.Expr
Shape to collapse to.
Returns
-------
result : relay.Expr
The resulting tensor.
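Examples
--------
A minimal sketch with assumed values; broadcastable axes are summed out:
.. code-block:: python
    data = [[1, 2], [3, 4], [5, 6]]            # shape (3, 2)
    relay.collapse_sum_to(data, (1, 2)) = [[9, 12]]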
"""
if isinstance(shape, (list, tuple)):
shape = const(list(shape), "int32")
return _make.collapse_sum_to(data, shape)
def split(data, indices_or_sections, axis=0):
"""Split input tensor along axis by sections or indices.
If indices_or_sections is an integer, the input will be divided equally
along given axis. If such a split is not possible, an error is raised.
If indices_or_sections is a tuple of sorted integers,
the entries indicate where along axis the array is split.
Parameters
----------
data : relay.Expr
The source array.
indices_or_sections : int or tuple of int
Indices or sections to split into. Accepts an int or a tuple
axis : int, optional
The axis over which to split.
Returns
-------
ret : relay.Tuple([relay.Expr, relay.Expr])
The computed result.
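Examples
--------
A minimal sketch with assumed values:
.. code-block:: python
    x = [1, 2, 3, 4, 5, 6]
    relay.split(x, indices_or_sections=3) = ([1, 2], [3, 4], [5, 6])
    relay.split(x, indices_or_sections=(2, 4)) = ([1, 2], [3, 4], [5, 6])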
"""
if isinstance(indices_or_sections, int):
ret_size = indices_or_sections
else:
ret_size = len(indices_or_sections) + 1
return TupleWrapper(_make.split(data, indices_or_sections, axis), ret_size)
def strided_slice(data, begin, end, strides=None, axes=None, slice_mode="end"):
"""Strided slice of an array.
Parameters
----------
data : relay.Expr
The source array to be sliced.
begin : relay.Expr, Tuple[int], or List[int]
The indices to begin with in the slicing.
end : relay.Expr, Tuple[int], or List[int]
Indices indicating end of the slice.
strides : relay.Expr, Tuple[int], or List[int], optional
Specifies the stride values; a stride can be negative, in which case
the input tensor will be reversed along that axis.
axes : Tuple[int] or List[int], optional
Axes along which slicing is applied. When it is specified, the length of begin, end,
strides, and axes must be equal. Moreover, begin, end, strides, and axes must be
static (cannot be relay.Expr). Axes argument for dynamic parameter slicing is
not supported yet.
slice_mode : str, optional
The slice mode [end, size].
end: The ending indices for the slice [default].
size: The input strides will be ignored, input end in this mode indicates
the size of a slice starting at the location specified by begin. If end[i]
is -1, all remaining elements in that dimension are included in the slice.
Returns
-------
ret : relay.Expr
The computed result.
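Examples
--------
A minimal sketch with assumed values:
.. code-block:: python
    x = [[1, 2, 3, 4], [5, 6, 7, 8]]
    relay.strided_slice(x, begin=[0, 1], end=[2, 4], strides=[1, 2]) = [[2, 4], [6, 8]]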
"""
strides = strides or [1]
if isinstance(begin, Constant):
begin = list(begin.data.numpy())
if isinstance(end, Constant):
end = list(end.data.numpy())
if isinstance(strides, Constant):
strides = list(strides.data.numpy())
if isinstance(begin, Expr) or isinstance(end, Expr) or isinstance(strides, Expr):
if isinstance(begin, (tuple, list)):
begin = const(list(begin))
if isinstance(end, (tuple, list)):
end = const(list(end))
if isinstance(strides, (tuple, list)):
strides = const(list(strides))
ishape = cast_like(shape_of(data), begin)
ishape_slice = slice_like(ishape, begin)
begin = _make.where(begin < cast_like(const(0), begin), begin + ishape_slice, begin)
begin = _make.where(begin >= ishape_slice, ishape_slice, begin)
# TODO(masahi): Support axes argument in dynamic strided slice
assert axes is None, "Axes argument for dynamic parameter slicing is not supported yet."
return _dyn_make.strided_slice(data, begin, end, strides, slice_mode)
return _make.strided_slice(data, begin, end, strides, slice_mode, axes)
def strided_set(data, v, begin, end, strides=None):
"""Strided set of an array.
Parameters
----------
data : relay.Expr
The source array to be sliced.
v : relay.Expr
The data to be set.
begin: relay.Expr, Tuple[int], or List[int]
The indices to begin with in the slicing.
end: relay.Expr, Tuple[int], or List[int]
Indices indicating end of the slice.
strides: relay.Expr, Tuple[int], or List[int], optional
Specifies the stride values; a stride can be negative, in which case
the input tensor will be reversed along that axis.
Returns
-------
ret : relay.Expr
The computed result.
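Examples
--------
A minimal sketch with assumed values; v overwrites the selected slice:
.. code-block:: python
    data = [[0, 0, 0, 0], [0, 0, 0, 0]]
    v = [[1, 2], [3, 4]]
    relay.strided_set(data, v, begin=[0, 1], end=[2, 3]) = [[0, 1, 2, 0], [0, 3, 4, 0]]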
"""
strides = strides or const([1], dtype="int32")
if isinstance(begin, (tuple, list)):
begin = const(list(begin))
if isinstance(end, (tuple, list)):
end = const(list(end))
if isinstance(strides, (tuple, list)):
strides = const(list(strides))
return _make.strided_set(data, v, begin, end, strides)
def slice_like(data, shape_like, axes=None):
"""Slice the first input with respect to the second input.
For an input array with shape ``(d1, d2, ..., dk)``, the `slice_like` operation slices
the input array to the corresponding size of the second array. By default it slices on all axes.
Parameters
----------
data : tvm.relay.Expr
The source array.
shape_like : tvm.relay.Expr
The new shape.
axes : Optional[Tuple[int]]
List of axes on which input data will be sliced according to the corresponding size of
the second input. By default will slice on all axes. Negative axes mean counting in reverse.
Returns
-------
result : relay.Expr
The computed result.
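Examples
--------
A minimal sketch with assumed values; only the shape of shape_like matters:
.. code-block:: python
    data = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]   # shape (3, 4)
    shape_like has shape (2, 3)
    relay.slice_like(data, shape_like) = [[1, 2, 3], [5, 6, 7]]
    relay.slice_like(data, shape_like, axes=(1,)) = [[1, 2, 3], [5, 6, 7], [9, 10, 11]]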
"""
return _make.slice_like(data, shape_like, axes)
def layout_transform(data, src_layout, dst_layout):
"""Transform the layout of a tensor
Parameters
----------
data : relay.Expr
The source tensor to be transformed
src_layout: str
The source layout. (e.g NCHW)
dst_layout: str
The destination layout. (e.g. NCHW16c)
Returns
-------
ret : relay.Expr
The transformed tensor.
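Examples
--------
A minimal shape sketch with assumed values:
.. code-block:: python
    x has shape (1, 32, 56, 56) in NCHW layout
    relay.layout_transform(x, "NCHW", "NCHW16c") has shape (1, 2, 56, 56, 16)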
"""
return _make.layout_transform(data, src_layout, dst_layout)
def reverse_reshape(data, newshape):
"""Reshapes the input array where the special values are inferred from
right to left.
The special values have the same semantics as :py:class:`tvm.relay.reshape`.
The difference is that special values are inferred from right to left. It
can be explained in the example below.
.. code-block:: python
data.shape = (10,5,4), newshape = (-1,0), reshape results in (40,5)
data.shape = (10,5,4), newshape = (-1,0), reverse_reshape results in (50,4)
Parameters
----------
data : relay.Expr
The input data to the operator.
newshape : Union[int, Tuple[int], List[int]]
The new shape. Should be compatible with the original shape.
Returns
-------
result : relay.Expr
The reshaped result.
"""
if isinstance(newshape, int):
newshape = [newshape]
return _make.contrib_reverse_reshape(data, list(newshape))
def gather(data, axis, indices):
"""Gather values along given axis from given indices.
E.g. for a 3D tensor, output is computed as:
.. code-block:: python
out[i][j][k] = data[indices[i][j][k]][j][k] # if axis == 0
out[i][j][k] = data[i][indices[i][j][k]][k] # if axis == 1
out[i][j][k] = data[i][j][indices[i][j][k]] # if axis == 2
``indices`` must have the same shape as ``data``, except at dimension ``axis``,
which need only be non-empty. Output will have the same shape as ``indices``.
Parameters
----------
data: relay.Expr
The input data to the operator.
axis: int
The axis along which to index. negative axis is supported.
indices: relay.Expr
The indices of values to gather.
Examples
--------
.. code-block:: python
data = [[1, 2], [3, 4]]
axis = 1
indices = [[0, 0], [1, 0]]
relay.gather(data, axis, indices) = [[1, 1], [4, 3]]
"""
return _make.gather(data, axis, indices)
def gather_nd(data, indices, batch_dims=0, index_rank=None):
"""Gather elements or slices from data and store to a tensor whose shape is
defined by indices.
Parameters
----------
data : relay.Expr
The input data to the operator.
indices : relay.Expr
The index tensor; its shape defines the shape of the output.
batch_dims : int
The number of batch dimensions.
index_rank : int, optional
The size of an indexing tuple, which is a fixed value and the same as indices.shape[0].
Only needed when other dimensions of indices are dynamic.
Returns
-------
ret : relay.Expr
The computed result.
Examples
--------
.. code-block:: python
data = [[0, 1], [2, 3]]
indices = [[1, 1, 0], [0, 1, 0]]
relay.gather_nd(data, indices) = [2, 3, 0]
data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
indices = [[0, 1], [1, 0]]
relay.gather_nd(data, indices) = [[3, 4], [5, 6]]
data = [[[0,1],[2,3]],[[4,5],[6,7]]]
indices = [[1, 0]]
relay.gather_nd(data, indices, batch_dims=1) = [[2,3],[4,5]]
"""
return _make.gather_nd(data, indices, batch_dims, index_rank)
def sequence_mask(data, valid_length, mask_value=0, axis=0):
"""Sets all elements outside the expected length of the sequence to a constant value.
This function takes an n-dimensional input array of the form [MAX_LENGTH, batch_size, ...] or
[batch_size, MAX_LENGTH, ...] and returns an array of the same shape.
Parameters
----------
data : relay.Expr
The input data.
valid_length : relay.Expr
The expected (valid) length of each sequence in the tensor.
mask_value : float
The masking value.
axis : int
The axis of the length dimension.
Returns
-------
ret : relay.Expr
The computed result.
Examples
--------
.. code-block:: python
x = [[[ 1., 2., 3.], [ 4., 5., 6.]],
[[ 7., 8., 9.], [ 10., 11., 12.]],
[[ 13., 14., 15.], [ 16., 17., 18.]]]
relay.sequence_mask(x, valid_length=[1, 1]) =
[[[ 1., 2., 3.], [ 4., 5., 6.]],
[[ 0., 0., 0.], [ 0., 0., 0.]],
[[ 0., 0., 0.], [ 0., 0., 0.]]]
relay.sequence_mask(x, valid_length=[2, 3], mask_value=0.1) =
[[[ 1., 2., 3.], [ 4., 5., 6.]],
[[ 7., 8., 9.], [ 10., 11., 12.]],
[[ 0.1, 0.1, 0.1], [ 16., 17., 18.]]]
"""
return _make.sequence_mask(data, valid_length, mask_value, axis)
def one_hot(indices, on_value, off_value, depth, axis, dtype):
"""
Returns a one-hot tensor where the locations represented by indices take value on_value,
other locations take value off_value.
Final dimension is <indices outer dimensions> x depth x <indices inner dimensions>.
Parameters
----------
indices : relay.Expr
Locations to set to on_value.
on_value : relay.Expr
Value to fill at indices.
off_value : relay.Expr
Value to fill at all other positions besides indices.
depth : int or relay.Expr
Depth of the one-hot dimension.
axis : int
Axis to fill.
dtype : str
Data type of the output tensor.
Returns
-------
ret : relay.Expr
The one-hot tensor.
Examples
--------
.. code-block:: python
indices = [0, 1, 2]
relay.one_hot(indices, on_value=1, off_value=0, depth=3, axis=-1, dtype="int32") =
[[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]
"""
if isinstance(depth, Constant):
depth = depth.data.numpy().item()
if isinstance(depth, Expr):
return _dyn_make.one_hot(indices, on_value, off_value, depth, axis, dtype)
return _make.one_hot(indices, on_value, off_value, depth, axis, dtype)
def unravel_index(indices, shape):
"""Convert a flat index or array of flat indices into a tuple of coordinate arrays.
Example::
- unravel_index([22, 41, 37], [7, 6]) = [[3, 6, 6],[4, 5, 1]]
Parameters
----------
indices : relay.Expr
An integer array containing indices.
shape : relay.Expr
The shape of the array.
Returns
-------
result : relay.Expr
The tuple of coordinate arrays.
"""
return _make.unravel_index(indices, shape)
def sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value=0):
"""Converts a sparse representation into a dense tensor.
Example::
- sparse_to_dense([[0, 0], [1, 1]], [2, 2], [3, 3], 0) = [[3, 0], [0, 3]]
Parameters
----------
sparse_indices : relay.Expr
A 0-D, 1-D, or 2-D tensor of integers containing location of sparse values.
output_shape : relay.Expr
A list of integers. Shape of the dense output tensor.
sparse_values : relay.Expr
A 0-D or 1-D tensor containing the sparse values for the sparse indices.
default_value : relay.Expr
A 0-D tensor containing the default value for the remaining locations.
Defaults to 0.
Returns
-------
result : relay.Expr
Dense tensor of shape output_shape. Has the same type as sparse_values.
"""
if default_value == 0:
default_value = const(0)
if isinstance(output_shape, Expr):
return _dyn_make.sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value)
return _make.sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value)
def matrix_set_diag(data, diagonal, k=0, align="RIGHT_LEFT"):
"""
Returns a tensor with the diagonals of input tensor replaced with the provided diagonal values.
Parameters
----------
data : relay.Expr
Input Tensor.
diagonal : relay.Expr
Values to be filled in the diagonal.
k : int or tuple of int, optional
Diagonal Offset(s). The diagonal or range of diagonals to set. (0 by default)
Positive value means superdiagonal, 0 refers to the main diagonal, and
negative value means subdiagonals. k can be a single integer (for a single diagonal)
or a pair of integers specifying the low and high ends of a matrix band.
k[0] must not be larger than k[1].
align : string, optional
Some diagonals are shorter than max_diag_len and need to be padded.
align is a string specifying how superdiagonals and subdiagonals should be aligned,
respectively. There are four possible alignments: "RIGHT_LEFT" (default), "LEFT_RIGHT",
"LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals to the right
(left-pads the row) and subdiagonals to the left (right-pads the row). It is the packing
format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is the opposite alignment.
Returns
-------
result : relay.Expr
New tensor with given diagonal values.
Examples
--------
.. code-block:: python
data = [[[7, 7, 7, 7],
[7, 7, 7, 7],
[7, 7, 7, 7]],
[[7, 7, 7, 7],
[7, 7, 7, 7],
[7, 7, 7, 7]]]
diagonal = [[1, 2, 3],
[4, 5, 6]]
relay.matrix_set_diag(input, diagonal) =
[[[1, 7, 7, 7],
[7, 2, 7, 7],
[7, 7, 3, 7]],
[[4, 7, 7, 7],
[7, 5, 7, 7],
[7, 7, 6, 7]]]
"""
if isinstance(k, (tuple, list)):
k_one = k[0]
if len(k) >= 2:
k_two = k[1]
else:
k_two = k[0]
else:
k_one = k
k_two = k
super_diag_right_align = align[:5] == "RIGHT"
sub_diag_right_align = align[-5:] == "RIGHT"
return _make.matrix_set_diag(
data, diagonal, k_one, k_two, super_diag_right_align, sub_diag_right_align
)
def adv_index(inputs):
"""
Numpy style advanced indexing. Index with a list of tensors.
Parameters
----------
inputs : Union(List[relay.Expr], Tuple[relay.Expr])
Input tensor and indices.
The first tensor is input data and rests are indices.
Returns
-------
result: relay.Expr
Output tensor.
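Examples
--------
A minimal sketch with assumed values, following numpy advanced indexing
(equivalent to data[rows, cols]):
.. code-block:: python
    data = [[1, 2], [3, 4]]
    rows = [1, 0]
    cols = [0, 1]
    relay.adv_index([data, rows, cols]) = [3, 2]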
"""
return _make.adv_index(Tuple(inputs))
def sparse_fill_empty_rows(sparse_indices, sparse_values, dense_shape, default_value):
"""
Fill rows in a sparse matrix that do not contain any values. Values are placed in the first
column of empty rows. The sparse array is in COO format.
It returns a TupleWrapper with 3 outputs
Parameters
----------
sparse_indices : relay.Expr
A 2-D tensor[N, ndims] of integers containing location of sparse values, where N is
the number of sparse values and n_dim is the number of dimensions of the dense_shape.
The first column of this relay parameter must be sorted in ascending order.
sparse_values : relay.Expr
A 1-D tensor[N] containing the sparse values for the sparse indices.
dense_shape : relay.Expr
A 1-D tensor[ndims] which contains shape of the dense output tensor.
default_value : relay.Expr
A 1-D tensor[1] containing the default value for the remaining locations.
Returns
-------
new_sparse_indices : relay.Expr
A 2-D tensor[?, ndims] of integers containing locations of the new sparse
indices. The first column of the output must be sorted in ascending order.
new_sparse_values : relay.Expr
A 1-D tensor[?] containing the sparse values for the sparse indices.
empty_row_indicator : relay.Expr
A 1-D tensor[dense_shape[0]] filled with zeros and ones
indicating whether the particular row is empty or full respectively
Note
----
This op exactly follows the documentation here:
https://www.tensorflow.org/api_docs/python/tf/sparse/fill_empty_rows
There are two exceptions:
1. Input Sparse Indices are expected to be in row-major order.
2. Empty Row Indicator has int64 output type with 1(for True) and 0(for False).
Examples
--------
.. code-block:: python
sparse_indices = [[0, 1],
[0, 3],
[2, 0],
[3, 1]]
sparse_values = [1, 2, 3, 4]
default_value = [10]
dense_shape = [5, 6]
new_sparse_indices, new_sparse_values, empty_row_indicator =
    relay.sparse_fill_empty_rows(
        sparse_indices,
        sparse_values,
        dense_shape,
        default_value)
new_sparse_indices = [[0, 1],
[0, 3],
[1, 0],
[2, 0],
[3, 1],
[4, 0]]
empty_row_indicator = [False, True, False, False, True]
new_sparse_values = [1, 2, 10, 3, 4, 10]
"""
new_sparse_indices, new_sparse_values, empty_row_indicator = TupleWrapper(
_make.sparse_fill_empty_rows(sparse_indices, sparse_values, dense_shape, default_value), 3
)
new_sparse_indices = cast_like(new_sparse_indices, sparse_indices)
new_sparse_values = cast_like(new_sparse_values, sparse_values)
empty_row_indicator = cast(empty_row_indicator, "bool")
return Tuple((new_sparse_indices, new_sparse_values, empty_row_indicator))
def sparse_reshape(sparse_indices, prev_shape, new_shape):
"""
Reshape a Sparse Tensor. The sparse array is in COO format.
Parameters
----------
sparse_indices : relay.Expr
A 2-D tensor[N, n_dim] of integers containing location of sparse values, where N is the
number of sparse values and n_dim is the number of dimensions of the dense_shape
prev_shape : relay.Expr
A 1-D tensor containing the previous shape of the dense tensor
new_shape : relay.Expr
A 1-D tensor containing the new shape of the dense tensor
Returns
-------
result: relay.Expr
Output tensor.
Examples
--------
.. code-block:: python
sparse_indices = [[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[1, 2, 3]]
prev_shape = [2, 3, 6]
new_shape = [9, -1]
new_sparse_indices, new_shape = relay.sparse_reshape(sparse_indices,
prev_shape,
new_shape)
new_sparse_indices = [[0, 0],
[0, 1],
[1, 2],
[4, 2],
[8, 1]]
new_shape = [9, 4]
"""
return TupleWrapper(_make.sparse_reshape(sparse_indices, prev_shape, new_shape), 2)
def segment_sum(data, segment_ids, num_segments=None):
"""
Computes the sum along segment_ids along axis 0. If multiple segment_ids reference the same
location their contributions add up.
result[index, j, k, ...] = sum_{i | segment_ids[i] == index} data[i, j, k, ...]
This op is much better understood with visualization articulated in the following links and
examples at the end of this docstring.
https://www.tensorflow.org/api_docs/python/tf/math/unsorted_segment_sum
https://caffe2.ai/docs/sparse-operations.html#null__unsorted-segment-reduction-ops
Parameters
----------
data : relay.Expr
Input Tensor. It can be of any type and multi-dimensional
segment_ids : relay.Expr
A 1-D int32/int64 tensor containing the segment_ids of the rows to calculate the output
sum upon. It defines a mapping from the zeroth dimension of data onto segment_ids. The
segment_ids tensor should be the size of the first dimension, d0, with consecutive IDs
in the range 0 to k, where k<d0. In particular, a segmentation of a matrix tensor is a
mapping of rows to segments. This tensor doesn't need to be sorted
num_segments : Optional[int]
An integer describing the shape of the zeroth dimension. If unspecified, it is
calculated as the number of unique segment_ids.
Returns
-------
result: relay.Expr
Output tensor.
Examples
--------
.. code-block:: python
data = [[1, 2, 3, 4],
[4, -3, 2, -1],
[5, 6, 7, 8]]
segment_ids = [0, 0, 1]
result = segment_sum(data, segment_ids)
result = [[5, -1, 5, 3],[5, 6, 7, 8]]
data = [[1, 2, 3, 4],
[4, -3, 2, -1],
[5, 6, 7, 8]]
segment_ids = [2, 2, 0]
num_segments = 3
result = segment_sum(data, segment_ids, num_segments)
result = [[5, 6, 7, 8],[0, 0, 0, 0], [5, -1, 5, 3]]
"""
one_tensor = cast_like(const([1]), segment_ids)
if num_segments:
if isinstance(num_segments, int):
max_segments = const([num_segments])
max_segments = cast_like(max_segments, segment_ids)
else:
max_segments = cast_like(num_segments, segment_ids)
else:
max_segments = _make.add(reshape(_make.max(segment_ids, [0], False, False), -1), one_tensor)
data_offrow_shape = strided_slice(_make.shape_of(data, "int32"), [1], [-1], slice_mode="size")
data_offrow_shape = cast_like(data_offrow_shape, max_segments)
new_shape = _make.concatenate(Tuple([max_segments, data_offrow_shape]), 0)
segment_ids_tiled_shape = _make.concatenate(
Tuple([reverse(data_offrow_shape, 0), one_tensor]), 0
)
expanded_segment_ids = tile(segment_ids, segment_ids_tiled_shape)
scatter_add_segment_ids = transpose(expanded_segment_ids)
src = cast_like(_dyn_make.zeros(new_shape, "float64"), data)
return scatter_add(src, scatter_add_segment_ids, data, axis=0)
def cumsum(data, axis=None, dtype=None, exclusive=None):
"""Numpy style cumsum op. Return the cumulative inclusive sum of the elements along
a given axis.
Parameters
----------
data : relay.Expr
The input data to the operator.
axis : int, optional
Axis along which the cumulative sum is computed. The default (None) is to compute
the cumsum over the flattened array.
dtype : string, optional
Type of the returned array and of the accumulator in which the elements are summed.
If dtype is not specified, it defaults to the dtype of data.
exclusive : bool, optional
If true will return exclusive sum in which the first element is not
included. In other terms, if true, the j-th output element would be
the sum of the first (j-1) elements. Otherwise, it would be the sum of
the first j elements.
Returns
-------
result : relay.Expr
The result has the same size as data, and the same shape as data if axis is not None.
If axis is None, the result is a 1-d array.
Examples
--------
.. code-block:: python
a = [[1,2,3], [4,5,6]]
cumsum(a) # if axis is not provided, cumsum is done over the flattened input.
-> [ 1, 3, 6, 10, 15, 21]
cumsum(a, dtype="float32")
-> [ 1., 3., 6., 10., 15., 21.]
cumsum(a, axis=0) # sum over rows for each of the 3 columns
-> [[1, 2, 3],
[5, 7, 9]]
cumsum(a, axis=1)
-> [[ 1, 3, 6],
[ 4, 9, 15]]
a = [1, 0, 1, 0, 1, 1, 0] # a is a boolean array
cumsum(a, dtype=int32) # dtype should be provided to get the expected results
-> [1, 1, 2, 2, 3, 4, 4]
"""
return _make.cumsum(data, axis, dtype, exclusive)
def cumprod(data, axis=None, dtype=None, exclusive=None):
"""Numpy style cumprod op. Return the cumulative inclusive product of the elements along
a given axis.
Parameters
----------
data : relay.Expr
The input data to the operator.
axis : int, optional
Axis along which the cumulative product is computed. The default (None) is to compute
the cumprod over the flattened array.
dtype : string, optional
Type of the returned array and of the accumulator in which the elements are multiplied.
If dtype is not specified, it defaults to the dtype of data.
exclusive : bool, optional
If true will return exclusive product in which the first element is not
included. In other terms, if true, the j-th output element would be
the product of the first (j-1) elements. Otherwise, it would be the product of
the first j elements. The product of zero elements will be 1.
Returns
-------
result : relay.Expr
The result has the same size as data, and the same shape as data if axis is not None.
If axis is None, the result is a 1-d array.
Examples
--------
.. code-block:: python
a = [[1,2,3], [4,5,6]]
cumprod(a) # if axis is not provided, cumprod is done over the flattened input.
-> [ 1, 2, 6, 24, 120, 720]
cumprod(a, dtype="float32")
-> [ 1., 2., 6., 24., 120., 720.]
cumprod(a, axis=0) # multiply over rows for each of the 3 columns
-> [[1, 2, 3],
[4, 10, 18]]
cumprod(a, axis=1)
-> [[ 1, 2, 6],
[ 4, 20, 120]]
a = [1, 1, 1, 0, 1, 1, 0] # a is a boolean array
cumprod(a, dtype=int32) # dtype should be provided to get the expected results
-> [1, 1, 1, 0, 0, 0, 0]
"""
return _make.cumprod(data, axis, dtype, exclusive)
def unique(data, is_sorted=True, return_counts=False):
"""
Find the unique elements of a 1-D tensor. Please note that `output` and `counts` are padded
to the same length as `data`, and elements with index >= num_unique[0] have undefined values.
Parameters
----------
data : relay.Expr
A 1-D tensor of integers.
is_sorted : bool
Whether to sort the unique elements in ascending order before returning as output.
return_counts : bool
Whether to return the count of each unique element.
Returns
-------
unique : relay.Expr
A 1-D tensor containing the unique elements of the input data tensor.
indices : relay.Expr
A 1-D tensor containing the index of each data element in the output tensor.
inverse_indices : relay.Expr
A 1-D tensor. For each entry in data, it contains the index of that data element in the
unique array.
num_unique : relay.Expr
A 1-D tensor with size=1 containing the number of unique elements in the input data tensor.
counts (optional) : relay.Expr
A 1-D tensor containing the count of each unique element in the output.
Examples
--------
.. code-block:: python
[output, indices, inverse_indices, num_unique] = unique([4, 5, 1, 2, 3, 3, 4, 5], False, False)
output          = [4, 5, 1, 2, 3, _, _, _]
indices         = [0, 1, 2, 3, 4, _, _, _]
inverse_indices = [0, 1, 2, 3, 4, 4, 0, 1]
num_unique      = [5]
[output, indices, inverse_indices, num_unique, counts] = unique([4, 5, 1, 2, 3, 3, 4, 5], False, True)
output          = [4, 5, 1, 2, 3, _, _, _]
indices         = [0, 1, 2, 3, 4, _, _, _]
inverse_indices = [0, 1, 2, 3, 4, 4, 0, 1]
num_unique      = [5]
counts          = [2, 2, 1, 1, 2, _, _, _]
[output, indices, inverse_indices, num_unique] = unique([4, 5, 1, 2, 3, 3, 4, 5], True)
output          = [1, 2, 3, 4, 5, _, _, _]
indices         = [2, 3, 4, 0, 1, _, _, _]
inverse_indices = [3, 4, 0, 1, 2, 2, 3, 4]
num_unique      = [5]
"""
if return_counts:
return TupleWrapper(_make.unique(data, is_sorted, return_counts), 5)
return TupleWrapper(_make.unique(data, is_sorted, return_counts), 4)
def invert_permutation(data):
"""Computes the inverse permutation of data.
This operation computes the inverse of an index permutation.
It takes a 1-D integer tensor x, which represents the indices of a zero-based
array and swaps each value with its index position.
For an output tensor y and an input tensor x, this operation computes the following:
y[x[i]] = i for i in [0, 1, ..., len(x) - 1]
Parameters
----------
data : relay.Expr
The source data to be inverse-permuted.
Returns
-------
ret : relay.Expr
The inverse-permuted data. Has the same type as data.
Examples
--------
.. code-block:: python
data = [3, 4, 0, 2, 1]
relay.invert_permutation(data) = [2, 4, 3, 0, 1]
"""
return _make.invert_permutation(data)
def stft(
data, n_fft, hop_length=None, win_length=None, window=None, normalized=False, onesided=True
):
"""
The STFT computes the Fourier transform of short overlapping windows of the input.
This gives frequency components of the signal as they change over time.
Parameters
----------
data : relay.Expr
Either a 1-D tensor or a 2-D batch tensor.
n_fft : int
The size of Fourier transform
hop_length : int, optional
The distance between neighboring sliding window frames. If None,
it is treated as equal to floor(n_fft / 4).
win_length : int, optional
The size of the window frame and STFT filter. If None, it is treated as equal to n_fft.
window : relay.Expr, optional
A 1-D tensor window frame. If None (default), it is treated as if
it had 1 everywhere in the window.
normalized : bool, optional
Whether to return the normalized STFT results. Default value is False.
onesided : bool, optional
Whether to return onesided result or fill with conjugate symmetry. Default value is True.
Returns
-------
output : relay.Expr
Tensor containing the STFT result with shape [batch, N, T, 2], where N is the
number of frequencies where STFT is applied and T is the total number of frames used.
Examples
--------
.. code-block:: python
data = [1, 2, 3, 4, 5, 6]
window = [4, 3, 2]
[n_fft, hop_length, win_length, normalized, onesided] = [3, 3, 3, False, True]
relay.stft(data, n_fft, hop_length, win_length, window, normalized, onesided)
-> [[[15.0000, 0.0000], [34.0000, 0.0000]], [[ 4.5000, 0.8660], [ 1.0000, -1.7321]]]
"""
if hop_length is None:
hop_length = n_fft // 4
if win_length is None:
win_length = n_fft
if window is None:
window = _make.ones([n_fft], "int32")
return _make.stft(data, n_fft, hop_length, win_length, window, normalized, onesided)
def trilu(data, k, upper=True):
"""
Given a 2-D matrix or batches of 2-D matrices, returns the
upper or lower triangular part of the tensor.
Parameters
----------
data: relay.Expr
The tensor that trilu will be applied to. Must be either
a 2D matrix or a tensor of batches of 2D matrices.
k: int
The number of diagonals above or below the main diagonal
to exclude or include.
upper: bool, optional
If True, only upper triangular values of input are kept,
if False, the lower triangular values are kept.
Returns
-------
ret : relay.Expr
The new tensor with appropriate diagonals set to zero.
Examples
--------
.. code-block:: python
x = [[0, 1, 2],
[3, 4, 5],
[6, 7, 8]]
relay.trilu(x, 0, True) =
[[0, 1, 2],
[0, 4, 5],
[0, 0, 8]]
"""
if not isinstance(k, Expr):
k = const(k, dtype="int32")
return _make.trilu(data, k, upper)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/vision/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Vision network related operators."""
from .multibox import *
from .nms import *
from .rcnn import *
from .yolo import *
from . import _rcnn
from . import _yolo
from . import _vision
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/vision/_make.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Constructor APIs"""
import tvm._ffi
tvm._ffi._init_api("relay.op.vision._make", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/vision/_rcnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Faster R-CNN and Mask R-CNN operations."""
from tvm import topi
from tvm.topi.utils import get_const_tuple
from .. import op as reg
from .. import strategy
from ..op import OpPattern
# roi_align
reg.register_strategy("vision.roi_align", strategy.roi_align_strategy)
reg.register_pattern("vision.roi_align", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_convert_op_layout("vision.roi_align")
def convert_roi_align(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for roi_align op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current roi_align
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and rois inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
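Examples
--------
A minimal usage sketch (assuming a module `mod` that contains vision.roi_align);
this function is invoked indirectly through the ConvertLayout pass:
.. code-block:: python
    desired_layouts = {"vision.roi_align": ["NHWC", "default"]}
    with tvm.transform.PassContext(opt_level=3):
        mod = relay.transform.ConvertLayout(desired_layouts)(mod)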
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
data, rois = inputs
new_attrs = dict(attrs)
assert (
len(desired_layouts) == 2
), "A desired layout is expected for both of vision.roi_align's inputs"
desired_data_layout, desired_rois_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
assert desired_rois_layout == "default", "Rois layout must be default"
new_attrs["layout"] = desired_data_layout
# rois layout not change
if desired_data_layout in ["NCHW", "NHWC"]:
return relay.vision.roi_align(data, rois, **new_attrs)
raise ValueError("Layout %s is not yet supported." % desired_data_layout)
@reg.register_convert_op_layout("vision.roi_pool")
def convert_roi_pool(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for roi_pool op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current roi_pool
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and rois inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
data, rois = inputs
new_attrs = dict(attrs)
assert (
len(desired_layouts) == 2
), "A desired layout is expected for both of vision.roi_pool's inputs"
desired_data_layout, desired_rois_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
assert desired_rois_layout == "default", "Rois layout must be default"
new_attrs["layout"] = desired_data_layout
# rois layout not change
if desired_data_layout in ["NCHW", "NHWC"]:
return relay.vision.roi_pool(data, rois, **new_attrs)
raise ValueError("Layout %s is not yet supported." % desired_data_layout)
# roi_pool
@reg.register_compute("vision.roi_pool")
def compute_roi_pool(attrs, inputs, _):
"""Compute definition of roi_pool"""
assert attrs.layout == "NCHW", "only support nchw for now"
return [
topi.vision.rcnn.roi_pool_nchw(
inputs[0],
inputs[1],
pooled_size=get_const_tuple(attrs.pooled_size),
spatial_scale=attrs.spatial_scale,
)
]
reg.register_schedule("vision.roi_pool", strategy.schedule_roi_pool)
reg.register_pattern("vision.roi_pool", OpPattern.OUT_ELEMWISE_FUSABLE)
# proposal
reg.register_strategy("vision.proposal", strategy.proposal_strategy)
reg.register_pattern("vision.proposal", OpPattern.OPAQUE)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/vision/_vision.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Definition of vision ops"""
from __future__ import absolute_import
from tvm import topi
from tvm.te.hybrid import script
from tvm.runtime import convert
from .. import op as reg
from .. import strategy
from ..op import OpPattern
# multibox_prior
reg.register_strategy("vision.multibox_prior", strategy.multibox_prior_strategy)
reg.register_pattern("vision.multibox_prior", OpPattern.OPAQUE)
# multibox_transform_loc
reg.register_strategy("vision.multibox_transform_loc", strategy.multibox_transform_loc_strategy)
reg.register_pattern("vision.multibox_transform_loc", OpPattern.OPAQUE)
# Get counts of valid boxes
reg.register_strategy("vision.get_valid_counts", strategy.get_valid_counts_strategy)
reg.register_pattern("vision.get_valid_counts", OpPattern.OPAQUE)
# non-maximum suppression
reg.register_strategy("vision.non_max_suppression", strategy.nms_strategy)
reg.register_pattern("vision.non_max_suppression", OpPattern.OPAQUE)
reg.register_strategy("vision.all_class_non_max_suppression", strategy.all_class_nms_strategy)
reg.register_pattern("vision.all_class_non_max_suppression", OpPattern.OPAQUE)
@script
def _get_valid_counts_shape_func(data_shape):
valid_counts_shape = output_tensor((1,), "int64")
out_tensor_shape = output_tensor((data_shape.shape[0],), "int64")
out_indices_shape = output_tensor((2,), "int64")
valid_counts_shape[0] = data_shape[0]
for i in const_range(data_shape.shape[0]):
out_tensor_shape[i] = data_shape[i]
out_indices_shape[0] = data_shape[0]
out_indices_shape[1] = data_shape[1]
return valid_counts_shape, out_tensor_shape, out_indices_shape
@reg.register_shape_func("vision.get_valid_counts", False)
def get_valid_counts_shape_func(attrs, inputs, _):
return _get_valid_counts_shape_func(inputs[0])
@script
def _nms_shape_func(data_shape):
out_shape = output_tensor((2,), "int64")
count_shape = output_tensor((2,), "int64")
out_shape[0] = data_shape[0]
out_shape[1] = data_shape[1]
count_shape[0] = data_shape[0]
count_shape[1] = int64(1)
return out_shape, count_shape
@reg.register_shape_func("vision.non_max_suppression", False)
def nms_shape_func(attrs, inputs, _):
if attrs.return_indices:
return _nms_shape_func(inputs[0])
return [topi.math.identity(inputs[0])]
@script
def _all_class_nms_shape_func_onnx(boxes_shape, scores_shape):
out_shape = output_tensor((2,), "int64")
count_shape = output_tensor((1,), "int64")
out_shape[0] = boxes_shape[0] * scores_shape[1] * boxes_shape[1]
out_shape[1] = int64(3)
count_shape[0] = int64(1)
return out_shape, count_shape
@script
def _all_class_nms_shape_func_tf(boxes_shape, scores_shape):
out_indices_shape = output_tensor((3,), "int64")
out_scores_shape = output_tensor((2,), "int64")
count_shape = output_tensor((1,), "int64")
out_indices_shape[0] = boxes_shape[0]
out_indices_shape[1] = scores_shape[1] * boxes_shape[1]
out_indices_shape[2] = int64(2)
out_scores_shape[0] = boxes_shape[0]
out_scores_shape[1] = scores_shape[1] * boxes_shape[1]
count_shape[0] = boxes_shape[0]
return out_indices_shape, out_scores_shape, count_shape
@reg.register_shape_func("vision.all_class_non_max_suppression", False)
def all_class_nms_shape_func(attrs, inputs, _):
if attrs.output_format == "onnx":
return _all_class_nms_shape_func_onnx(inputs[0], inputs[1])
return _all_class_nms_shape_func_tf(inputs[0], inputs[1])
@script
def _roi_align_shape_func_nchw(data_shape, rois_shape, pooled_size):
out = output_tensor((4,), "int64")
out[0] = rois_shape[0]
out[1] = data_shape[1]
out[2] = int64(pooled_size[0])
out[3] = int64(pooled_size[1])
return out
@script
def _roi_align_shape_func_nhwc(data_shape, rois_shape, pooled_size):
out = output_tensor((4,), "int64")
out[0] = rois_shape[0]
out[1] = int64(pooled_size[0])
out[2] = int64(pooled_size[1])
out[3] = data_shape[3]
return out
@reg.register_shape_func("vision.roi_align", False)
def roi_align_shape_func(attrs, inputs, _):
if attrs.layout == "NCHW":
return [_roi_align_shape_func_nchw(inputs[0], inputs[1], convert(attrs.pooled_size))]
assert attrs.layout == "NHWC", "layout must be NCHW or NHWC."
return [_roi_align_shape_func_nhwc(inputs[0], inputs[1], convert(attrs.pooled_size))]
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/vision/_yolo.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Backend compiler related feature registration"""
from __future__ import absolute_import
from ..op import register_pattern, OpPattern
from ..op import register_injective_schedule
# reorg
register_pattern("vision.yolo_reorg", OpPattern.INJECTIVE)
register_injective_schedule("vision.yolo_reorg")
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/vision/multibox.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Multibox operations."""
from tvm.relay import expr
from . import _make
def multibox_prior(
data, sizes=(1.0,), ratios=(1.0,), steps=(-1.0, -1.0), offsets=(0.5, 0.5), clip=False
):
"""Generate prior(anchor) boxes from data, sizes and ratios.
Parameters
----------
data : relay.Expr
The input data tensor.
sizes : tuple of float, optional
Tuple of sizes for anchor boxes.
ratios : tuple of float, optional
Tuple of ratios for anchor boxes.
steps : Tuple of float, optional
Priorbox step across y and x, -1 for auto calculation.
offsets : tuple of float, optional
Priorbox center offsets, y and x respectively.
clip : boolean, optional
Whether to clip out-of-boundary boxes.
Returns
-------
out : relay.Expr
3-D tensor with shape [1, h_in * w_in * (num_sizes + num_ratios - 1), 4]
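Examples
--------
A minimal shape sketch with assumed values:
.. code-block:: python
    data has shape (1, 3, 10, 10)   # NCHW
    relay.vision.multibox_prior(data, sizes=(1.0, 0.5), ratios=(1.0, 2.0))
    # output shape: [1, 10 * 10 * (2 + 2 - 1), 4] = [1, 300, 4]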
"""
return _make.multibox_prior(data, sizes, ratios, steps, offsets, clip)
def multibox_transform_loc(
cls_prob, loc_pred, anchor, clip=True, threshold=0.01, variances=(0.1, 0.1, 0.2, 0.2)
):
"""Location transformation for multibox detection
Parameters
----------
cls_prob : tvm.relay.Expr
Class probabilities.
loc_pred : tvm.relay.Expr
Location regression predictions.
anchor : tvm.relay.Expr
Prior anchor boxes.
clip : boolean, optional
Whether to clip out-of-boundary boxes.
threshold : double, optional
Threshold to be a positive prediction.
variances : Tuple of float, optional
variances to be decoded from box regression output.
Returns
-------
ret : tuple of tvm.relay.Expr
"""
return expr.TupleWrapper(
_make.multibox_transform_loc(cls_prob, loc_pred, anchor, clip, threshold, variances), 2
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/vision/nms.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Non-maximum suppression operations."""
from tvm.relay import expr
from . import _make
def get_valid_counts(data, score_threshold, id_index=0, score_index=1):
"""Get valid count of bounding boxes given a score threshold.
Also moves valid boxes to the top of input data.
Parameters
----------
data : relay.Expr
Input data. 3-D tensor with shape [batch_size, num_anchors, 6].
score_threshold : float, optional
Lower limit of score for valid bounding boxes.
id_index : int, optional
Index of the class categories, -1 to disable.
score_index : int, optional
Index of the scores/confidence of boxes.
Returns
-------
valid_count : relay.Expr
1-D tensor for valid number of boxes.
out_tensor : relay.Expr
Rearranged data tensor.
out_indices: relay.Expr
Indices in input data
"""
if not isinstance(score_threshold, expr.Expr):
score_threshold = expr.const(score_threshold, "float32")
return expr.TupleWrapper(
_make.get_valid_counts(data, score_threshold, id_index, score_index), 3
)
def non_max_suppression(
data,
valid_count,
indices,
max_output_size=-1,
iou_threshold=0.5,
force_suppress=False,
top_k=-1,
coord_start=2,
score_index=1,
id_index=0,
return_indices=True,
invalid_to_bottom=False,
):
"""Non-maximum suppression operator for object detection.
Parameters
----------
data : relay.Expr
3-D tensor with shape [batch_size, num_anchors, 6]
or [batch_size, num_anchors, 5].
The last dimension should be in format of
[class_id, score, box_left, box_top, box_right, box_bottom]
or [score, box_left, box_top, box_right, box_bottom]. It could
be the second output out_tensor of get_valid_counts.
valid_count : relay.Expr
1-D tensor for valid number of boxes. It could be the output
valid_count of get_valid_counts.
indices: relay.Expr
2-D tensor with shape [batch_size, num_anchors], represents
the index of box in original data. It could be the third
output out_indices of get_valid_counts. The values in the
second dimension are like the output of arange(num_anchors)
if get_valid_counts is not used before non_max_suppression.
max_output_size : int or relay.Expr, optional
Max number of output valid boxes for each instance.
Return all valid boxes if the value of max_output_size is less than 0.
iou_threshold : float or relay.Expr, optional
Non-maximum suppression threshold.
force_suppress : bool, optional
Suppress all detections regardless of class_id.
top_k : int, optional
Keep maximum top k detections before nms, -1 for no limit.
coord_start : int, optional
The starting index of the consecutive 4 coordinates.
score_index : int, optional
Index of the scores/confidence of boxes.
id_index : int, optional
Index of the class categories; -1 to disable.
return_indices : bool, optional
Whether to return box indices in input data.
invalid_to_bottom : bool, optional
Whether to move all invalid bounding boxes to the bottom of the output
(equivalently, all valid boxes to the top).
Returns
-------
out : relay.Expr or relay.Tuple
return relay.Expr if return_indices is disabled, a 3-D tensor
with shape [batch_size, num_anchors, 6] or [batch_size, num_anchors, 5].
If return_indices is True, return relay.Tuple of two 2-D tensors, with
shape [batch_size, num_anchors] and [batch_size, num_valid_anchors] respectively.
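Examples
--------
.. code-block:: python
# A minimal sketch chaining get_valid_counts into NMS; shapes and the
# thresholds are illustrative.
from tvm import relay
data = relay.var("data", shape=(1, 100, 6), dtype="float32")
cnt = relay.vision.get_valid_counts(data, score_threshold=0.0)
nms_out = relay.vision.non_max_suppression(
cnt[1], cnt[0], cnt[2], iou_threshold=0.5, return_indices=False
)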
"""
if not isinstance(max_output_size, expr.Expr):
max_output_size = expr.const(max_output_size, "int32")
if not isinstance(iou_threshold, expr.Expr):
iou_threshold = expr.const(iou_threshold, "float32")
out = _make.non_max_suppression(
data,
valid_count,
indices,
max_output_size,
iou_threshold,
force_suppress,
top_k,
coord_start,
score_index,
id_index,
return_indices,
invalid_to_bottom,
)
if return_indices:
return expr.TupleWrapper(out, 2)
return out
def all_class_non_max_suppression(
boxes,
scores,
max_output_boxes_per_class=-1,
iou_threshold=-1.0,
score_threshold=-1.0,
output_format="onnx",
):
"""Non-maximum suppression operator for object detection, corresponding to ONNX
NonMaxSuppression and TensorFlow combined_non_max_suppression.
NMS is performed for each class separately.
Parameters
----------
boxes : relay.Expr
3-D tensor with shape (batch_size, num_boxes, 4)
scores: relay.Expr
3-D tensor with shape (batch_size, num_classes, num_boxes)
max_output_boxes_per_class : int or relay.Expr, optional
The maximum number of boxes selected per class
iou_threshold : float or relay.Expr, optional
IoU test threshold
score_threshold : float or relay.Expr, optional
Score threshold to filter out low score boxes early
output_format : string, optional
"onnx" or "tensorflow". Specify by which frontends the outputs are
intented to be consumed.
Returns
-------
out : relay.Tuple
If `output_format` is "onnx", the output is a relay.Tuple of two tensors, the first is
`indices` of size `(batch_size * num_class * num_boxes, 3)` and the second is a scalar
tensor `num_total_detection` of shape `(1,)` representing the total number of selected
boxes. The three values in `indices` encode batch, class, and box indices.
Rows of `indices` are ordered such that selected boxes from batch 0, class 0 come first,
in descending order of scores, followed by boxes from batch 0, class 1, etc. Out of
`batch_size * num_class * num_boxes` rows of indices, only the first `num_total_detection`
rows are valid.
If `output_format` is "tensorflow", the output is a relay.Tuple of three tensors, the first
is `indices` of size `(batch_size, num_class * num_boxes, 2)`, the second is `scores` of
size `(batch_size, num_class * num_boxes)`, and the third is `num_total_detection` of size
`(batch_size,)` representing the total number of selected boxes per batch. The two values
in `indices` encode class and box indices. Of the `num_class * num_boxes` rows in `indices`
at batch b, only the first `num_total_detection[b]` entries are valid. The second axis of
`indices` and `scores` is sorted within each class by box scores, but not across classes.
So the box indices and scores for class 0 come first in sorted order, followed by
class 1, and so on.
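Examples
--------
.. code-block:: python
# A minimal sketch with ONNX-style outputs; shapes and thresholds are
# illustrative.
from tvm import relay
boxes = relay.var("boxes", shape=(1, 200, 4), dtype="float32")
scores = relay.var("scores", shape=(1, 80, 200), dtype="float32")
out = relay.vision.all_class_non_max_suppression(
boxes, scores, max_output_boxes_per_class=10,
iou_threshold=0.5, score_threshold=0.05
)
indices, num_total_detection = out[0], out[1]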
"""
if not isinstance(max_output_boxes_per_class, expr.Expr):
max_output_boxes_per_class = expr.const(max_output_boxes_per_class, "int32")
if not isinstance(iou_threshold, expr.Expr):
iou_threshold = expr.const(iou_threshold, "float32")
if not isinstance(score_threshold, expr.Expr):
score_threshold = expr.const(score_threshold, "float32")
out = _make.all_class_non_max_suppression(
boxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
output_format,
)
if output_format == "onnx":
return expr.TupleWrapper(out, 2)
return expr.TupleWrapper(out, 3)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/vision/rcnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Faster R-CNN and Mask R-CNN operations."""
from . import _make
def roi_align(data, rois, pooled_size, spatial_scale, sample_ratio=-1, layout="NCHW", mode="avg"):
"""ROI align operator.
Parameters
----------
data : relay.Expr
4-D tensor with shape [batch, channel, height, width]
rois : relay.Expr
2-D tensor with shape [num_roi, 5]. The last dimension should be in format of
[batch_index, w_start, h_start, w_end, h_end]
pooled_size : list/tuple of two ints
output size
spatial_scale : float
Ratio of input feature map height (or width) to raw image height (or width). Equals the
reciprocal of the total stride in convolutional layers, which should be in range (0.0, 1.0]
sample_ratio : int
Optional sampling ratio of ROI align, using adaptive size by default.
mode : str, optional
The pooling method. Relay supports two methods, 'avg' and 'max'. Default is 'avg'.
Returns
-------
output : relay.Expr
4-D tensor with shape [num_roi, channel, pooled_size, pooled_size]
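Examples
--------
.. code-block:: python
# A minimal sketch; the sizes and the 1/16 spatial scale are illustrative.
from tvm import relay
data = relay.var("data", shape=(1, 256, 32, 32), dtype="float32")
rois = relay.var("rois", shape=(8, 5), dtype="float32")
out = relay.vision.roi_align(data, rois, pooled_size=(7, 7), spatial_scale=1.0 / 16.0)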
"""
return _make.roi_align(data, rois, pooled_size, spatial_scale, sample_ratio, layout, mode)
def roi_pool(data, rois, pooled_size, spatial_scale, layout="NCHW"):
"""ROI pool operator.
Parameters
----------
data : relay.Expr
4-D tensor with shape [batch, channel, height, width]
rois : relay.Expr
2-D tensor with shape [num_roi, 5]. The last dimension should be in format of
[batch_index, w_start, h_start, w_end, h_end]
pooled_size : list/tuple of two ints
output size
spatial_scale : float
Ratio of input feature map height (or width) to raw image height (or width). Equals the
reciprocal of the total stride in convolutional layers, which should be in range (0.0, 1.0]
Returns
-------
output : relay.Expr
4-D tensor with shape [num_roi, channel, pooled_size, pooled_size]
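Examples
--------
.. code-block:: python
# A minimal sketch; the sizes and the 1/16 spatial scale are illustrative.
from tvm import relay
data = relay.var("data", shape=(1, 256, 32, 32), dtype="float32")
rois = relay.var("rois", shape=(8, 5), dtype="float32")
out = relay.vision.roi_pool(data, rois, pooled_size=(7, 7), spatial_scale=1.0 / 16.0)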
"""
return _make.roi_pool(data, rois, pooled_size, spatial_scale, layout)
def proposal(
cls_prob,
bbox_pred,
im_info,
scales,
ratios,
feature_stride,
threshold,
rpn_pre_nms_top_n,
rpn_post_nms_top_n,
rpn_min_size,
iou_loss,
):
"""Proposal operator.
Parameters
----------
cls_prob : relay.Expr
4-D tensor with shape [batch, 2 * num_anchors, height, width].
bbox_pred : relay.Expr
4-D tensor with shape [batch, 4 * num_anchors, height, width].
im_info : relay.Expr
2-D tensor with shape [batch, 3]. The last dimension should be in format of
[im_height, im_width, im_scale]
scales : list/tuple of float
Scales of anchor windows.
ratios : list/tuple of float
Ratios of anchor windows.
feature_stride : int
The size of the receptive field of each unit in the convolution layer of the RPN, for
example the product of all strides prior to this layer.
threshold : float
Non-maximum suppression threshold.
rpn_pre_nms_top_n : int
Number of top scoring boxes to apply NMS. -1 to use all boxes.
rpn_post_nms_top_n : int
Number of top scoring boxes to keep after applying NMS to RPN proposals.
rpn_min_size : int
Minimum height or width in proposal.
iou_loss : bool
Usage of IoU loss.
Returns
-------
output : relay.Expr
2-D tensor with shape [batch * rpn_post_nms_top_n, 5]. The last dimension is in format of
[batch_index, w_start, h_start, w_end, h_end].
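Examples
--------
.. code-block:: python
# A minimal sketch with typical Faster R-CNN RPN settings; all numbers
# below (9 anchors, 16x stride, top-n counts) are illustrative.
from tvm import relay
cls_prob = relay.var("cls_prob", shape=(1, 18, 14, 14), dtype="float32")
bbox_pred = relay.var("bbox_pred", shape=(1, 36, 14, 14), dtype="float32")
im_info = relay.var("im_info", shape=(1, 3), dtype="float32")
out = relay.vision.proposal(
cls_prob, bbox_pred, im_info, scales=(8.0, 16.0, 32.0),
ratios=(0.5, 1.0, 2.0), feature_stride=16, threshold=0.7,
rpn_pre_nms_top_n=6000, rpn_post_nms_top_n=300, rpn_min_size=16,
iou_loss=False,
)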
"""
return _make.proposal(
cls_prob,
bbox_pred,
im_info,
scales,
ratios,
feature_stride,
threshold,
rpn_pre_nms_top_n,
rpn_post_nms_top_n,
rpn_min_size,
iou_loss,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/vision/yolo.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Yolo operations."""
from . import _make
def yolo_reorg(data, stride):
"""Yolo reorg operation used in darknet models.
This layer shuffles the input tensor values based on the stride value.
Along with the shuffling, it does the shape transform.
If '(n, c, h, w)' is the data shape and 's' is stride, output shape is '(n, c*s*s, h/s, w/s)'.
Example:
.. code-block:: python
data(1, 4, 2, 2) = [[[[ 0 1] [ 2 3]]
[[ 4 5] [ 6 7]]
[[ 8 9] [10 11]]
[[12 13] [14 15]]]]
stride = 2
ret(1, 16, 1, 1) = [[[[ 0]] [[ 2]] [[ 8]] [[10]]
[[ 1]] [[ 3]] [[ 9]] [[11]]
[[ 4]] [[ 6]] [[12]] [[14]]
[[ 5]] [[ 7]] [[13]] [[15]]]]
.. note::
stride=1 has no significance for reorg operation.
Parameters
----------
data : relay.Expr
The input data tensor.
stride : int
The stride value for reorganisation.
Returns
-------
ret : relay.Expr
The computed result.
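Examples
--------
.. code-block:: python
# Relay-level usage matching the value example above.
from tvm import relay
data = relay.var("data", shape=(1, 4, 2, 2), dtype="float32")
out = relay.vision.yolo_reorg(data, stride=2)  # result shape (1, 16, 1, 1)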
"""
return _make.yolo_reorg(data, stride)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/vm/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""Dialect operators for Relay VM."""
from __future__ import absolute_import as _abs
from .vm import *
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/vm/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for relay.op.vm"""
import tvm._ffi
tvm._ffi._init_api("relay.op.vm", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/op/vm/vm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return,invalid-name,len-as-condition,too-many-nested-blocks
"""Dialect operators for Relay VM."""
from . import _ffi_api
def shape_of(expr):
"""Invoke a function to get the shape of a tensor.
Parameters
----------
expr : tvm.relay.Expr
The expr used to evaluate its tensor shape.
Returns
-------
result : tvm.relay.Expr
The expression with the evaluated tensor shape.
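Examples
--------
.. code-block:: python
# A hedged sketch: this dialect op is normally inserted by compiler
# passes (e.g. during VM compilation) rather than written by hand.
from tvm import relay
from tvm.relay.op import vm as vm_ops
x = relay.var("x", shape=(relay.Any(), 4), dtype="float32")
s = vm_ops.shape_of(x)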
"""
return _ffi_api.shape_of(expr)
def invoke_tvm_op(func, inputs, outputs):
"""Call a primitive function with the TVM operator calling convention.
Parameters
----------
func : tvm.relay.Expr
The input expr.
inputs : tvm.relay.Expr
A tuple of the inputs to pass to the TVM function.
outputs : tvm.relay.Expr
A tuple of the outputs to pass to the TVM function.
Returns
-------
result : tvm.relay.Expr
The invoke_tvm_op call node.
"""
return _ffi_api.invoke_tvm_op(func, inputs, outputs)
def shape_func(func, inputs, outputs, is_inputs):
"""Invoke the shape function of the passed function.
Parameters
----------
func : tvm.relay.Expr
The primitive function from which to compute the shape function.
inputs : tvm.relay.Tuple
The tupled inputs.
outputs : tvm.relay.Tuple
The tupled outputs.
is_inputs : List[bool]
A boolean list indicating whether the shape function should expect
shape or input at each position.
Returns
-------
result : tvm.relay.Expr
The shape function expression.
"""
return _ffi_api.shape_func(func, inputs, outputs, is_inputs)
def reshape_tensor(data, shape, newshape):
"""Invoke the VM ReshapeTensor instruction.
Parameters
----------
data : tvm.relay.Expr
The input data.
shape : tvm.relay.Expr
The newshape tensor.
newshape : List[tvm.ir.PrimExpr]
The new shape.
Returns
-------
result : tvm.relay.Expr
The reshape_tensor call node.
"""
return _ffi_api.reshape_tensor(data, shape, newshape)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/param_dict.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Helper utility to save parameter dicts."""
import tvm.runtime
def save_param_dict(params):
"""Save parameter dictionary to binary bytes.
The result binary bytes can be loaded by the
GraphModule with API "load_params".
.. deprecated:: 0.9.0
Use :py:func:`tvm.runtime.save_param_dict` instead.
Parameters
----------
params : dict of str to NDArray
The parameter dictionary.
Returns
-------
param_bytes: bytearray
Serialized parameters.
Examples
--------
.. code-block:: python
# set up the parameter dict
params = {"param0": arr0, "param1": arr1}
# save the parameters as byte array
param_bytes = tvm.runtime.save_param_dict(params)
# We can serialize the param_bytes and load it back later.
# Pass in byte array to module to directly set parameters
tvm.runtime.load_param_dict(param_bytes)
"""
return tvm.runtime.save_param_dict(params)
def load_param_dict(param_bytes):
"""Load parameter dictionary to binary bytes.
.. deprecated:: 0.9.0
Use :py:func:`tvm.runtime.load_param_dict` instead.
Parameters
----------
param_bytes: bytearray
Serialized parameters.
Returns
-------
params : dict of str to NDArray
The parameter dictionary.
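Examples
--------
.. code-block:: python
import numpy as np
import tvm
arr = tvm.nd.array(np.zeros((2, 2), dtype="float32"))
param_bytes = tvm.runtime.save_param_dict({"w": arr})
params = tvm.runtime.load_param_dict(param_bytes)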
"""
return tvm.runtime.load_param_dict(param_bytes)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/prelude.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name
"""A prelude containing useful global functions and ADT definitions."""
from tvm.ir import IRModule, TypeCall
from tvm.tir import Any
from tvm.relay.transform import ToANormalFormExpr
from .ty import GlobalTypeVar, TensorType, scalar_type
from .expr import Var, GlobalVar, If, const
from .function import Function
from .op.tensor import add, subtract, equal
from .adt import Constructor, TypeData, Clause, Match
from .adt import PatternConstructor, PatternVar, PatternWildcard
from . import op, transform
from .analysis import free_vars
def get_tensor_array_shape(expr, dtype, prelude):
"""Get the static shape of a tensor array if it has fixed rank shape.
By design, static ADT tensor in TVM has type name in the format
of static_tensor_dim0_dim1_..._dimN_t.
Parameters
----------
expr : Relay Expr
Input expression.
dtype : str
Data type.
prelude : Prelude
Tensor array prelude
Returns
-------
shape : tuple of (int, Any) or None
The output shape. None if input tensor array
has dynamic shape.
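For example, an input whose tensor array type name is
``static_tensor_float32_2_any_t`` yields the shape ``(2, Any())``, while a
dynamic tensor array yields ``None``.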
"""
mod = prelude.mod
mod["main"] = Function(free_vars(expr), expr)
mod = transform.InferType()(mod)
checked_type = mod["main"].body.checked_type
assert isinstance(checked_type, TypeCall), "Input must be a tensor array."
ta_type_str = checked_type.args[0].func.name_hint
static_ta_ty_start = "static_tensor_{}".format(dtype)
if ta_type_str.startswith(static_ta_ty_start):
shape_str = ta_type_str.replace("{}_".format(static_ta_ty_start), "").replace("_t", "")
shape = []
if "scalar" not in shape_str:
for dim_str in shape_str.split("_"):
if dim_str in ["?", "any"]:
shape.append(Any())
else:
shape.append(int(dim_str))
return tuple(shape)
return None
def _get_name_static(canonical, dtype, shape, batch_dim=None, extra_shapes=None):
"""Get name for static shape tensor array op
By design, static ADT tensor in TVM has type name in the format
of static_tensor_dim0_dim1_..._dimN_t
or static_tensor_batch1_dim0_dim1_..._dimN_t if the tensorlist stack has only one item.
Parameters
----------
canonical : String
Tensor array op name
dtype : str
Data type.
shape : tuple of (int, Any) or None
Tensor array shape
batch_dim : None or int
1 if the tensorlist stack has only one item; None by default.
extra_shapes : dict of str to shape, optional
Extra named shapes (for example an indices shape) appended to the op name.
Returns
-------
name : String
The tensor array op name
"""
"""
shape_str = _to_str(shape)
if extra_shapes is not None:
for n, s in extra_shapes.items():
extra_shape_str = "_{}_{}".format(n, _to_str(s))
shape_str += extra_shape_str
if len(shape_str) == 0:
shape_str = "scalar"
if canonical == "tensor_t":
return "static_tensor_{}_{}_t".format(dtype, shape_str)
if batch_dim is None or canonical in ["tensor_constructor", "tensor_nil"]:
return "{}_{}_{}".format(canonical, dtype, shape_str)
if batch_dim != 1:
return "{}_{}_{}".format(canonical, dtype, shape_str)
return "{}_{}_batch{}_{}".format(canonical, dtype, str(batch_dim), shape_str)
def _to_str(shape):
dim_names = []
for dim in shape:
if isinstance(dim, Any):
dim_names.append("any")
else:
dim_names.append(str(dim))
return "_".join(dim_names)
class StaticTensorArrayOps(object):
"""Contains tensor array related ops for fixed rank tensor array"""
def __init__(self, prelude, dtype, shape, batch_dim=None):
"""Create tensor array ops registry"""
self.prelude = prelude
self.dtype = dtype
self.shape = shape
self.batch_dim = batch_dim
self.list, self.cons, self.nil = self.prelude.mod.get_type("List")
def get_name(self, canonical, extra_shapes=None):
"""Get name corresponding to the canonical name"""
return _get_name_static(canonical, self.dtype, self.shape, self.batch_dim, extra_shapes)
def get_global_var(self, canonical):
"""Get global corresponding to the canonical name"""
return self.prelude.get_global_var_static(canonical, self.dtype, self.shape, self.batch_dim)
def get_type(self, canonical):
"""Get type corresponding to the canonical name"""
return self.prelude.get_type_static(canonical, self.dtype, self.shape)
def get_ctor(self, canonical):
"""Get ctor corresponding to the canonical name"""
return self.prelude.get_ctor_static("tensor_t", canonical, self.dtype, self.shape)
def define_tensor_adt(self):
"""Defines the static tensor ADT, which is the container for tensors
with fixed shapes."""
tensor_type_name = self.get_name("tensor_t")
# This is effectively functioning as a monomorphizer.
# TODO(@jroesch): we should add full shape polymorphism
# and do monomorphization.
#
# Skip register if tensor type is already registered.
global_type_names = set()
for g_ty_var in self.prelude.mod.get_global_type_vars():
global_type_names.add(g_ty_var.name_hint)
if tensor_type_name in global_type_names:
self.tensor_type_var = self.get_type("tensor_t")
return
self.tensor_type_var = GlobalTypeVar(tensor_type_name)
tensor_type = TensorType(self.shape, self.dtype)
tensor_constructor_name = self.get_name("tensor_constructor")
tensor_nil_name = self.get_name("tensor_nil")
tensor_nil_case = Constructor(tensor_nil_name, [], self.tensor_type_var)
tensor_case = Constructor(tensor_constructor_name, [tensor_type], self.tensor_type_var)
self.prelude.mod[self.tensor_type_var] = TypeData(
self.tensor_type_var, [], [tensor_nil_case, tensor_case]
)
def define_tensor_array(self):
"""Defines a function to create a tensor array with size n.
tensor_array(n) : Tensor[(), int32] -> list[tensor_t]
"""
tensor_array_constructor_name = self.get_name("tensor_array")
tensor_array_constructor_var = self._create_global_var(tensor_array_constructor_name)
tensor_nil_var = self.get_ctor("tensor_nil")
tensor_type_var = self.get_ctor("tensor_t")
n = Var("x", scalar_type("int32"))
body = If(
equal(n, const(0)),
self.nil(),
self.cons(tensor_nil_var(), tensor_array_constructor_var(subtract(n, const(1)))),
)
self.prelude.mod[tensor_array_constructor_var] = Function(
[n], body, self.list(tensor_type_var()), []
)
def define_tensor_take(self):
"""Defines a function to return a range of tensor_t on axis 0.
tensor_take(t, lower, upper) :
tensor_t -> Tensor[(), int32] -> Tensor[(), int32] -> tensor_t
"""
# We don't register take for scalar tensor.
ndim = len(self.shape)
if ndim == 0:
return
take_name = self.get_name("tensor_take")
if self.is_cached(take_name):
return
take_var = GlobalVar(take_name)
origin_tensor_constructor = self.get_ctor("tensor_constructor")
output_shape = [
Any(),
] + list(self.shape[1:])
tensor_type_var, tensor_constructor, _ = self._get_adt_by_shape(output_shape)
t = Var("tensor", self.tensor_type_var())
lower = Var("lower", scalar_type("int32"))
upper = Var("upper", scalar_type("int32"))
tvar = Var("t")
case = Clause(
PatternConstructor(origin_tensor_constructor, [PatternVar(tvar)]),
tensor_constructor(op.take(tvar, op.arange(lower, upper, dtype="int32"), axis=0)),
)
self.prelude.mod[take_var] = Function(
[t, lower, upper], Match(t, [case], False), tensor_type_var(), []
)
def define_tensor_concatenate(self):
"""Defines a function to concatenate two tensor_t on axis 0.
tensor_concatenate(t) : tensor_t -> tensor_t -> tensor_t
"""
# We don't register concatenate for scalar tensor.
ndim = len(self.shape)
if ndim == 0:
return
concat_name = self.get_name("tensor_concatenate")
concat_var = GlobalVar(concat_name)
if self.is_cached(concat_name):
return
output_shape = [
Any(),
] + list(self.shape[1:])
tensor_type_var, tensor_constructor, _ = self._get_adt_by_shape(output_shape)
origin_tensor_constructor = self.get_ctor("tensor_constructor")
origin_tensor_type_var = self.tensor_type_var
x = Var("x", origin_tensor_type_var())
y = Var("y", origin_tensor_type_var())
t1 = Var("t1")
t2 = Var("t2")
case = Clause(
PatternConstructor(origin_tensor_constructor, [PatternVar(t1)]),
Match(
y,
[
Clause(
PatternConstructor(origin_tensor_constructor, [PatternVar(t2)]),
tensor_constructor(op.concatenate([t1, t2], axis=0)),
)
],
False,
),
)
self.prelude.mod[concat_var] = Function(
[x, y], Match(x, [case], False), tensor_type_var(), []
)
def define_tensor_expand_dims(self):
"""Defines a function to grow a tensor_t's rank by adding one dimension in front
of the original tensor_t.
tensor_expand_dims(t) : tensor_t -> tensor_t
"""
expand_dims_name = self.get_name("tensor_expand_dims")
expand_dims_var = self._create_global_var(expand_dims_name)
setattr(self.prelude, expand_dims_name, expand_dims_var)
origin_tensor_type_var = self.tensor_type_var
origin_tensor_constructor = self.get_ctor("tensor_constructor")
x = Var("x", origin_tensor_type_var())
# Note: we set the added axis to be Any() instead of 1 because
# the stack op needs to concatenate recursively.
new_axis = Any() if self.batch_dim is None or self.batch_dim != 1 else self.batch_dim
tensor_type_var, tensor_constructor, _ = self._get_adt_by_shape(
[
new_axis,
]
+ list(self.shape)
)
t = Var("t")
case = Clause(
PatternConstructor(origin_tensor_constructor, [PatternVar(t)]),
tensor_constructor(op.expand_dims(t, 0, 1)),
)
self.prelude.mod[expand_dims_var] = Function(
[x], Match(x, [case], False), tensor_type_var(), []
)
def define_tensor_array_read(self):
"""Defines a function to get the nth element of a list. Assume the list has at least one
element.
tensor_array_read(ta, n) : list[static_tensor_t] -> Tensor[(), int32] ->
Tensor[self.shape, self.dtype]
"""
read_name = self.get_name("tensor_array_read")
if self.is_cached(read_name):
return
read_var = GlobalVar(read_name)
tensor_array = Var("tensor_array", self.list(self.tensor_type_var()))
n = Var("x", scalar_type("int32"))
self.prelude.mod[read_var] = Function(
[tensor_array, n], self.prelude.nth(tensor_array, n), self.tensor_type_var(), []
)
def is_cached(self, name):
try:
self.prelude.mod.get_global_var(name)
return True
except ValueError:
return False
def define_tensor_array_write(self):
"""Defines a function to update a tensor array at index n with value v.
tensor_array_write(ta, n, v) :
list[static_tensor_t] -> Tensor[(), int32] -> Tensor[self.shape, self.dtype] ->
list[static_tensor_t]
"""
write_name = self.get_name("tensor_array_write")
if self.is_cached(write_name):
return
write_var = GlobalVar(write_name)
tensor_array = Var("tensor_array", self.list(self.tensor_type_var()))
n = Var("x", scalar_type("int32"))
v = Var("v", self.tensor_type_var())
self.prelude.mod[write_var] = Function(
[tensor_array, n, v],
self.prelude.update(tensor_array, n, v),
self.list(self.tensor_type_var()),
[],
)
def define_tensor_array_unstack(self):
"""Defines a function to unstack the values of a tensor_t in a tensor array.
tensor_array_unstack_tensor(t) : tensor_t -> list[tensor_t]
"""
ndim = len(self.shape)
# We don't register unstack for scalar tensor array
if ndim == 0:
return
helper_name = self.get_name("tensor_array_unstack_helper")
helper_var = self._create_global_var(helper_name)
setattr(self.prelude, helper_name, helper_var)
tensor = Var("t", TensorType(self.shape, self.dtype))
up = Var("up", scalar_type("int32"))
i = Var("i", scalar_type("int32"))
tensor_var = Var("tensor", TensorType(self.shape, self.dtype))
reduced_tensor_type_var, tensor_constructor, _ = self._get_adt_by_shape(self.shape[1:])
helper_body = If(
equal(i, up),
self.nil(),
self.cons(
tensor_constructor(op.take(tensor, i, axis=0)),
helper_var(add(i, const(1)), up, tensor),
),
)
self.prelude.mod[helper_var] = Function(
[i, up, tensor], helper_body, self.list(reduced_tensor_type_var()), []
)
unstack_name = self.get_name("tensor_array_unstack")
unstack_var = self._create_global_var(unstack_name)
setattr(self.prelude, unstack_name, unstack_var)
shape = op.shape_of(tensor_var)
unstack_length = op.take(shape, const(0))
self.prelude.mod[unstack_var] = Function(
[tensor_var],
helper_var(const(0), unstack_length, tensor_var),
self.list(reduced_tensor_type_var()),
[],
)
def define_tensor_array_scatter(self, indices_shape=None, force_update=False):
"""Defines a function to scatter the values of a tensor_t in indices of a tensor array.
tensor_array_scatter(ta, indices, value) :
list[tensor_t] -> Tensor[(Any), int32] -> tensor_t -> list[tensor_t]
Set static indices shape by specifying indices_shape.
Set force_update to get static indices shape operator.
"""
# When this operator has already been registered, only update
# when force_update is set. This should be used only when we need to
# redefine this op for static indices shape.
extra_shapes = {"indices": indices_shape} if indices_shape is not None else None
tensor_array_scatter_name = self.get_name("tensor_array_scatter", extra_shapes)
if hasattr(self.prelude, tensor_array_scatter_name) and not force_update:
return
tensor_array_scatter_helper_name = self.get_name(
"tensor_array_scatter_helper", extra_shapes
)
tensor_array_scatter_helper_var = self._create_global_var(tensor_array_scatter_helper_name)
ta = Var("ta", self.list(self.tensor_type_var()))
current = Var("current", scalar_type("int32"))
limit = Var("limit", scalar_type("int32"))
indices_ = Var("indices_", TensorType(indices_shape or [Any()], "int32"))
values_ = Var("values_", self.list(self.tensor_type_var()))
write_var = self.get_global_var("tensor_array_write")
read_var = self.get_global_var("tensor_array_read")
helper_body = If(
equal(current, limit),
ta,
tensor_array_scatter_helper_var(
write_var(ta, op.take(indices_, current), read_var(values_, current)),
add(current, const(1)),
limit,
indices_,
values_,
),
)
self.prelude.mod[tensor_array_scatter_helper_var] = Function(
[ta, current, limit, indices_, values_],
helper_body,
self.list(self.tensor_type_var()),
[],
)
tensor_array_scatter_var = self._create_global_var(tensor_array_scatter_name)
setattr(self.prelude, tensor_array_scatter_name, tensor_array_scatter_var)
tensor_array = Var("tensor_array", self.list(self.tensor_type_var()))
indices = Var("indices", TensorType(indices_shape or [Any()], "int32"))
values = Var("values", self.list(self.tensor_type_var()))
if indices_shape is None:
indices_shape = op.shape_of(indices)
limit = op.take(indices_shape, const(0))
else:
limit = const(indices_shape[0])
body = tensor_array_scatter_helper_var(tensor_array, const(0), limit, indices, values)
self.prelude.mod[tensor_array_scatter_var] = Function(
[tensor_array, indices, values], body, self.list(self.tensor_type_var()), []
)
def define_tensor_array_split(self, value_shape=None, lengths_shape=None, force_update=False):
"""Defines a function to split the values of a tensor_t into a tensor array.
tensor_array_split(ta, value, lengths) :
list[tensor_t] -> tensor_t -> Tensor[(Any), int32] -> list[tensor_t]
Set static value and lengths shapes by specifying value_shape and lengths_shape.
Set force_update to get static value and lengths shape operator.
"""
# Skip scalar case
ndim = len(self.shape)
if ndim == 0:
return
# When this operator has already been registered, only update
# when force_update is set. This should be used only when we need to
# redefine this op for static value/indices shape.
split_name = self.get_name("tensor_array_split")
if self.is_cached(split_name):
if not force_update:
return
tensor_array_split_helper_var = self.get_global_var("ta_split_helper")
split_var = self.get_global_var("tensor_array_split")
else:
tensor_array_split_helper_name = self.get_name("ta_split_helper")
tensor_array_split_helper_var = GlobalVar(tensor_array_split_helper_name)
split_var = GlobalVar(split_name)
output_shape = [
Any(),
] + list(self.shape[1:])
output_tensor_type_var, _, output_ops = self._get_adt_by_shape(output_shape)
output_ops.define_tensor_array_write()
write_var = output_ops.get_global_var("tensor_array_write")
if value_shape is None:
value_type_var = self.tensor_type_var
take_var = self.get_global_var("tensor_take")
else:
value_type_var, _, value_adts = self._get_adt_by_shape(value_shape)
value_adts.define_tensor_take()
take_var = value_adts.get_global_var("tensor_take")
ta1 = Var("tensor_array", self.list(output_tensor_type_var()))
value1 = Var("value1", value_type_var())
offset1 = Var("offset1", scalar_type("int32"))
current1 = Var("current1", scalar_type("int32"))
limit1 = Var("limit1", scalar_type("int32"))
lengths1 = Var("lengths", TensorType(lengths_shape or [Any()], "int32"))
helper1_body = If(
equal(current1, limit1),
ta1,
write_var(
tensor_array_split_helper_var(
ta1,
value1,
add(offset1, op.take(lengths1, current1)),
add(current1, const(1)),
limit1,
lengths1,
),
current1,
take_var(value1, offset1, add(op.take(lengths1, current1), offset1)),
),
)
self.prelude.mod[tensor_array_split_helper_var] = Function(
[ta1, value1, offset1, current1, limit1, lengths1],
helper1_body,
self.list(output_tensor_type_var()),
[],
)
tensor_array = Var("tensor_array", self.list(output_tensor_type_var()))
value = Var("value", value_type_var())
lengths = Var("lengths", TensorType(lengths_shape or [Any()], "int32"))
if lengths_shape is None:
lengths_shape = op.shape_of(lengths)
lengths_limit = op.take(lengths_shape, const(0))
else:
lengths_limit = const(lengths_shape[0])
body = tensor_array_split_helper_var(
tensor_array, value, const(0), const(0), lengths_limit, lengths
)
self.prelude.mod[split_var] = Function(
[tensor_array, value, lengths], body, self.list(output_tensor_type_var()), []
)
def define_tensor_array_concat(self):
"""Defines a function to return the values in the tensor array as concatenated tensor_t.
tensor_array_concat(ta) : list[tensor_t] -> tensor_t
"""
# We don't register concat for scalar tensor array.
ndim = len(self.shape)
if ndim == 0:
return
concat_name = self.get_name("tensor_array_concat")
if self.is_cached(concat_name):
return
concat_var = GlobalVar(concat_name)
output_shape = [
Any(),
] + list(self.shape[1:])
tensor_type_var, _, output_ops = self._get_adt_by_shape(output_shape)
# Register tensor concatenate and get tensor_nil var for output shape
output_ops.define_tensor_concatenate()
tensor_concat_var = output_ops.get_global_var("tensor_concatenate")
tensor_nil_var = output_ops.get_ctor("tensor_nil")
tensor_array = Var("tensor_array", self.list(tensor_type_var()))
hd = Var("hd")
tl = Var("tl")
nil_case = Clause(PatternConstructor(self.nil), tensor_nil_var())
cons_case = Clause(
PatternConstructor(self.cons, [PatternVar(hd), PatternVar(tl)]),
Match(
tl,
[
Clause(PatternConstructor(self.nil), hd),
Clause(PatternWildcard(), tensor_concat_var(hd, concat_var(tl))),
],
False,
),
)
self.prelude.mod[concat_var] = Function(
[tensor_array], Match(tensor_array, [nil_case, cons_case], False), tensor_type_var(), []
)
def define_tensor_array_stack(self):
"""Defines a function to get the values in the tensor array as a stack tensor_t.
tensor_array_stack(l) : list[tensor_t] -> tensor_t
"""
stack_name = self.get_name("tensor_array_stack")
stack_var = self._create_global_var(stack_name)
setattr(self.prelude, stack_name, stack_var)
tensor_array = Var("tensor_array", self.list(self.tensor_type_var()))
expand_dims_var = self.get_global_var("tensor_expand_dims")
# Register tensor_concatenate for output_shape
new_axis = Any() if not self.batch_dim or self.batch_dim != 1 else self.batch_dim
output_shape = [
new_axis,
] + list(self.shape)
_, _, output_ops = self._get_adt_by_shape(output_shape)
output_ops.define_tensor_concatenate()
concat_var = output_ops.get_global_var("tensor_concatenate")
tensor_array_expand_dims = self.prelude.map(expand_dims_var, tensor_array)
if self.batch_dim is not None and self.batch_dim == 1:
# only one element
tensors = self.prelude.id(
self.prelude.hd(tensor_array_expand_dims),
)
else:
tensors = self.prelude.foldl(
concat_var,
self.prelude.hd(tensor_array_expand_dims),
self.prelude.tl(tensor_array_expand_dims),
)
output_tensor_type_var, _, _ = self._get_adt_by_shape(output_shape)
self.prelude.mod[stack_var] = Function(
[tensor_array], tensors, output_tensor_type_var(), []
)
def define_tensor_array_gather(self):
"""Defines a function to return the selected values in a tensor array as tensor_t.
tensor_array_gather(ta, indices) : list[tensor_t] -> Tensor[(Any), int32] -> tensor_t
"""
helper_name = self.get_name("tensor_array_gather_helper")
helper_var = self._create_global_var(helper_name)
new_axis = Any() if self.batch_dim is None or self.batch_dim != 1 else self.batch_dim
output_shape = [
new_axis,
] + list(self.shape)
output_tensor_type_var, _, _ = self._get_adt_by_shape(output_shape)
stack_var = self.get_global_var("tensor_array_stack")
read_var = self.get_global_var("tensor_array_read")
ta = Var("ta", self.list(self.tensor_type_var()))
accu = Var("accu", self.list(self.tensor_type_var()))
current = Var("current", scalar_type("int32"))
limit = Var("limit", scalar_type("int32"))
indices_ = Var("indices_", TensorType([Any()], "int32"))
helper_body = If(
equal(current, const(0)),
stack_var(accu),
helper_var(
ta,
self.cons(read_var(ta, op.take(indices_, subtract(current, const(1)))), accu),
subtract(current, const(1)),
limit,
indices_,
),
)
self.prelude.mod[helper_var] = Function(
[ta, accu, current, limit, indices_], helper_body, output_tensor_type_var(), []
)
gather_name = self.get_name("tensor_array_gather")
gather_var = self._create_global_var(gather_name)
tensor_array = Var("tensor_array", self.list(self.tensor_type_var()))
indices = Var("indices", TensorType([Any()], "int32"))
indices_shape = op.shape_of(indices)
limit = op.take(indices_shape, const(0))
body = helper_var(tensor_array, self.nil(), limit, limit, indices)
self.prelude.mod[gather_var] = Function(
[tensor_array, indices], body, output_tensor_type_var(), []
)
def define_tensor_get_data(self):
"""Defines a function to get a Tensor from tensor_t with given shape."""
tensor_get_data_name = self.get_name("tensor_get_data")
tensor_get_data_var = self._create_global_var(tensor_get_data_name)
tensor_constructor = self.get_ctor("tensor_constructor")
t = Var("tensor", self.tensor_type_var())
tvar = Var("t")
case = Clause(PatternConstructor(tensor_constructor, [PatternVar(tvar)]), tvar)
self.prelude.mod[tensor_get_data_var] = Function(
[t], Match(t, [case], False), TensorType(self.shape, self.dtype), []
)
def register(self):
"""Register all tensor array ops in Prelude"""
self.define_tensor_adt()
self.define_tensor_take()
self.define_tensor_concatenate()
self.define_tensor_expand_dims()
self.define_tensor_array()
self.define_tensor_array_read()
self.define_tensor_array_write()
self.define_tensor_array_unstack()
self.define_tensor_array_scatter()
self.define_tensor_array_split()
self.define_tensor_array_concat()
self.define_tensor_array_stack()
self.define_tensor_array_gather()
self.define_tensor_get_data()
def _get_adt_by_shape(self, shape):
"""Get ADT type and constructor with given shape."""
adt_ops = StaticTensorArrayOps(self.prelude, self.dtype, shape, self.batch_dim)
adt_ops.define_tensor_adt()
tensor_type_var = adt_ops.get_type("tensor_t")
tensor_constructor = adt_ops.get_ctor("tensor_constructor")
return tensor_type_var, tensor_constructor, adt_ops
def _create_global_var(self, name):
"""Create a GlobalVar if doesn't exist in prelude."""
global_var_name_set = set()
for g_var_name in self.prelude.mod.get_global_vars():
global_var_name_set.add(g_var_name.name_hint)
if name not in global_var_name_set:
gvar = GlobalVar(name)
else:
gvar = self.prelude.mod.get_global_var(name)
return gvar
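# Hedged usage sketch for StaticTensorArrayOps; the dtype and shape below are
# illustrative:
#
#   import tvm
#   from tvm.relay.prelude import Prelude, StaticTensorArrayOps
#   prelude = Prelude(tvm.IRModule())
#   static_ops = StaticTensorArrayOps(prelude, "float32", (2, 3))
#   static_ops.register()
#   write_var = static_ops.get_global_var("tensor_array_write")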
class TensorArrayOps(object):
"""Contains tensor array related ops"""
def __init__(self, prelude, dtype):
"""Create tensor array ops registry"""
self.prelude = prelude
self.dtype = dtype
self.list, self.cons, self.nil = self.prelude.mod.get_type("List")
def get_name(self, canonical):
"""Get name corresponding to the canonical name"""
return self.prelude.get_name(canonical, self.dtype)
def get_global_var(self, canonical):
"""Get global corresponding to the canonical name"""
return self.prelude.get_global_var(canonical, self.dtype)
def get_type(self, canonical):
"""Get type corresponding to the canonical name"""
return self.prelude.get_type(canonical, self.dtype)
def get_ctor(self, canonical):
"""Get ctor corresponding to the canonical name"""
return self.prelude.get_ctor(self.tensor_type_var.name_hint, canonical, self.dtype)
def define_tensor_adt(self):
"""Defines the dynamic tensor ADT, which is the container for tensors
with variable shapes."""
tensor_type_name = self.get_name("tensor_t")
self.tensor_type_var = tensor_type_var = GlobalTypeVar(tensor_type_name)
tensor0_type = TensorType([], self.dtype)
tensor1_type = TensorType([Any()], self.dtype)
tensor2_type = TensorType([Any(), Any()], self.dtype)
tensor3_type = TensorType([Any(), Any(), Any()], self.dtype)
tensor4_type = TensorType([Any(), Any(), Any(), Any()], self.dtype)
tensor5_type = TensorType([Any(), Any(), Any(), Any(), Any()], self.dtype)
tensor6_type = TensorType([Any(), Any(), Any(), Any(), Any(), Any()], self.dtype)
tensor_nil_name = self.get_name("tensor_nil")
tensor0_name = self.get_name("tensor0")
tensor1_name = self.get_name("tensor1")
tensor2_name = self.get_name("tensor2")
tensor3_name = self.get_name("tensor3")
tensor4_name = self.get_name("tensor4")
tensor5_name = self.get_name("tensor5")
tensor6_name = self.get_name("tensor6")
tensor_nil_case = Constructor(tensor_nil_name, [], tensor_type_var)
tensor0_case = Constructor(tensor0_name, [tensor0_type], tensor_type_var)
tensor1_case = Constructor(tensor1_name, [tensor1_type], tensor_type_var)
tensor2_case = Constructor(tensor2_name, [tensor2_type], tensor_type_var)
tensor3_case = Constructor(tensor3_name, [tensor3_type], tensor_type_var)
tensor4_case = Constructor(tensor4_name, [tensor4_type], tensor_type_var)
tensor5_case = Constructor(tensor5_name, [tensor5_type], tensor_type_var)
tensor6_case = Constructor(tensor6_name, [tensor6_type], tensor_type_var)
self.prelude.mod[tensor_type_var] = TypeData(
tensor_type_var,
[],
[
tensor_nil_case,
tensor0_case,
tensor1_case,
tensor2_case,
tensor3_case,
tensor4_case,
tensor5_case,
tensor6_case,
],
)
def define_tensor_take(self):
"""Defines a function to return a range of tensor_t on axis 0.
tensor_take(t, lower, upper) :
tensor_t -> Tensor[(), int32] -> Tensor[(), int32] -> tensor_t
"""
take_name = self.get_name("tensor_take")
take_var = GlobalVar(take_name)
tensor_t = self.tensor_type_var
tensor1_var = self.get_ctor("tensor1")
tensor2_var = self.get_ctor("tensor2")
tensor3_var = self.get_ctor("tensor3")
tensor4_var = self.get_ctor("tensor4")
tensor5_var = self.get_ctor("tensor5")
tensor6_var = self.get_ctor("tensor6")
t = Var("tensor", tensor_t())
lower = Var("lower", scalar_type("int32"))
upper = Var("upper", scalar_type("int32"))
t1 = Var("t1")
t2 = Var("t2")
t3 = Var("t3")
t4 = Var("t4")
t5 = Var("t5")
t6 = Var("t6")
tensor1_case = Clause(
PatternConstructor(tensor1_var, [PatternVar(t1)]),
tensor1_var(op.take(t1, op.arange(lower, upper, dtype="int32"))),
)
tensor2_case = Clause(
PatternConstructor(tensor2_var, [PatternVar(t2)]),
tensor2_var(op.take(t2, op.arange(lower, upper, dtype="int32"), axis=0)),
)
tensor3_case = Clause(
PatternConstructor(tensor3_var, [PatternVar(t3)]),
tensor3_var(op.take(t3, op.arange(lower, upper, dtype="int32"), axis=0)),
)
tensor4_case = Clause(
PatternConstructor(tensor4_var, [PatternVar(t4)]),
tensor4_var(op.take(t4, op.arange(lower, upper, dtype="int32"), axis=0)),
)
tensor5_case = Clause(
PatternConstructor(tensor5_var, [PatternVar(t5)]),
tensor5_var(op.take(t5, op.arange(lower, upper, dtype="int32"), axis=0)),
)
tensor6_case = Clause(
PatternConstructor(tensor6_var, [PatternVar(t6)]),
tensor6_var(op.take(t6, op.arange(lower, upper, dtype="int32"), axis=0)),
)
self.prelude.mod[take_var] = Function(
[t, lower, upper],
Match(
t,
[
tensor1_case,
tensor2_case,
tensor3_case,
tensor4_case,
tensor5_case,
tensor6_case,
],
False,
),
tensor_t(),
[],
)
def define_tensor_expand_dims(self):
"""Defines a function to grow a tensor_t's rank by adding one dimension in front
of the original tensor_t.
tensor_expand_dims(t) : tensor_t -> tensor_t
"""
expand_dims_name = self.get_name("tensor_expand_dims")
expand_dims_var = GlobalVar(expand_dims_name)
tensor_type_var = self.tensor_type_var
x = Var("x", tensor_type_var())
t0 = Var("t0")
t1 = Var("t1")
t2 = Var("t2")
t3 = Var("t3")
t4 = Var("t4")
t5 = Var("t5")
tensor0_var = self.get_ctor("tensor0")
tensor1_var = self.get_ctor("tensor1")
tensor2_var = self.get_ctor("tensor2")
tensor3_var = self.get_ctor("tensor3")
tensor4_var = self.get_ctor("tensor4")
tensor5_var = self.get_ctor("tensor5")
tensor6_var = self.get_ctor("tensor6")
tensor0_case = Clause(
PatternConstructor(tensor0_var, [PatternVar(t0)]), tensor1_var(op.expand_dims(t0, 0, 1))
)
tensor1_case = Clause(
PatternConstructor(tensor1_var, [PatternVar(t1)]), tensor2_var(op.expand_dims(t1, 0, 1))
)
tensor2_case = Clause(
PatternConstructor(tensor2_var, [PatternVar(t2)]), tensor3_var(op.expand_dims(t2, 0, 1))
)
tensor3_case = Clause(
PatternConstructor(tensor3_var, [PatternVar(t3)]), tensor4_var(op.expand_dims(t3, 0, 1))
)
tensor4_case = Clause(
PatternConstructor(tensor4_var, [PatternVar(t4)]), tensor5_var(op.expand_dims(t4, 0, 1))
)
tensor5_case = Clause(
PatternConstructor(tensor5_var, [PatternVar(t5)]), tensor6_var(op.expand_dims(t5, 0, 1))
)
self.prelude.mod[expand_dims_var] = Function(
[x],
Match(
x,
[
tensor0_case,
tensor1_case,
tensor2_case,
tensor3_case,
tensor4_case,
tensor5_case,
],
False,
),
tensor_type_var(),
)
def define_tensor_concat(self):
"""Defines a function to concatenate two tensor_t on the first axis
tensor_concatenate(t) : tensor_t -> tensor_t -> tensor_t
"""
concat_name = self.get_name("tensor_concatenate")
concat_var = GlobalVar(concat_name)
tensor_type_var = self.tensor_type_var
x = Var("x", tensor_type_var())
y = Var("y", tensor_type_var())
tensor1_var = self.get_ctor("tensor1")
tensor2_var = self.get_ctor("tensor2")
tensor3_var = self.get_ctor("tensor3")
tensor4_var = self.get_ctor("tensor4")
t11 = Var("t11")
t12 = Var("t12")
t21 = Var("t21")
t22 = Var("t22")
t31 = Var("t31")
t32 = Var("t32")
t41 = Var("t41")
t42 = Var("t42")
tensor1_case = Clause(
PatternConstructor(tensor1_var, [PatternVar(t11)]),
Match(
y,
[
Clause(
PatternConstructor(tensor1_var, [PatternVar(t12)]),
tensor1_var(op.concatenate([t11, t12], axis=0)),
)
],
False,
),
)
tensor2_case = Clause(
PatternConstructor(tensor2_var, [PatternVar(t21)]),
Match(
y,
[
Clause(
PatternConstructor(tensor2_var, [PatternVar(t22)]),
tensor2_var(op.concatenate([t21, t22], axis=0)),
)
],
False,
),
)
tensor3_case = Clause(
PatternConstructor(tensor3_var, [PatternVar(t31)]),
Match(
y,
[
Clause(
PatternConstructor(tensor3_var, [PatternVar(t32)]),
tensor3_var(op.concatenate([t31, t32], axis=0)),
)
],
False,
),
)
tensor4_case = Clause(
PatternConstructor(tensor4_var, [PatternVar(t41)]),
Match(
y,
[
Clause(
PatternConstructor(tensor4_var, [PatternVar(t42)]),
tensor4_var(op.concatenate([t41, t42], axis=0)),
)
],
False,
),
)
# op.concatenate does not support tensors with rank higher than 4
self.prelude.mod[concat_var] = Function(
[x, y],
Match(x, [tensor1_case, tensor2_case, tensor3_case, tensor4_case], False),
tensor_type_var(),
)
def define_tensor_array(self):
"""Defines a function to create a tensor array with size n.
tensor_array(n) : Tensor[(), int32] -> list[tensor_t]
"""
tensor_array_constructor_name = self.get_name("tensor_array")
tensor_array_constructor_var = GlobalVar(tensor_array_constructor_name)
setattr(self.prelude, tensor_array_constructor_name, tensor_array_constructor_var)
tensor_nil_var = self.get_ctor("tensor_nil")
tensor_type_var = self.get_ctor("tensor_t")
n = Var("x", scalar_type("int32"))
body = If(
equal(n, const(0)),
self.nil(),
self.cons(tensor_nil_var(), tensor_array_constructor_var(subtract(n, const(1)))),
)
self.prelude.mod[tensor_array_constructor_var] = Function(
[n], body, self.list(tensor_type_var()), []
)
def define_tensor_array_read(self):
"""Defines a function to get the head of a list. Assume the list has at least one
element.
tensor_array_read(ta, n) : list[tensor_t] -> Tensor[(), int32] -> tensor_t
"""
read_name = self.get_name("tensor_array_read")
read_var = GlobalVar(read_name)
setattr(self.prelude, read_name, read_var)
tensor_type_var = self.tensor_type_var
tensor_array = Var("tensor_array", self.list(tensor_type_var()))
n = Var("x", scalar_type("int32"))
self.prelude.mod[read_var] = Function(
[tensor_array, n], self.prelude.nth(tensor_array, n), tensor_type_var(), []
)
def define_tensor_array_write(self):
"""Defines a function to update a tensor array at index n with value v.
tensor_array_write(ta, n, v) :
list[tensor_t] -> Tensor[(), int32] -> tensor_t -> list[tensor_t]
"""
write_name = self.get_name("tensor_array_write")
write_var = GlobalVar(write_name)
tensor_type_var = self.tensor_type_var
tensor_array = Var("tensor_array", self.list(tensor_type_var()))
n = Var("x", scalar_type("int32"))
v = Var("v", tensor_type_var())
self.prelude.mod[write_var] = Function(
[tensor_array, n, v],
self.prelude.update(tensor_array, n, v),
self.list(tensor_type_var()),
[],
)
def define_tensor_array_unstack_tensor1(self):
"""Defines a function to unstack the values of a tensor_t with rank 1 in a tensor array.
tensor_array_unstack_tensor1(t) : tensor_t -> list[tensor_t]
"""
helper_name = self.get_name("tensor_array_unstack_tensor1_helper")
helper_var = GlobalVar(helper_name)
tensor = Var("t", TensorType([Any()], self.dtype))
up = Var("up", scalar_type("int32"))
i = Var("i", scalar_type("int32"))
tensor_type_var = self.tensor_type_var
tensor0_var = self.get_ctor("tensor0")
helper_body = If(
equal(i, up),
self.nil(),
self.cons(tensor0_var(op.take(tensor, i)), helper_var(add(i, const(1)), up, tensor)),
)
self.prelude.mod[helper_var] = Function(
[i, up, tensor], helper_body, self.list(tensor_type_var()), []
)
unstack_name = self.get_name("tensor_array_unstack_tensor1")
unstack_var = GlobalVar(unstack_name)
tensor1 = Var("tensor", TensorType([Any()], self.dtype))
shape = op.shape_of(tensor1)
ndim = op.take(shape, const(0))
self.prelude.mod[unstack_var] = Function(
[tensor1], helper_var(const(0), ndim, tensor1), self.list(tensor_type_var()), []
)
def define_tensor_array_unstack_tensor2(self):
"""Defines a function to unstack the values of a tensor_t with rank 2 in a tensor array.
tensor_array_unstack_tensor2(t) : tensor_t -> list[tensor_t]
"""
helper_name = self.get_name("tensor_array_unstack_tensor2_helper")
helper_var = GlobalVar(helper_name)
setattr(self.prelude, helper_name, helper_var)
tensor = Var("t", TensorType([Any(), Any()], self.dtype))
up = Var("up", scalar_type("int32"))
i = Var("i", scalar_type("int32"))
helper_body = If(
equal(i, up),
self.nil(),
self.cons(
self.get_ctor("tensor1")(op.take(tensor, i, axis=0)),
helper_var(add(i, const(1)), up, tensor),
),
)
self.prelude.mod[helper_var] = Function(
[i, up, tensor], helper_body, self.list(self.tensor_type_var()), []
)
tensor_array_unstack_tensor2_name = self.get_name("tensor_array_unstack_tensor2")
tensor_array_unstack_tensor2_var = GlobalVar(tensor_array_unstack_tensor2_name)
setattr(self.prelude, tensor_array_unstack_tensor2_name, tensor_array_unstack_tensor2_var)
tensor2 = Var("tensor", TensorType([Any(), Any()], self.dtype))
shape = op.shape_of(tensor2)
ndim = op.take(shape, const(0))
self.prelude.mod[tensor_array_unstack_tensor2_var] = Function(
[tensor2],
helper_var(const(0), ndim, tensor2),
self.list(self.tensor_type_var()),
[],
)
def define_tensor_array_unstack_tensor3(self):
"""Defines a function to unstack the values of a tensor_t with rank 3 in a tensor array.
tensor_array_unstack_tensor3(t) : tensor_t -> list[tensor_t]
"""
helper_name = self.get_name("tensor_array_unstack_tensor3_helper")
helper_var = GlobalVar(helper_name)
setattr(self.prelude, helper_name, helper_var)
tensor = Var("t", TensorType([Any(), Any(), Any()], self.dtype))
up = Var("up", scalar_type("int32"))
i = Var("i", scalar_type("int32"))
helper_body = If(
equal(i, up),
self.nil(),
self.cons(
self.get_ctor("tensor2")(op.take(tensor, i, axis=0)),
helper_var(add(i, const(1)), up, tensor),
),
)
self.prelude.mod[helper_var] = Function(
[i, up, tensor], helper_body, self.list(self.tensor_type_var()), []
)
tensor_array_unstack_tensor3_name = self.get_name("tensor_array_unstack_tensor3")
tensor_array_unstack_tensor3_var = GlobalVar(tensor_array_unstack_tensor3_name)
setattr(self.prelude, tensor_array_unstack_tensor3_name, tensor_array_unstack_tensor3_var)
tensor3 = Var("tensor", TensorType([Any(), Any(), Any()], self.dtype))
shape = op.shape_of(tensor3)
ndim = op.take(shape, const(0))
self.prelude.mod[tensor_array_unstack_tensor3_var] = Function(
[tensor3],
helper_var(const(0), ndim, tensor3),
self.list(self.tensor_type_var()),
[],
)
def define_tensor_array_unstack_tensor4(self):
"""Defines a function to unstack the values of a tensor_t with rank 4 in a tensor array.
tensor_array_unstack_tensor4(t) : tensor_t -> list[tensor_t]
"""
helper_name = self.get_name("tensor_array_unstack_tensor4_helper")
helper_var = GlobalVar(helper_name)
setattr(self.prelude, helper_name, helper_var)
tensor = Var("t", TensorType([Any(), Any(), Any(), Any()], self.dtype))
up = Var("up", scalar_type("int32"))
i = Var("i", scalar_type("int32"))
helper_body = If(
equal(i, up),
self.nil(),
self.cons(
self.get_ctor("tensor3")(op.take(tensor, i, axis=0)),
helper_var(add(i, const(1)), up, tensor),
),
)
self.prelude.mod[helper_var] = Function(
[i, up, tensor], helper_body, self.list(self.tensor_type_var()), []
)
tensor_array_unstack_tensor4_name = self.get_name("tensor_array_unstack_tensor4")
tensor_array_unstack_tensor4_var = GlobalVar(tensor_array_unstack_tensor4_name)
setattr(self.prelude, tensor_array_unstack_tensor4_name, tensor_array_unstack_tensor4_var)
tensor4 = Var("tensor", TensorType([Any(), Any(), Any(), Any()], self.dtype))
shape = op.shape_of(tensor4)
ndim = op.take(shape, const(0))
self.prelude.mod[tensor_array_unstack_tensor4_var] = Function(
[tensor4],
helper_var(const(0), ndim, tensor4),
self.list(self.tensor_type_var()),
[],
)
def define_tensor_array_unstack_tensor5(self):
"""Defines a function to unstack the values of a tensor_t with rank 5 in a tensor array.
tensor_array_unstack_tensor5(t) : tensor_t -> list[tensor_t]
"""
helper_name = self.get_name("tensor_array_unstack_tensor5_helper")
helper_var = GlobalVar(helper_name)
setattr(self.prelude, helper_name, helper_var)
tensor = Var("t", TensorType([Any(), Any(), Any(), Any(), Any()], self.dtype))
up = Var("up", scalar_type("int32"))
i = Var("i", scalar_type("int32"))
helper_body = If(
equal(i, up),
self.nil(),
self.cons(
self.get_ctor("tensor4")(op.take(tensor, i, axis=0)),
helper_var(add(i, const(1)), up, tensor),
),
)
self.prelude.mod[helper_var] = Function(
[i, up, tensor], helper_body, self.list(self.tensor_type_var()), []
)
tensor_array_unstack_tensor5_name = self.get_name("tensor_array_unstack_tensor5")
tensor_array_unstack_tensor5_var = GlobalVar(tensor_array_unstack_tensor5_name)
setattr(self.prelude, tensor_array_unstack_tensor5_name, tensor_array_unstack_tensor5_var)
tensor5 = Var("tensor", TensorType([Any(), Any(), Any(), Any(), Any()], self.dtype))
shape = op.shape_of(tensor5)
ndim = op.take(shape, const(0))
self.prelude.mod[tensor_array_unstack_tensor5_var] = Function(
[tensor5],
helper_var(const(0), ndim, tensor5),
self.list(self.tensor_type_var()),
[],
)
def define_tensor_array_unstack_tensor6(self):
"""Defines a function to unstack the values of a tensor_t with rank 6 in a tensor array.
tensor_array_unstack_tensor6(t) : tensor_t -> list[tensor_t]
"""
helper_name = self.get_name("tensor_array_unstack_tensor6_helper")
helper_var = GlobalVar(helper_name)
setattr(self.prelude, helper_name, helper_var)
tensor = Var("t", TensorType([Any(), Any(), Any(), Any(), Any(), Any()], self.dtype))
up = Var("up", scalar_type("int32"))
i = Var("i", scalar_type("int32"))
helper_body = If(
equal(i, up),
self.nil(),
self.cons(
self.get_ctor("tensor5")(op.take(tensor, i, axis=0)),
helper_var(add(i, const(1)), up, tensor),
),
)
self.prelude.mod[helper_var] = Function(
[i, up, tensor], helper_body, self.list(self.tensor_type_var()), []
)
tensor_array_unstack_tensor6_name = self.get_name("tensor_array_unstack_tensor6")
tensor_array_unstack_tensor6_var = GlobalVar(tensor_array_unstack_tensor6_name)
setattr(self.prelude, tensor_array_unstack_tensor6_name, tensor_array_unstack_tensor6_var)
tensor6 = Var("tensor", TensorType([Any(), Any(), Any(), Any(), Any(), Any()], self.dtype))
shape = op.shape_of(tensor6)
ndim = op.take(shape, const(0))
self.prelude.mod[tensor_array_unstack_tensor6_var] = Function(
[tensor6],
helper_var(const(0), ndim, tensor6),
self.list(self.tensor_type_var()),
[],
)
def define_tensor_array_scatter(self):
"""Defines a function to scatter the values of a tensor_t in indices of a tensor array.
tensor_array_scatter(ta, indices, value) :
list[tensor_t] -> Tensor[(Any), int32] -> tensor_t -> list[tensor_t]
"""
tensor_array_scatter_helper_name = self.get_name("tensor_array_scatter_helper")
tensor_array_scatter_helper_var = GlobalVar(tensor_array_scatter_helper_name)
tensor_t = self.tensor_type_var
ta = Var("ta", self.list(tensor_t()))
current = Var("current", scalar_type("int32"))
limit = Var("limit", scalar_type("int32"))
indices_ = Var("indices_", TensorType([Any()], "int32"))
values_ = Var("values_", self.list(tensor_t()))
write_var = self.get_global_var("tensor_array_write")
read_var = self.get_global_var("tensor_array_read")
helper_body = If(
equal(current, limit),
ta,
tensor_array_scatter_helper_var(
write_var(ta, op.take(indices_, current), read_var(values_, current)),
add(current, const(1)),
limit,
indices_,
values_,
),
)
self.prelude.mod[tensor_array_scatter_helper_var] = Function(
[ta, current, limit, indices_, values_], helper_body, self.list(tensor_t()), []
)
tensor_array_scatter_name = self.get_name("tensor_array_scatter")
tensor_array_scatter_var = GlobalVar(tensor_array_scatter_name)
setattr(self.prelude, tensor_array_scatter_name, tensor_array_scatter_var)
tensor_array = Var("tensor_array", self.list(tensor_t()))
indices = Var("indices", TensorType([Any()], "int32"))
values = Var("values", self.list(tensor_t()))
indices_shape = op.shape_of(indices)
limit = op.take(indices_shape, const(0))
body = tensor_array_scatter_helper_var(tensor_array, const(0), limit, indices, values)
self.prelude.mod[tensor_array_scatter_var] = Function(
[tensor_array, indices, values], body, self.list(tensor_t()), []
)
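# Semantics sketch (illustrative, not executed): with ta = [t0, t1, t2],
# indices = [2, 0] and values = [v0, v1], tensor_array_scatter returns
# [v1, t1, v0]: values[i] is written to position indices[i], and positions
# not named in indices keep their original tensors.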
def define_tensor_array_split(self):
"""Defines a function to split the values of a tensor_t into a tensor array.
tensor_array_split(ta, value, lengths) :
list[tensor_t] -> tensor_t -> Tensor[(Any), int32] -> list[tensor_t]
"""
tensor_t = self.tensor_type_var
tensor_array_split_helper_name = self.get_name("ta_split_helper")
tensor_array_split_helper_var = GlobalVar(tensor_array_split_helper_name)
setattr(self.prelude, tensor_array_split_helper_name, tensor_array_split_helper_var)
ta1 = Var("tensor_array", self.list(tensor_t()))
value1 = Var("value1", tensor_t())
offset1 = Var("offset1", scalar_type("int32"))
current1 = Var("current1", scalar_type("int32"))
limit1 = Var("limit1", scalar_type("int32"))
lengths1 = Var("lengths", TensorType([Any()], "int32"))
write_var = self.get_global_var("tensor_array_write")
take_var = self.get_global_var("tensor_take")
helper1_body = If(
equal(current1, limit1),
ta1,
write_var(
tensor_array_split_helper_var(
ta1,
value1,
add(offset1, op.take(lengths1, current1)),
add(current1, const(1)),
limit1,
lengths1,
),
current1,
take_var(value1, offset1, add(op.take(lengths1, current1), offset1)),
),
)
self.prelude.mod[tensor_array_split_helper_var] = Function(
[ta1, value1, offset1, current1, limit1, lengths1],
helper1_body,
self.list(tensor_t()),
[],
)
split_name = self.get_name("tensor_array_split")
split_var = GlobalVar(split_name)
setattr(self.prelude, split_name, split_var)
tensor_array = Var("tensor_array", self.list(tensor_t()))
value = Var("value", tensor_t())
lengths = Var("lengths", TensorType([Any()], "int32"))
lengths_shape = op.shape_of(lengths)
lengths_limit = op.take(lengths_shape, const(0))
body = tensor_array_split_helper_var(
tensor_array, value, const(0), const(0), lengths_limit, lengths
)
self.prelude.mod[split_var] = Function(
[tensor_array, value, lengths], body, self.list(tensor_t()), []
)
def define_tensor_array_concat(self):
"""Defines a function to return the values in the tensor array as concatenated tensor_t.
tensor_array_concat(ta) : list[tensor_t] -> tensor_t
"""
concat_name = self.get_name("tensor_array_concat")
concat_var = GlobalVar(concat_name)
setattr(self.prelude, concat_name, concat_var)
tensor_concat_var = self.get_global_var("tensor_concatenate")
tensor_t = self.tensor_type_var
tensor_nil_var = self.get_ctor("tensor_nil")
tensor_array = Var("tensor_array", self.list(tensor_t()))
hd = Var("hd")
tl = Var("tl")
nil_case = Clause(PatternConstructor(self.nil), tensor_nil_var())
cons_case = Clause(
PatternConstructor(self.cons, [PatternVar(hd), PatternVar(tl)]),
Match(
tl,
[
Clause(PatternConstructor(self.nil), hd),
Clause(PatternWildcard(), tensor_concat_var(hd, concat_var(tl))),
],
False,
),
)
self.prelude.mod[concat_var] = Function(
[tensor_array], Match(tensor_array, [nil_case, cons_case], False), tensor_t(), []
)
def define_tensor_array_gather(self):
"""Defines a function to return the selected values in a tensor array as tensor_t.
tensor_array_gather(ta, indices) : list[tensor_t] -> Tensor[(Any), int32] -> tensor_t
"""
helper_name = self.get_name("tensor_array_gather_helper")
helper_var = GlobalVar(helper_name)
setattr(self.prelude, helper_name, helper_var)
tensor_type_var = self.tensor_type_var
stack_var = self.get_var("tensor_array_stack")
read_var = self.get_var("tensor_array_read")
ta = Var("ta", self.list(tensor_type_var()))
accu = Var("accu", self.list(tensor_type_var()))
current = Var("current", scalar_type("int32"))
limit = Var("limit", scalar_type("int32"))
indices_ = Var("indices_", TensorType([Any()], "int32"))
helper_body = If(
equal(current, const(0)),
stack_var(accu),
helper_var(
ta,
self.cons(read_var(ta, op.take(indices_, subtract(current, const(1)))), accu),
subtract(current, const(1)),
limit,
indices_,
),
)
self.prelude.mod[helper_var] = Function(
[ta, accu, current, limit, indices_], helper_body, tensor_type_var(), []
)
gather_name = self.get_name("tensor_array_gather")
gather_var = GlobalVar(gather_name)
setattr(self.prelude, gather_name, gather_var)
tensor_array = Var("tensor_array", self.list(tensor_type_var()))
indices = Var("indices", TensorType([Any()], "int32"))
indices_shape = op.shape_of(indices)
limit = op.take(indices_shape, const(0))
body = helper_var(tensor_array, self.nil(), limit, limit, indices)
self.prelude.mod[gather_var] = Function(
[tensor_array, indices], body, tensor_type_var(), []
)
def define_tensor_array_stack(self):
"""Defines a function to get the values in the tensor array as a stack tensor_t.
tensor_array_stack(l) : list[tensor_t] -> tensor_t
"""
stack_name = self.get_name("tensor_array_stack")
stack_var = GlobalVar(stack_name)
setattr(self.prelude, stack_name, stack_var)
tensor_type_var = self.tensor_type_var
tensor_array = Var("tensor_array", self.list(tensor_type_var()))
expand_dims_var = self.get_global_var("tensor_expand_dims")
concat_var = self.get_global_var("tensor_concatenate")
tensor_array_expand_dims = self.prelude.map(expand_dims_var, tensor_array)
tensors = self.prelude.foldl(
concat_var,
self.prelude.hd(tensor_array_expand_dims),
self.prelude.tl(tensor_array_expand_dims),
)
self.prelude.mod[stack_var] = Function(
[tensor_array], ToANormalFormExpr(tensors), tensor_type_var(), []
)
def register(self):
"""Register all tensor array ops in Prelude"""
self.define_tensor_adt()
self.define_tensor_take()
self.define_tensor_expand_dims()
self.define_tensor_concat()
self.define_tensor_array()
self.define_tensor_array_read()
self.define_tensor_array_write()
self.define_tensor_array_unstack_tensor1()
self.define_tensor_array_unstack_tensor2()
self.define_tensor_array_unstack_tensor3()
self.define_tensor_array_unstack_tensor4()
self.define_tensor_array_unstack_tensor5()
self.define_tensor_array_unstack_tensor6()
self.define_tensor_array_scatter()
self.define_tensor_array_split()
self.define_tensor_array_concat()
self.define_tensor_array_stack()
# TODO(wweic): Gather fails in PartialEvaluate
# self.define_tensor_array_gather()
class Prelude:
"""Contains standard definitions."""
def __init__(self, mod=None):
if mod is None:
mod = IRModule()
self.mod = mod
self.load_prelude()
def get_name(self, canonical, dtype):
"""Get name corresponding to the canonical name"""
if canonical == "tensor_t":
return "tensor_{}_t".format(dtype)
return "{}_{}".format(canonical, dtype)
def get_global_var(self, canonical, dtype):
"""Get global var corresponding to the canonical name"""
name = self.get_name(canonical, dtype)
return self.mod.get_global_var(name)
def get_type(self, canonical, dtype):
"""Get type corresponding to the canonical name"""
name = self.get_name(canonical, dtype)
return self.mod.get_global_type_var(name)
def get_ctor(self, ty_name, canonical, dtype):
"""Get constructor corresponding to the canonical name"""
name = self.get_name(canonical, dtype)
ctors = self.mod.get_type(ty_name)
for ctor in ctors:
if ctor.name_hint == name:
return ctor
raise Exception(f"could not find {name}")
def get_tensor_ctor(self, canonical, dtype):
ty = self.get_type("tensor_t", dtype)
return self.get_ctor(ty.name_hint, canonical, dtype)
def get_name_static(self, canonical, dtype, shape, batch_dim=None):
"""Get name corresponding to the canonical name"""
return _get_name_static(canonical, dtype, shape, batch_dim)
def get_global_var_static(self, canonical, dtype, shape, batch_dim=None):
"""Get var corresponding to the canonical name"""
name = self.get_name_static(canonical, dtype, shape, batch_dim)
return self.mod.get_global_var(name)
def get_type_static(self, canonical, dtype, shape):
"""Get type corresponding to the canonical name"""
name = self.get_name_static(canonical, dtype, shape)
return self.mod.get_global_type_var(name)
def get_ctor_static(self, ty_name, name, dtype, shape):
"""Get constructor corresponding to the canonical name"""
ty_name = self.get_name_static(ty_name, dtype, shape)
name = self.get_name_static(name, dtype, shape)
ctors = self.mod.get_type(ty_name)
for ctor in ctors:
if ctor.name_hint == name:
return ctor
raise Exception(f"could not find {name}")
def get_tensor_ctor_static(self, name, dtype, shape):
"""Get constructor corresponding to the canonical name"""
return self.get_ctor_static("tensor_t", name, dtype, shape)
def load_prelude(self):
"""Parses the Prelude from Relay's text format into a module."""
# TODO(@jroesch): we should remove this helper when we port over prelude
self.mod.import_from_std("prelude.rly")
GLOBAL_DEFS = [
"id",
"compose",
"flip",
"hd",
"tl",
"nth",
"update",
"map",
"foldl",
"foldr",
"foldr1",
"concat",
"filter",
"zip",
"rev",
"map_accuml",
"map_accumr",
"unfoldl",
"unfoldr",
"sum",
"length",
"tmap",
"size",
"iterate",
]
for global_def in GLOBAL_DEFS:
setattr(self, global_def, self.mod.get_global_var(global_def))
for dtype in [
"float32",
"float16",
"float64",
"int32",
"uint8",
"int8",
"int16",
"uint16",
"int64",
]:
tensor_array_ops = TensorArrayOps(self, dtype)
tensor_array_ops.register()
# Renamer doesn't properly deal with constructors, etc
# self.mod = AnnotateSpans()(self.mod)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/qnn/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""QNN dialect operators and IR passes."""
from __future__ import absolute_import as _abs
from . import op
from . import transform
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/qnn/op/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, redefined-builtin
"""QNN dialect related operators."""
from __future__ import absolute_import as _abs
from .qnn import *
from .op import register_qnn_legalize, register_qnn_canonicalize
from . import _qnn, legalizations, layout_conversions, canonicalizations
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/qnn/op/_make.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Constructor APIs"""
import tvm._ffi
tvm._ffi._init_api("relay.qnn.op._make", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/qnn/op/_qnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, len-as-condition
"""QNN operator feature registration"""
from tvm import topi
from .. import strategy
from ...op.op import register_compute
from ...op.op import register_injective_schedule
from ...op.op import register_strategy, register_pattern, OpPattern
@register_compute("qnn.simulated_quantize")
def simulated_quantize_compute(attrs, inputs, output_type):
assert len(inputs) == 4
return [
topi.nn.simulated_quantize(
inputs[0], inputs[1], inputs[2], inputs[3], axis=attrs.get_int("axis")
)
]
register_injective_schedule("qnn.simulated_quantize")
register_pattern("qnn.simulated_quantize", OpPattern.ELEMWISE)
@register_compute("qnn.simulated_dequantize")
def simulated_dequantize_compute(attrs, inputs, output_type):
assert len(inputs) == 4
return [
topi.nn.simulated_dequantize(
inputs[0], inputs[1], inputs[2], inputs[3], axis=attrs.get_int("axis")
)
]
register_injective_schedule("qnn.simulated_dequantize")
register_pattern("qnn.simulated_dequantize", OpPattern.ELEMWISE)
# qnn.quantize
register_strategy("qnn.quantize", strategy.qnn_quantize_strategy)
register_pattern("qnn.quantize", OpPattern.ELEMWISE)
# qnn.dequantize
register_strategy("qnn.dequantize", strategy.qnn_dequantize_strategy)
register_pattern("qnn.dequantize", OpPattern.ELEMWISE)
# qnn.requantize
register_strategy("qnn.requantize", strategy.qnn_requantize_strategy)
register_pattern("qnn.requantize", OpPattern.ELEMWISE)
# qnn.add
register_strategy("qnn.add", strategy.qnn_add_strategy)
register_pattern("qnn.add", OpPattern.BROADCAST)
# qnn.concatenate
register_strategy("qnn.concatenate", strategy.qnn_concatenate_strategy)
register_pattern("qnn.concatenate", OpPattern.INJECTIVE)
# qnn.conv2d
register_strategy("qnn.conv2d", strategy.qnn_conv2d_strategy)
register_pattern("qnn.conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# qnn.dense
register_strategy("qnn.dense", strategy.qnn_dense_strategy)
register_pattern("qnn.dense", OpPattern.OUT_ELEMWISE_FUSABLE)
# qnn.batch_matmul
register_strategy("qnn.batch_matmul", strategy.qnn_batch_matmul_strategy)
register_pattern("qnn.batch_matmul", OpPattern.OUT_ELEMWISE_FUSABLE)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/qnn/op/_requantize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Internal module for qnn requantization."""
import tvm._ffi
tvm._ffi._init_api("relay._requantize", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/qnn/op/canonicalizations.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Consist of utilities and methods for lowering QNN into mainline relay."""
from typing import Callable
import numpy as np
import tvm
from tvm import relay
def run_const_expr(expr: "relay.Expr") -> np.ndarray:
"""Evaluate a const expression, receiving result as np array."""
mod = tvm.IRModule.from_expr(expr)
vm_exe = relay.create_executor("vm", mod=mod)
return vm_exe.evaluate()().asnumpy()
def create_integer_lookup_table(
floating_point_func: Callable[[np.ndarray], np.ndarray],
input_scale: "relay.Expr",
input_zero_point: "relay.Expr",
output_scale: "relay.Expr",
output_zero_point: "relay.Expr",
in_axis: int = -1,
out_axis: int = -1,
in_dtype: str = "uint8",
out_dtype: str = "uint8",
) -> np.ndarray:
"""
Return a lookup table mapping each quantized input value to the quantized output of the given function.
Note this also supports mapping unsigned and signed integers to each other.
Args:
floating_point_func: The numpy function which this table is to approximate
input_scale: The scale of the quantized input tensor.
input_zero_point: The zero point of the quantized input tensor.
output_scale: The scale of the quantized output tensor.
output_zero_point: The zero point of the quantized output tensor.
in_axis: The axis for multi-channel quantization of the input if applicable.
out_axis: The axis for multi-channel quantization of the output if applicable.
in_dtype: The dtype of the input tensor.
out_dtype: The wanted dtype of the output tensor.
Returns:
A numpy array where values in quantized space will index to the output in quantized space
approximating the given function.
"""
if not np.issubdtype(np.dtype(in_dtype), np.integer) or not np.issubdtype(
np.dtype(out_dtype), np.integer
):
raise ValueError(
f"Only integer dtypes allowed got {in_dtype} and {out_dtype} for in and out dtypes."
)
dtype_info = np.iinfo(in_dtype)
num_bits = dtype_info.bits
# Use TVM's quantization methods via relay to be consistent.
# inputs_quantized = np.array(range(dtype_info.min, dtype_info.max + 1)).astype(in_dtype)
# First generate a list of all num_bit integer patterns
inputs_quantized = np.array(range(0, 2**num_bits), dtype=f"uint{num_bits}")
# Reinterpret bits as the real datatype
# Note what we are doing here is a bit tricky, the canonical view of our lookup table
# is using the uintX version. When we run the lookup in the relay graph, we cast the
# bit pattern back into this form.
inputs_quantized = inputs_quantized.view(in_dtype)
inputs_quantized = relay.const(inputs_quantized, dtype=in_dtype)
inputs_dequantized = run_const_expr(
relay.qnn.op.dequantize(
inputs_quantized,
input_scale=input_scale,
input_zero_point=input_zero_point,
axis=in_axis,
)
)
output_dequantized = relay.const(floating_point_func(inputs_dequantized))
output_quantized = run_const_expr(
relay.qnn.op.quantize(
output_dequantized, output_scale, output_zero_point, out_axis, out_dtype
)
)
return output_quantized
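# Usage sketch (illustrative; the scales and zero points below are made-up
# values, not defaults from any model):
#
#   table = create_integer_lookup_table(
#       np.tanh,
#       input_scale=relay.const(0.05, "float32"),
#       input_zero_point=relay.const(128, "int32"),
#       output_scale=relay.const(1.0 / 128, "float32"),
#       output_zero_point=relay.const(128, "int32"),
#   )
#   # table has 2**8 entries; table[q] approximates
#   # quantize(tanh(dequantize(q))) for every uint8 bit pattern q.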
def create_integer_lookup_op(
input_arg: "relay.Expr",
floating_point_func: Callable[[np.array], np.array],
in_scale: "relay.Expr",
in_zero_point: "relay.Expr",
out_scale: "relay.Expr",
out_zero_point: "relay.Expr",
in_axis: int = -1,
out_axis: int = -1,
in_dtype: str = "uint8",
out_dtype: str = "uint8",
) -> "relay.Expr":
"""
Create a quantized version of the given floating point unary operation using table lookup.
Args:
input_arg: The quantized input to the final function.
floating_point_func: The numpy function which this table is to approximate
in_scale: The scale of the quantized input tensor.
in_zero_point: The zero point of the quantized input tensor.
out_scale: The scale of the quantized output tensor.
out_zero_point: The zero point of the quantized output tensor.
in_axis: The axis for multi-channel quantization of the input if applicable.
out_axis: The axis for multi-channel quantization of the output if applicable.
in_dtype: The dtype of the input tensor.
out_dtype: The wanted dtype of the output tensor.
Returns:
A Relay expression representing a quantized version of the given function.
"""
# TODO: handle multi-channel q, below will fail with multi-channel q
in_scale = in_scale.data.numpy().item()
in_zero_point = in_zero_point.data.numpy().item()
out_scale = out_scale.data.numpy().item()
out_zero_point = out_zero_point.data.numpy().item()
lookup_table = create_integer_lookup_table(
floating_point_func,
relay.const(in_scale),
relay.const(in_zero_point, dtype="int32"),
relay.const(out_scale),
relay.const(out_zero_point, dtype="int32"),
in_axis=in_axis,
in_dtype=in_dtype,
out_axis=out_axis,
out_dtype=out_dtype,
)
in_dtype_info = np.iinfo(in_dtype)
in_dtype_num_bits = in_dtype_info.bits
lookup_table = relay.const(lookup_table)
index_tensor = relay.reinterpret(input_arg, f"uint{in_dtype_num_bits}")
result = relay.take(lookup_table, index_tensor, axis=0, mode="fast")
return result
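# Minimal sketch of wiring a quantized unary op through the table (the
# quantization parameters are hypothetical; see legalizations.py for the
# real call sites):
#
#   x = relay.var("x", shape=(1, 64), dtype="uint8")
#   y = create_integer_lookup_op(
#       x,
#       np.exp,
#       in_scale=relay.const(0.1, "float32"),
#       in_zero_point=relay.const(0, "int32"),
#       out_scale=relay.const(0.2, "float32"),
#       out_zero_point=relay.const(0, "int32"),
#   )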
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/qnn/op/layout_conversions.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Convert layout related registration"""
from __future__ import absolute_import
from tvm.relay.op import op as reg
from ...op.strategy.generic import is_depthwise_conv2d
@reg.register_convert_op_layout("qnn.conv2d")
def convert_qnn_conv2d(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for QNN conv2d op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
assert len(desired_layouts) == 2, "A desired layout is expected for both of qnn.conv2d's inputs"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs = dict(attrs)
new_attrs["data_layout"] = desired_data_layout
if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.qnn.op.conv2d(*inputs, **new_attrs)
if desired_data_layout == "NCHW":
new_attrs["kernel_layout"] = "OIHW"
return relay.qnn.op.conv2d(*inputs, **new_attrs)
if desired_data_layout == "NHWC":
# Check for depthwise convolution.
data_info = tinfos[0]
weight_info = tinfos[1]
if is_depthwise_conv2d(
data_info.shape,
attrs["data_layout"],
weight_info.shape,
attrs["kernel_layout"],
attrs["groups"],
):
new_attrs["kernel_layout"] = "HWOI"
else:
new_attrs["kernel_layout"] = "HWIO"
return relay.qnn.op.conv2d(*inputs, **new_attrs)
raise ValueError("Layout %s is not yet supported" % desired_data_layout)
@reg.register_convert_op_layout("qnn.conv2d_transpose")
def convert_qnn_conv2d_transpose(attrs, inputs, tinfos, desired_layouts):
"""Convert Layout pass registration for QNN conv2d_transpose op.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
tinfos : list of types
List of input and output types
desired_layouts : list of layout strings
List of layouts defining our desired
layout for the data and kernel inputs respectively.
Returns
-------
result : tvm.relay.Expr
The transformed expr
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
assert (
len(desired_layouts) == 2
), "A desired layout is expected for both of qnn.conv2d_transpose's inputs"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs = dict(attrs)
new_attrs["data_layout"] = desired_data_layout
if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.qnn.op.conv2d_transpose(*inputs, **new_attrs)
# Handle default kernel layouts
if desired_data_layout == "NCHW":
new_attrs["kernel_layout"] = "IOHW"
return relay.qnn.op.conv2d_transpose(*inputs, **new_attrs)
if desired_data_layout == "NHWC":
new_attrs["kernel_layout"] = "HWIO"
return relay.qnn.op.conv2d_transpose(*inputs, **new_attrs)
raise ValueError("Layout %s is not yet supported" % desired_data_layout)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/qnn/op/legalizations.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Backend QNN related feature registration"""
import numpy as np
from scipy import special
import tvm
from tvm import relay
from tvm._ffi.base import TVMError
from tvm.relay.qnn.op.canonicalizations import create_integer_lookup_op
from ....topi.x86.utils import target_has_sse42
from ....topi.utils import is_target
from .. import op as reg
#################################################
# Register the functions for different operators.
#################################################
# Registering QNN Conv2D legalization function.
@reg.register_qnn_legalize("qnn.conv2d")
def legalize_qnn_conv2d(attrs, inputs, types):
return qnn_conv2d_legalize(attrs, inputs, types)
# Registering QNN Conv2DTranspose legalization function.
@reg.register_qnn_legalize("qnn.conv2d_transpose")
def legalize_qnn_conv2d_transpose(attrs, inputs, types):
return qnn_conv2d_transpose_legalize(attrs, inputs, types)
# Registering QNN dense legalization function.
@reg.register_qnn_legalize("qnn.dense")
def legalize_qnn_dense(attrs, inputs, types):
return qnn_dense_legalize(attrs, inputs, types)
def register_qnn_unary_op_legalize(op_name, floating_point_func):
"""Register unary qnn op for legalization via table lookup op."""
def legalize_qnn_unary_op(attrs, inputs, types):
return create_integer_lookup_op(
input_arg=inputs[0],
floating_point_func=floating_point_func,
in_scale=inputs[1],
in_zero_point=inputs[2],
out_scale=inputs[3],
out_zero_point=inputs[4],
in_dtype=types[0].dtype,
out_dtype=types[0].dtype,
)
return reg.register_qnn_legalize(op_name, legalize_qnn_unary_op)
def hardswish_func(x):
x2 = x + 3.0
x2 = np.clip(x2, 0.0, 6.0)
return x * x2 / 6.0
register_qnn_unary_op_legalize("qnn.sqrt", np.sqrt)
register_qnn_unary_op_legalize("qnn.rsqrt", lambda arr: 1 / np.sqrt(arr))
register_qnn_unary_op_legalize("qnn.exp", np.exp)
register_qnn_unary_op_legalize("qnn.erf", special.erf)
register_qnn_unary_op_legalize("qnn.sigmoid", lambda arr: 1 / (1 + np.exp(-arr)))
register_qnn_unary_op_legalize("qnn.hardswish", hardswish_func)
register_qnn_unary_op_legalize("qnn.tanh", np.tanh)
register_qnn_unary_op_legalize("qnn.log", np.log)
register_qnn_unary_op_legalize("qnn.abs", np.abs)
# Default to None. If overridden by target, this will not be run.
# Generic QNN Conv2D legalization function.
@tvm.target.generic_func
def qnn_conv2d_legalize(attrs, inputs, types):
"""Default legalization is None."""
return None
# Generic QNN Conv2DTranspose legalization function.
@tvm.target.generic_func
def qnn_conv2d_transpose_legalize(attrs, inputs, types):
"""Convert kernel and data to int16, subtract offsets upfront
and calls into relay.nn.conv2d_transpose."""
# Collect the input exprs.
data, kernel, input_zero_point, kernel_zero_point, _, _ = inputs
# If input zero point is a scalar, we can directly subtract it.
if len(types[2].shape) == 0:
shift_data = relay.subtract(
relay.cast(data, dtype="int16"), relay.cast(input_zero_point, "int16")
)
# Otherwise it needs to be broadcast.
else:
shift_data = relay.nn.bias_add(
relay.cast(data, dtype="int16"),
-relay.cast(input_zero_point, dtype="int16"),
)
# If kernel zero point is a scalar, we can directly subtract it.
if len(types[3].shape) == 0:
shift_kernel = relay.subtract(
relay.cast(kernel, dtype="int16"), relay.cast(kernel_zero_point, "int16")
)
# Otherwise it needs to be broadcast.
else:
shift_kernel = relay.nn.bias_add(
relay.cast(kernel, dtype="int16"),
-relay.cast(kernel_zero_point, dtype="int16"),
)
return relay.nn.conv2d_transpose(shift_data, shift_kernel, **attrs)
# Generic QNN Conv2D legalization function.
@tvm.target.generic_func
def qnn_dense_legalize(attrs, inputs, types):
"""Default legalization is None."""
return None
###################
# Helper functions.
###################
def get_scalar_from_constant(expr):
"""Returns scalar value from Relay constant scalar."""
assert (
isinstance(expr, relay.Constant) and not expr.data.shape
), "Expr is not a constant scalar."
value = expr.data.numpy()
assert value.dtype == np.dtype(np.int32) or value.dtype == np.dtype(
np.float32
), "value must be float32/int32"
return value.item(0)
def _shift(data, zero_point, out_dtype):
"""Shifts (add/subtracts) the qnn tensor with +/-128)"""
if out_dtype == "uint8":
shift = 128
elif out_dtype == "int8":
shift = -128
else:
raise ValueError("Unsupported out dtype.")
data_modified = relay.cast(data, "int32")
data_modified = relay.add(data_modified, relay.const(shift, "int32"))
data_modified = relay.cast(data_modified, out_dtype)
if isinstance(zero_point, relay.Constant):
zero_point_val = get_scalar_from_constant(zero_point)
zero_point_modified = relay.const(zero_point_val + shift, "int32")
else:
zero_point_modified = zero_point + relay.const(shift, "int32")
return (data_modified, zero_point_modified)
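# Worked example: shifting an int8 tensor to uint8 adds 128 to both the
# values and the zero point, leaving the represented real value unchanged:
# Q = -5 with zp = 3 encodes scale * (-5 - 3); after the shift, Q' = 123
# with zp' = 131 encodes scale * (123 - 131) -- the same quantity.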
# Helper function for lowering in the absence of fast Int8 arithmetic units.
def helper_no_fast_int8_hw_legalization(attrs, inputs, types, relay_op):
"""Converts QNN operators into a sequence of Relay operators that are friendly to HW that do
not have fast Int8 arithmetic. For example, for ARM, LLVM utilizes the assembly instructions
much more efficiently if the convolution or dense operator input datatypes are int16 instead of
int8. More details are present at https://github.com/apache/tvm/pull/4277.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
# Collect the input exprs.
data, kernel, input_zero_point, kernel_zero_point, _, _ = inputs
shift_data = relay.subtract(
relay.cast(data, dtype="int16"), relay.cast(input_zero_point, dtype="int16")
)
# If kernel zero point is a scalar we can directly subtract it.
if len(types[3].shape) == 0:
shift_kernel = relay.subtract(
relay.cast(kernel, dtype="int16"), relay.cast(kernel_zero_point, dtype="int16")
)
# Otherwise it needs to be broadcast.
else:
# Determine output axis of kernel for spatial operations.
if hasattr(attrs, "kernel_layout"):
output_axis = tvm.tir.layout(attrs["kernel_layout"]).index_of("O")
# For dense operations, broadcast to [N, K] layout.
elif isinstance(attrs, relay.op.op_attrs.DenseAttrs):
output_axis = 0
# For matrix multiplication instead expand to [K, N] layout.
elif isinstance(attrs, relay.op.op_attrs.MatmulAttrs):
output_axis = 1
else:
raise TVMError(
"Legalization of %s is not yet supported with per channel parameters"
% str(type(attrs))
)
shift_kernel = relay.nn.bias_add(
relay.cast(kernel, dtype="int16"),
-relay.cast(kernel_zero_point, dtype="int16"),
output_axis,
)
new_attrs = {k: attrs[k] for k in attrs.keys()}
return relay_op(shift_data, shift_kernel, **new_attrs)
# Helper function to change dtypes to uint8 x int8. Intel VNNI instructions prefer this setting.
def helper_change_dtypes_to_uint8_int8(attrs, inputs, types, relay_op):
"""Legalizes QNN conv2d/dense op for Intel HW. VNNI supports u8 x i8 fast conv/MM. If the dtypes
are already good, we don't transform. Else, we shift the tensor values and zero points to change
the dtype.
Converting from int8 to uint8 can be done in the following manner.
Original equation
scale * (QA - zp_a)
scale * (QA + 128 - 128 - zp_a)
scale * ( (QA + 128) - (zp_a + 128))
Replacing QA + 128 with QA' and (zp_a + 128) with zp_a'
We get our new quantized uint8 tensor - scale * (QA' - zp_a')
Similarly we can convert from uint8 to int8.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
# Collect the dtypes.
data_dtype = types[0].dtype
kernel_dtype = types[1].dtype
# Collect the input exprs.
data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale = inputs
# VNNI supports u8 x i8 fast conv/MM. Don't do anything if it is already satisfied.
if data_dtype == "uint8" and kernel_dtype == "int8":
return None
# Shift input if necessary.
if data_dtype == "int8":
# Compute (QA + 128) and (zp_a + 128)
data, input_zero_point = _shift(data, input_zero_point, "uint8")
# Shift kernel if necessary.
if kernel_dtype == "uint8":
# Compute (QA - 128) and (zp_a - 128)
kernel, kernel_zero_point = _shift(kernel, kernel_zero_point, "int8")
# Call qnn.conv2d with modified inputs and zero points.
new_attrs = {k: attrs[k] for k in attrs.keys()}
return relay_op(
data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale, **new_attrs
)
# Helper function to change dtypes to int8 x int8. Cuda dp4a instructions prefer this setting.
def helper_change_dtypes_to_int8(attrs, inputs, types, relay_op):
"""Legalizes QNN conv2d/dense op for Nvidia HW. dp4a supports i8 x i8 fast conv/MM. If the
dtypes are already good, we don't transform. Else, we shift the tensor values and zero points
to change the dtype.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
# Collect the dtypes.
data_dtype = types[0].dtype
kernel_dtype = types[1].dtype
# Collect the input exprs.
data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale = inputs
# dp4a supports i8 x i8 fast conv/MM. Don't do anything if it is already satisfied.
if data_dtype == "int8" and kernel_dtype == "int8":
return None
# Shift input if necessary.
if data_dtype == "uint8":
# Compute (QA - 128) and (zp_a - 128)
data, input_zero_point = _shift(data, input_zero_point, "int8")
# Shift kernel if necessary.
if kernel_dtype == "uint8":
# Compute (QA - 128) and (zp_a - 128)
kernel, kernel_zero_point = _shift(kernel, kernel_zero_point, "int8")
# Call qnn.conv2d with modified inputs and zero points.
new_attrs = {k: attrs[k] for k in attrs.keys()}
return relay_op(
data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale, **new_attrs
)
# Helper function to change dtypes to be same. ARM dotprod instructions prefer this setting.
def helper_change_dtypes_to_be_same(attrs, inputs, types, relay_op):
"""Sometimes MxNet + MLDNN can lead to uint8 x int8 datatypes for the conv inputs. However,
many devices like ARM prefer the datatypes to be same for the HW units. This helper transforms
conv2d/dense such that both the dtypes are same.
Parameters
----------
attrs : tvm.ir.Attrs
Attributes of current convolution
inputs : list of tvm.relay.Expr
The args of the Relay expr to be legalized
types : list of types
List of input and output types
Returns
-------
result : tvm.relay.Expr
The legalized expr
"""
def _shift(data, zero_point, out_dtype):
"""Shifts (adds/subtracts) the qnn tensor by 128)"""
if out_dtype == "uint8":
shift = 128
elif out_dtype == "int8":
shift = -128
else:
raise ValueError("Unsupported out dtype.")
data_modified = relay.cast(data, "int32")
data_modified = relay.add(data_modified, relay.const(shift, "int32"))
data_modified = relay.cast(data_modified, out_dtype)
zero_point_val = get_scalar_from_constant(zero_point)
zero_point_modified = relay.const(zero_point_val + shift, "int32")
return (data_modified, zero_point_modified)
# Collect the dtypes.
data_dtype = types[0].dtype
kernel_dtype = types[1].dtype
if data_dtype == kernel_dtype:
return None
# Collect the input exprs.
data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale = inputs
assert (
"int8" in data_dtype and "int8" in kernel_dtype
), "Qnn Conv2D/Dense only accepts uint8 or int8 inputs"
# Shift input if necessary.
data, input_zero_point = _shift(data, input_zero_point, kernel_dtype)
new_attrs = {k: attrs[k] for k in attrs.keys()}
return relay_op(
data, kernel, input_zero_point, kernel_zero_point, input_scale, kernel_scale, **new_attrs
)
def is_fast_int8_on_intel():
"""Checks whether the hardware has support for fast Int8 arithmetic operations."""
target = tvm.target.Target.current(allow_none=False)
return target_has_sse42(target.mcpu)
########################
# ARM CPU legalizations.
########################
@qnn_conv2d_legalize.register("arm_cpu")
def _qnn_conv2d_legalize_arm_cpu(attrs, inputs, types):
target = tvm.target.Target.current(allow_none=False)
is_depthwise = relay.op.strategy.is_depthwise_conv2d(
types[0].shape,
attrs["data_layout"],
types[1].shape,
attrs["kernel_layout"],
attrs["groups"],
)
use_int8_on_arm = (not is_depthwise) and attrs["data_layout"] == "NHWC"
other_options = use_int8_on_arm or target.features.has_dotprod
if target.features.has_asimd and not other_options:
return helper_no_fast_int8_hw_legalization(attrs, inputs, types, relay.nn.conv2d)
# ARM prefers the dtypes to be same.
return helper_change_dtypes_to_be_same(attrs, inputs, types, relay.qnn.op.conv2d)
@qnn_dense_legalize.register("arm_cpu")
def _qnn_dense_legalize_arm_cpu(attrs, inputs, types):
target = tvm.target.Target.current(allow_none=False)
if target.features.has_asimd and not target.features.has_dotprod:
return helper_no_fast_int8_hw_legalization(attrs, inputs, types, relay.nn.dense)
# ARM prefers the dtypes to be same.
return helper_change_dtypes_to_be_same(attrs, inputs, types, relay.qnn.op.dense)
##########################
# Intel CPU legalizations.
##########################
@qnn_conv2d_legalize.register("cpu")
def _qnn_conv2d_legalize_intel_cpu(attrs, inputs, types):
# The VNNI transformations prefer uint8 x int8 datatypes.
if is_fast_int8_on_intel():
return helper_change_dtypes_to_uint8_int8(attrs, inputs, types, relay.qnn.op.conv2d)
return helper_no_fast_int8_hw_legalization(attrs, inputs, types, relay.nn.conv2d)
@qnn_dense_legalize.register("cpu")
def _qnn_dense_legalize_intel_cpu(attrs, inputs, types):
# The VNNI transformations prefer uint8 x int8 datatypes.
if is_fast_int8_on_intel():
return helper_change_dtypes_to_uint8_int8(attrs, inputs, types, relay.qnn.op.dense)
return helper_no_fast_int8_hw_legalization(attrs, inputs, types, relay.nn.dense)
#####################
# CUDA and vulkan legalizations.
#####################
@qnn_conv2d_legalize.register(["cuda", "gpu"])
def _qnn_conv2d_legalize_cuda(attrs, inputs, types):
if is_target("vulkan"):
# prefers the dtypes to be same. Mixed type is not yet supported.
return helper_change_dtypes_to_be_same(attrs, inputs, types, relay.qnn.op.conv2d)
if is_target(["cuda", "rocm"]):
# CUDA prefers both datatypes to be int8.
return helper_change_dtypes_to_int8(attrs, inputs, types, relay.qnn.op.conv2d)
return None
@qnn_dense_legalize.register(["cuda", "gpu"])
def _qnn_dense_legalize_cuda(attrs, inputs, types):
if is_target("vulkan"):
# prefers the dtypes to be same. Mixed type is not yet supported.
return helper_change_dtypes_to_be_same(attrs, inputs, types, relay.qnn.op.dense)
if is_target(["cuda", "rocm"]):
# CUDA prefers both datatypes to be int8.
return helper_change_dtypes_to_int8(attrs, inputs, types, relay.qnn.op.dense)
return None
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/qnn/op/op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""The register functions for the QNN dialect."""
import tvm.ir
def register_qnn_legalize(op_name, legal_op=None, level=10):
"""Register legal transformation function for a QNN op.
This helps QNN match hardware intrinsics better and is run before
canonicalization.
Parameters
----------
op_name : str
The name of the operator
legal_op: function (attrs: Attrs, inputs: List[Expr], types: List[relay.Type]) -> Expr
The function for transforming an expr to another expr.
level : int
The priority level
"""
return tvm.ir.register_op_attr(op_name, "FTVMQnnLegalize", legal_op, level)
def register_qnn_canonicalize(op_name, legal_op=None, level=10):
"""Register canonicalization function for a QNN op.
This transforms QNN ops to mainline Relay components.
Parameters
----------
op_name : str
The name of the operator
legal_op: function (Attrs, List[Expr], List[relay.Type]) -> Expr
The function for transforming an expr to another expr.
level : int
The priority level
"""
return tvm.ir.register_op_attr(op_name, "FTVMQnnCanonicalize", legal_op, level)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/qnn/op/qnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument, not-context-manager
"""QNN dialect operators."""
from __future__ import absolute_import as _abs
import tvm
import tvm.ir
from tvm import relay
from tvm.relay.expr import Tuple, TupleWrapper
from tvm.relay.op.nn.utils import get_pad_tuple2d
from tvm.runtime import Object
from tvm.target import Target
from tvm.topi.nn.qnn import SQNN_DTYPE_TO_CODE
from tvm.topi.x86.utils import target_has_sse41
from . import _make, _requantize
@tvm._ffi.register_object("relay.qnn.op.RequantizeConfig")
class RequantizeConfig(Object):
"""Configure the requantization behavior by setting config variables.
Note
----
This object is backed by node system in C++, with arguments that can be
exchanged between python and C++.
Do not construct directly, use requantize_config instead.
The fields that are backed by the C++ node are immutable once an instance
is constructed. Use _node_defaults getters to get results for the fields.
"""
@staticmethod
def _get_node_default_rounding():
return "UPWARD"
@staticmethod
def _get_node_default_compute_dtype():
target = Target.current(True)
if target and str(target.kind) == "llvm" and target_has_sse41(target.mcpu):
return "float32"
return "int64"
_node_defaults = {
"rounding": _get_node_default_rounding.__func__,
"compute_dtype": _get_node_default_compute_dtype.__func__,
}
# pylint: disable=no-member
def __init__(self, handle):
"""Initialize the function with handle
Parameters
----------
handle : SymbolHandle
the handle to the underlying C++ Symbol
"""
super(RequantizeConfig, self).__init__(handle)
self.handle = handle
def __enter__(self):
# pylint: disable=protected-access
_requantize._EnterRequantizeConfigScope(self)
return self
def __exit__(self, ptype, value, trace):
_requantize._ExitRequantizeConfigScope()
def __setattr__(self, name, value):
if name in RequantizeConfig._node_defaults:
raise AttributeError("'%s' object cannot set attribute '%s'" % (str(type(self)), name))
return super(RequantizeConfig, self).__setattr__(name, value)
def current_requantize_config():
"""Get the current requantization configuration."""
return _requantize._GetCurrentRequantizeConfig()
def requantize_config(**kwargs):
"""Configure the requantization behavior by setting config variables.
Parameters
----------
rounding: "UPWARD" or "TONEAREST"
Rounding direction for fixed point multiplications.
compute_dtype:
Specifies the data type used during requantize.
Supported options: \"int64\", \"float32\", \"float64\"
Returns
-------
config: RequantizeConfig
The requantization configuration
"""
node_args = {
k: v() if k not in kwargs else kwargs[k] for k, v in RequantizeConfig._node_defaults.items()
}
return tvm.ir.make_node("relay.qnn.op.RequantizeConfig", **node_args)
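# Usage sketch: override the rounding behavior for every requantize lowered
# inside the scope (the choice of "TONEAREST" here is just for illustration):
#
#   with requantize_config(rounding="TONEAREST"):
#       lib = relay.build(mod, target="llvm")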
def requantize(
data,
input_scale,
input_zero_point,
output_scale,
output_zero_point,
axis=-1,
rounding="None",
compute_dtype="None",
out_dtype="int8",
):
r"""Requantized operator.
The requantize operator converts one quantized tensor representation to
another quantized tensor representation. For the output tensor, we are
provided with output scale and zero point. The computation is as follows
Q_output = zp_output + (scale_input)/(scale_output) * (Q_input - zp_input)
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
input_scale: tvm.relay.Expr
The quantization scale for the input tensor.
input_zero_point: tvm.relay.Expr
The zero point of the input tensor.
output_scale: tvm.relay.Expr
The quantization scale for the output tensor.
output_zero_point: tvm.relay.Expr
The zero point of the output tensor.
axis : int
The channel axis for quantization. Default value is -1 which corresponds to the last axis.
rounding : string, optional
Defines the rounding direction when the value is midway between two
representable values.
compute_dtype:
Specifies the data type used during requantize.
Supported options: \"int64\", \"float32\", \"float64\"
out_dtype : str, optional
Specifies the output data type.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.requantize(
data,
input_scale,
input_zero_point,
output_scale,
output_zero_point,
axis,
rounding,
compute_dtype,
out_dtype,
)
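# Worked example of the formula above: with input_scale = 0.5,
# output_scale = 0.25 and both zero points 0, Q_output = (0.5 / 0.25) * Q_input,
# so a quantized input of 10 requantizes to 20 (before clamping to out_dtype).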
def quantize(data, output_scale, output_zero_point, axis=-1, out_dtype="int8"):
r"""Quantize op
This operator takes float32 as input and produces quantized int8 or uint8 as output.
The input tensor can be of any shape. The output shape is the same as input shape.
Q_output = clamp((round(input_tensor/output_scale) + output_zero_point),
out_dtype::min,
out_dtype::max)
Parameters
----------
data : tvm.relay.Expr
The input tensor to be quantized. Can be of type float32.
output_scale : tvm.relay.Expr
The output scale.
output_zero_point : tvm.relay.Expr
The output zero_point.
axis : int
The channel axis for quantization. Default value is -1 which corresponds to the last axis.
out_dtype : str, optional
The data type of the output tensor. Can be one of [int8, uint8, int32].
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.quantize(data, output_scale, output_zero_point, axis, out_dtype)
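# Worked example: with output_scale = 0.5 and output_zero_point = 1, a float
# input of 2.3 quantizes to clamp(round(2.3 / 0.5) + 1) = clamp(6) = 6 in
# int8, since 6 lies inside [-128, 127].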
def simulated_quantize(data, output_scale, output_zero_point, axis=-1, out_dtype="int8"):
r"""Simulated Quantize op
Mimics the quantize op but has more flexibility in valid inputs and always
outputs the same type as the input. This can be useful for
calibrating or training a quantized network.
Parameters
----------
data : tvm.relay.Expr
The input tensor to be quantized. Can be of type float32.
out_dtype : string or tvm.relay.Expr
A string or tensor indicating which datatype to quantize to.
output_scale : tvm.relay.Expr
The output scale.
output_zero_point : tvm.relay.Expr
The output zero_point.
axis : int
The channel axis for quantization. Default value is -1 which corresponds to the last axis.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
# Convert string dtype to a constant if needed.
if isinstance(out_dtype, str):
type_code = SQNN_DTYPE_TO_CODE[out_dtype]
out_dtype = relay.const(type_code, dtype="int32")
# Wrap reshapes around qnn parameter tensors to guarantee shape compatibility.
output_scale = relay.op.reshape(output_scale, [-1])
output_zero_point = relay.op.reshape(output_zero_point, [-1])
return _make.simulated_quantize(data, out_dtype, output_scale, output_zero_point, axis)
def dequantize(data, input_scale, input_zero_point, axis=-1):
r"""Dequantize op
This operator takes quantized int8 or uint8 as input and produces
dequantized float32 as output. The output shape is the same as input shape. The input
tensor can be of any shape.
Parameters
----------
data : tvm.relay.Expr
The input tensor to be dequantized. Can be of type [int8, uint8, int32].
input_scale : tvm.relay.Expr
The input scale.
input_zero_point : tvm.relay.Expr
The input zero_point.
axis : int
The channel axis for quantization. Default value is -1 which corresponds to the last axis.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
return _make.dequantize(data, input_scale, input_zero_point, axis)
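# Worked example (inverse of the quantize example above): with
# input_scale = 0.5 and input_zero_point = 1, a quantized value of 6
# dequantizes to 0.5 * (6 - 1) = 2.5.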
def simulated_dequantize(data, input_scale, input_zero_point, axis=-1, in_dtype="int8"):
r"""Simulated Dequantize op
Mimics the dequantize op but has more flexibility in valid inputs and always
outputs the same type as the input. This can be useful for calibrating or
training a quantized network.
Parameters
----------
data : tvm.relay.Expr
The input tensor to be dequantized.
in_dtype : string or tvm.relay.Expr
A string or tensor indicating which datatype to dequantize from.
input_scale : tvm.relay.Expr
The input scale.
input_zero_point : tvm.relay.Expr
The input zero_point.
axis : int
The channel axis for quantization. Default value is -1 which corresponds to the last axis.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
# Convert string dtype to a constant if needed.
if isinstance(in_dtype, str):
type_code = SQNN_DTYPE_TO_CODE[in_dtype]
in_dtype = relay.const(type_code, dtype="int32")
# Wrap reshapes around qnn parameter tensors to guarantee shape compatibility.
input_scale = relay.op.reshape(input_scale, [-1])
input_zero_point = relay.op.reshape(input_zero_point, [-1])
return _make.simulated_dequantize(data, in_dtype, input_scale, input_zero_point, axis)
def concatenate(data, input_scales, input_zero_points, output_scale, output_zero_point, axis):
"""Concatenate the quantized input tensors along the given axis.
Parameters
----------
data : Union(List[relay.Expr], Tuple[relay.Expr], TupleWrapper[relay.Expr])
The list of quantized tensors.
input_scales : List[relay.Expr]
The list of scales of input quantized tensors.
input_zero_points : List[relay.Expr]
The list of zero points of input quantized tensors.
output_scale : relay.Expr
The scale of the output quantized tensor.
output_zero_point : relay.Expr
The zero point of the output quantized tensor.
axis : int
The axis along which the tensors are concatenated.
Returns
-------
result: relay.Expr
The concatenated quantized tensor.
"""
if isinstance(data, (list, tuple)):
data = Tuple(data)
elif isinstance(data, TupleWrapper):
data = data.tuple_value
if not isinstance(axis, int):
raise ValueError("For now, we only support integer axis")
input_scales = list(input_scales)
input_zero_points = list(input_zero_points)
return _make.concatenate(
data, Tuple(input_scales), Tuple(input_zero_points), output_scale, output_zero_point, axis
)
def conv2d(
data,
kernel,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
kernel_size,
channels,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
data_layout="NCHW",
kernel_layout="OIHW",
out_layout="",
out_dtype="int32",
):
r"""Quantized 2D convolution.
This operator convolves quantized data with quantized kernel.
If doing per-channel quantization, qnn expects the kernel_scale
and optionally the kernel_zero_point to be 1-D vectors instead of scalars.
The scale of the output quantized tensor is the product of the kernel_scale and
input_scale of the input quantized tensors. The zero point of the output
quantized tensor is 0. By default, the dtype of output is int32. Please also
refer to Requantize operator to understand how to scale back the int32
output to (u)int8.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
kernel : tvm.relay.Expr
The kernel expressions.
input_zero_point: tvm.relay.Expr
The zero point of the data distribution.
kernel_zero_point: tvm.relay.Expr
The zero point of the quantized_kernel distribution.
input_scale: tvm.relay.Expr
The scale for the input tensor. The scale for the input tensor is
stored purely for convenience here. See more commentary below.
kernel_scale: tvm.relay.Expr
The scale for the weight tensor. The scale for the weight tensor is
stored for access during relay passes. This information is not
needed in the pass pipeline after qnn.conv2d is lowered to the
sequence of steps as in nn.conv2d. See also input_scale in Requantize.
kernel_size : tuple of int
The spatial width and height of the convolution kernel.
channels : int
Number of output channels of this convolution.
strides : tuple of int, optional
The strides of convolution.
padding : tuple of int, optional
The padding of convolution on both sides of inputs before convolution.
dilation : tuple of int, optional
Specifies the dilation rate to be used for dilated convolution.
groups : int, optional
Number of groups for grouped convolution.
data_layout : str, optional
Layout of the input.
kernel_layout : str, optional
Layout of the kernel.
out_layout : str, optional
Layout of the output, by default, out_layout is the same as data_layout
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : tvm.relay.Expr
The computed result.
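Examples
--------
A minimal per-tensor quantization sketch (shapes, layouts, and quantization
parameters are illustrative):
.. code-block:: python
from tvm import relay
data = relay.var("data", shape=(1, 3, 32, 32), dtype="uint8")
kernel = relay.var("kernel", shape=(8, 3, 3, 3), dtype="int8")
out = relay.qnn.op.conv2d(
    data, kernel,
    input_zero_point=relay.const(0), kernel_zero_point=relay.const(0),
    input_scale=relay.const(0.5), kernel_scale=relay.const(0.25),
    kernel_size=(3, 3), channels=8, padding=(1, 1),
)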
"""
# TODO enforce 4-way padding in topi/nn/conv2d after #4644 merged
# convert 2-way padding to 4-way padding
padding = get_pad_tuple2d(padding)
return _make.conv2d(
data,
kernel,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
strides,
padding,
dilation,
groups,
channels,
kernel_size,
data_layout,
kernel_layout,
out_layout,
out_dtype,
)
def conv2d_transpose(
data,
weight,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
kernel_layout="IOHW",
out_layout="",
output_padding=(0, 0),
out_dtype="int32",
):
"""This operator deconvolves quantized data with quantized kernel. The scale of
the output quantized tensor is the product of the kernel_scale and
input_scale of the input quantized tensors. The zero point of the output
quantized tensor is 0. By default, the dtype of output is int32. Please also
refer to Requantize operator to understand how to scale back the int32
output to (u)int8.
Parameters
----------
data : tvm.relay.Expr
The input data to the operator.
weight : tvm.relay.Expr
The weight expressions.
input_zero_point: tvm.relay.Expr
The zero point of the data distribution.
kernel_zero_point: tvm.relay.Expr
The zero point of the quantized_kernel distribution.
input_scale: tvm.relay.Expr
The scale for the input tensor. The scale for the input tensor is
stored purely for convenience here. See more commentary below.
kernel_scale: tvm.relay.Expr
The scale for the weight tensor. The scale for the weight tensor is
stored for access during relay passes. This information is not
needed in the pass pipeline after qnn.conv2d_transpose is lowered to the
sequence of steps as in nn.conv2d_transpose. See also input_scale in Requantize.
strides : Tuple[int], optional
The strides of convolution.
padding : Tuple[int], optional
The padding of convolution.
dilation : Tuple[int], optional
Specifies the dilation rate to be used for dilated convolution.
channels : int, optional
Number of output channels of this convolution.
kernel_size : tuple of int, optional
The spatial dimensions of the convolution kernel.
groups : int, optional
Number of groups for grouped convolution.
data_layout : str, optional
Layout of the input.
kernel_layout : str, optional
Layout of the weight.
out_layout : Optional[str]
Layout of the output, by default, out_layout is the same as data_layout
output_padding : Tuple[int], optional
Used to identify the padding within the output shape
(only used in training, where conv2d_transpose represents the gradient of a convolution).
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d_transpose.
Returns
-------
result : tvm.relay.Expr
The computed result.
"""
# convert 2-way padding to 4-way padding
padding = get_pad_tuple2d(padding)
return _make.conv2d_transpose(
data,
weight,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
strides,
padding,
dilation,
groups,
channels,
kernel_size,
data_layout,
kernel_layout,
out_layout,
output_padding,
out_dtype,
)
def add(
lhs,
rhs,
lhs_scale,
lhs_zero_point,
rhs_scale,
rhs_zero_point,
output_scale,
output_zero_point,
lhs_axis=-1,
rhs_axis=-1,
):
"""Quantized addition with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side quantized input data.
rhs : relay.Expr
The right hand side quantized input data.
lhs_scale: relay.Expr
The scale of the lhs quantized expr.
lhs_zero_point: relay.Expr
The zero point of lhs quantized expr.
rhs_scale: relay.Expr
The scale of the rhs quantized expr.
rhs_zero_point: relay.Expr
The zero point of rhs quantized expr.
output_scale: relay.Expr
The scale of the output quantized expr.
output_zero_point: relay.Expr
The zero point of output quantized expr.
lhs_axis: int
The channel axis for lhs quantization. Default value is -1 which corresponds
to the last axis.
rhs_axis: int
The channel axis for rhs quantization. Default value is -1 which corresponds
to the last axis.
Returns
-------
result : relay.Expr
The computed result.
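Examples
--------
A minimal sketch (scales and zero points are illustrative):
.. code-block:: python
from tvm import relay
lhs = relay.var("lhs", shape=(1, 4), dtype="int8")
rhs = relay.var("rhs", shape=(1, 4), dtype="int8")
out = relay.qnn.op.add(
    lhs, rhs,
    lhs_scale=relay.const(0.1), lhs_zero_point=relay.const(0),
    rhs_scale=relay.const(0.2), rhs_zero_point=relay.const(0),
    output_scale=relay.const(0.15), output_zero_point=relay.const(0),
)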
"""
return _make.add(
lhs,
rhs,
lhs_scale,
lhs_zero_point,
rhs_scale,
rhs_zero_point,
output_scale,
output_zero_point,
lhs_axis,
rhs_axis,
)
def dense(
data,
weight,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
units,
out_dtype="int32",
):
"""Qnn Dense operator.
Applies a quantized linear transformation
.. math::
Y = X * W^T
If doing per-channel quantization, qnn expects the kernel_scale and
optionally the kernel_zero_point to be 1-D vectors instead of scalars.
Parameters
----------
data : tvm.relay.Expr
The quantized input data to the operator.
weight : tvm.relay.Expr
The quantized weight expressions.
input_zero_point: tvm.relay.Expr
The input zero point.
kernel_zero_point: tvm.relay.Expr
The kernel zero point.
input_scale: tvm.relay.Expr
The scale for the input tensor.
kernel_scale: tvm.relay.Expr
The scale for the weight tensor. The scale for the weight tensor is
stored for access during relay passes. This information is not
needed in the pass pipeline after qnn.dense is lowered to the
sequence of steps as in nn.dense. See also input_scale in Requantize.
units : int
Number of hidden units of the dense transformation.
out_dtype : str, optional
Specifies the output data type for mixed precision dense; can be int32 or int16.
Returns
-------
result : tvm.relay.Expr
The computed result.
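Examples
--------
A minimal sketch (shapes and quantization parameters are illustrative):
.. code-block:: python
from tvm import relay
data = relay.var("data", shape=(1, 16), dtype="uint8")
weight = relay.var("weight", shape=(8, 16), dtype="int8")
out = relay.qnn.op.dense(
    data, weight,
    input_zero_point=relay.const(0), kernel_zero_point=relay.const(0),
    input_scale=relay.const(0.5), kernel_scale=relay.const(0.25),
    units=8,
)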
"""
return _make.dense(
data,
weight,
input_zero_point,
kernel_zero_point,
input_scale,
kernel_scale,
units,
out_dtype,
)
def mul(
lhs,
rhs,
lhs_scale,
lhs_zero_point,
rhs_scale,
rhs_zero_point,
output_scale,
output_zero_point,
lhs_axis=-1,
rhs_axis=-1,
):
"""Quantized multiplication with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side quantized input data.
rhs : relay.Expr
The right hand side quantized input data.
lhs_scale: relay.Expr
The scale of the lhs quantized expr.
lhs_zero_point: relay.Expr
The zero point of lhs quantized expr.
rhs_scale: relay.Expr
The scale of the rhs quantized expr.
rhs_zero_point: relay.Expr
The zero point of rhs quantized expr.
output_scale: relay.Expr
The scale of the output quantized expr.
output_zero_point: relay.Expr
The zero point of output quantized expr.
lhs_axis: int
The channel axis for lhs quantization. Default value is -1 which corresponds
to the last axis.
rhs_axis: int
The channel axis for rhs quantization. Default value is -1 which corresponds
to the last axis.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.mul(
lhs,
rhs,
lhs_scale,
lhs_zero_point,
rhs_scale,
rhs_zero_point,
output_scale,
output_zero_point,
lhs_axis,
rhs_axis,
)
def tanh(x, scale, zero_point, output_scale, output_zero_point):
"""Quantized tanh.
Parameters
----------
x : relay.Expr
The quantized input tensor.
scale: relay.Expr
The scale of the quantized expr.
zero_point: relay.Expr
The zero point of quantized expr.
output_scale: relay.Expr
The scale of the output quantized expr.
output_zero_point: relay.Expr
The zero point of output quantized expr.
Returns
-------
result : relay.Expr
The computed result.
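Examples
--------
A minimal sketch (quantization parameters are illustrative); the sibling unary
ops below (exp, sqrt, rsqrt, erf, abs, sigmoid, hardswish, log) all share this
signature:
.. code-block:: python
from tvm import relay
x = relay.var("x", shape=(1, 4), dtype="int8")
out = relay.qnn.op.tanh(
    x, relay.const(0.1), relay.const(0), relay.const(0.0078125), relay.const(0)
)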
"""
return _make.tanh(
x,
scale,
zero_point,
output_scale,
output_zero_point,
)
def exp(x, scale, zero_point, output_scale, output_zero_point):
"""Quantized exponential function.
Parameters
----------
x : relay.Expr
The quantized input tensor.
scale: relay.Expr
The scale of the quantized expr.
zero_point: relay.Expr
The zero point of quantized expr.
output_scale: relay.Expr
The scale of the output quantized expr.
output_zero_point: relay.Expr
The zero point of output quantized expr.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.exp(
x,
scale,
zero_point,
output_scale,
output_zero_point,
)
def sqrt(x, scale, zero_point, output_scale, output_zero_point):
"""Quantized square root.
Parameters
----------
x : relay.Expr
The quantized input tensor.
scale: relay.Expr
The scale of the quantized expr.
zero_point: relay.Expr
The zero point of quantized expr.
output_scale: relay.Expr
The scale of the output quantized expr.
output_zero_point: relay.Expr
The zero point of output quantized expr.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.sqrt(
x,
scale,
zero_point,
output_scale,
output_zero_point,
)
def rsqrt(x, scale, zero_point, output_scale, output_zero_point):
"""Quantized reciprocal square root.
Parameters
----------
x : relay.Expr
The quantized input tensor.
scale: relay.Expr
The scale of the quantized expr.
zero_point: relay.Expr
The zero point of quantized expr.
output_scale: relay.Expr
The scale of the output quantized expr.
output_zero_point: relay.Expr
The zero point of output quantized expr.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.rsqrt(
x,
scale,
zero_point,
output_scale,
output_zero_point,
)
def erf(x, scale, zero_point, output_scale, output_zero_point):
"""Quantized error function.
Parameters
----------
x : relay.Expr
The quantized input tensor.
scale: relay.Expr
The scale of the quantized expr.
zero_point: relay.Expr
The zero point of quantized expr.
output_scale: relay.Expr
The scale of the output quantized expr.
output_zero_point: relay.Expr
The zero point of output quantized expr.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.erf(
x,
scale,
zero_point,
output_scale,
output_zero_point,
)
# pylint: disable=redefined-builtin
def abs(x, scale, zero_point, output_scale, output_zero_point):
"""Quantized abs function.
Parameters
----------
x : relay.Expr
The quantized input tensor.
scale: relay.Expr
The scale of the quantized expr.
zero_point: relay.Expr
The zero point of quantized expr.
output_scale: relay.Expr
The scale of the output quantized expr.
output_zero_point: relay.Expr
The zero point of output quantized expr.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.abs(
x,
scale,
zero_point,
output_scale,
output_zero_point,
)
def sigmoid(x, scale, zero_point, output_scale, output_zero_point):
"""Quantized sigmoid.
Parameters
----------
x : relay.Expr
The quantized input tensor.
scale: relay.Expr
The scale of the quantized expr.
zero_point: relay.Expr
The zero point of quantized expr.
output_scale: relay.Expr
The scale of the output quantized expr.
output_zero_point: relay.Expr
The zero point of output quantized expr.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.sigmoid(
x,
scale,
zero_point,
output_scale,
output_zero_point,
)
def hardswish(x, scale, zero_point, output_scale, output_zero_point):
"""Quantized hardswish.
Parameters
----------
x : relay.Expr
The quantized input tensor.
scale: relay.Expr
The scale of the quantized expr.
zero_point: relay.Expr
The zero point of quantized expr.
output_scale: relay.Expr
The scale of the output quantized expr.
output_zero_point: relay.Expr
The zero point of output quantized expr.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.hardswish(
x,
scale,
zero_point,
output_scale,
output_zero_point,
)
def log(x, scale, zero_point, output_scale, output_zero_point):
"""Quantized log.
Parameters
----------
x : relay.Expr
The quantized input tensor.
scale: relay.Expr
The scale of the quantized expr.
zero_point: relay.Expr
The zero point of quantized expr.
output_scale: relay.Expr
The scale of the output quantized expr.
output_zero_point: relay.Expr
The zero point of output quantized expr.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.log(
x,
scale,
zero_point,
output_scale,
output_zero_point,
)
def subtract(
lhs,
rhs,
lhs_scale,
lhs_zero_point,
rhs_scale,
rhs_zero_point,
output_scale,
output_zero_point,
lhs_axis=-1,
rhs_axis=-1,
):
"""Quantized subtraction with numpy-style broadcasting.
Parameters
----------
lhs : relay.Expr
The left hand side quantized input data.
rhs : relay.Expr
The right hand side quantized input data.
lhs_scale: relay.Expr
The scale of the lhs quantized expr.
lhs_zero_point: relay.Expr
The zero point of lhs quantized expr.
rhs_scale: relay.Expr
The scale of the rhs quantized expr.
rhs_zero_point: relay.Expr
The zero point of rhs quantized expr.
output_scale: relay.Expr
The scale of the output quantized expr.
output_zero_point: relay.Expr
The zero point of output quantized expr.
lhs_axis: int
The channel axis for lhs quantization. Default value is -1 which corresponds
to the last axis.
rhs_axis: int
The channel axis for rhs quantization. Default value is -1 which corresponds
to the last axis.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.subtract(
lhs,
rhs,
lhs_scale,
lhs_zero_point,
rhs_scale,
rhs_zero_point,
output_scale,
output_zero_point,
lhs_axis,
rhs_axis,
)
def batch_matmul(x, y, x_zero_point, y_zero_point, x_scale, y_scale, out_dtype="int32"):
r"""
Computes batch matrix multiplication of `x` and `y` when `x` and `y` are data
in batch.
.. math::
\mbox{batch_matmul}(x, y)[i, :, :] = \mbox{matmul}(x[i, :, :], y[i, :, :]^T)
Parameters
----------
x : tvm.relay.Expr
The first quantized input.
A quantized tensor is represented in the following manner:
`A = scale_a x (QA - zp_A)`
where QA is the quantized tensor, and scale_a and zp_A are the
quantization parameters.
y : tvm.relay.Expr
The second quantized input.
x_zero_point: tvm.relay.Expr
The first input zero point.
y_zero_point: tvm.relay.Expr
The second input zero point.
x_scale: tvm.relay.Expr
The scale for the first input tensor.
y_scale: tvm.relay.Expr
The scale for the second input tensor.
out_dtype : str, optional
Specifies the output data type for mixed precision batch_matmul; can be int32 or int16.
Returns
-------
result: tvm.relay.Expr
The computed result.
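Examples
--------
A minimal sketch; note that `y` is laid out as (batch, N, K) because it is
transposed inside the matmul (shapes and parameters are illustrative):
.. code-block:: python
from tvm import relay
x = relay.var("x", shape=(2, 3, 8), dtype="int8")
y = relay.var("y", shape=(2, 4, 8), dtype="int8")
out = relay.qnn.op.batch_matmul(
    x, y, relay.const(0), relay.const(0), relay.const(0.1), relay.const(0.2)
)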
"""
return _make.batch_matmul(x, y, x_zero_point, y_zero_point, x_scale, y_scale, out_dtype)
def leaky_relu(x, alpha, input_scale, input_zero_point, output_scale, output_zero_point):
"""Quantized leaky relu.
Parameters
----------
x : relay.Expr
The quantized input tensor.
alpha: double
The alpha value.
input_scale: relay.Expr
The scale of the input quantized expr.
input_zero_point: relay.Expr
The zero point of input quantized expr.
output_scale: relay.Expr
The scale of the output quantized expr.
output_zero_point: relay.Expr
The zero point of output quantized expr.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.leaky_relu(
x,
alpha,
input_scale,
input_zero_point,
output_scale,
output_zero_point,
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/qnn/strategy/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import
"""QNN op strategies."""
from __future__ import absolute_import as _abs
from .generic import *
from . import hexagon
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/qnn/strategy/generic.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of generic operator strategy."""
from tvm.target import override_native_generic_func
def wrap_topi_schedule(topi_schedule):
"""Wrap TOPI schedule which doesn't use attrs"""
def wrapper(_attrs, outs, target):
with target:
return topi_schedule(outs)
return wrapper
def wrap_topi_compute(topi_compute):
"""Wrap TOPI compute which doesn't use attrs"""
def wrapper(_attrs, inputs, _out_type):
return [topi_compute(*inputs)]
return wrapper
def wrap_compute_quantize(topi_compute):
"""Wrap TOPI compute which use axis and out data type from attrs"""
def wrapper(attrs, inputs, _out_type):
axis = attrs.axis
out_dtype = attrs.out_dtype
args = [*inputs, axis, out_dtype]
return [topi_compute(*args)]
return wrapper
def wrap_compute_dequantize(topi_compute):
"""Wrap TOPI compute which use axis from attrs"""
def wrapper(attrs, inputs, _out_type):
args = [*inputs, attrs.axis]
return [topi_compute(*args)]
return wrapper
def wrap_topi_qnn_conv2d(topi_compute):
"""Wrap TOPI compute which use conv2d attrs and output data type"""
def wrapper(attrs, inputs, out_type):
out_dtype = out_type.dtype
oshape = out_type.shape
strides = attrs.strides
padding = attrs.padding
dilation = attrs.dilation
if len([*inputs]) == 11:
args = [*inputs, strides, padding, dilation, oshape, out_dtype]
elif len([*inputs]) == 10:
args = [ # QNN Conv2d params:
inputs[0],
inputs[1],
inputs[2],
inputs[3],
inputs[4],
inputs[5],
# Bias argument
None,
# Requantization params:
inputs[6],
inputs[7],
inputs[8],
inputs[9],
# Conv2d attrs:
strides,
padding,
dilation,
oshape,
out_dtype,
]
else:
assert len([*inputs]) == 6
args = [ # QNN Conv2d params:
*inputs,
# Bias argument:
None,
# Requantization params:
None,
None,
None,
None,
strides,
padding,
dilation,
oshape,
out_dtype,
]
return [topi_compute(*args)]
return wrapper
def wrap_topi_qnn_dense(topi_compute):
"""Wrap TOPI compute which use qnn.dense attrs"""
def wrapper(_attrs, inputs, out_type):
out_dtype = out_type.dtype
if len([*inputs]) == 11:
args = [*inputs, out_dtype]
elif len([*inputs]) == 10:
args = [ # QNN Dense params:
inputs[0],
inputs[1],
inputs[2],
inputs[3],
inputs[4],
inputs[5],
# Bias argument
None,
# Requantization params:
inputs[6],
inputs[7],
inputs[8],
inputs[9],
out_dtype,
]
else:
assert len([*inputs]) == 6
args = [ # QNN Dense params:
*inputs,
# Bias argument:
None,
# Requantization params:
None,
None,
None,
None,
out_dtype,
]
return [topi_compute(*args)]
return wrapper
def wrap_topi_concatenate(topi_compute):
"""Wrap TOPI compute which use qnn.concatenate attrs"""
def wrapper(attrs, inputs, out_type):
return [topi_compute(inputs, attrs.axis, out_type.dtype)]
return wrapper
def wrap_topi_qnn_batch_matmul(topi_compute):
"""Wrap TOPI compute which use qnn.batch_matmul attrs"""
def wrapper(attrs, inputs, _out_type):
assert len([*inputs]) == 6
args = [*inputs, attrs.transpose_a, attrs.transpose_b, attrs.out_dtype]
return [topi_compute(*args)]
return wrapper
@override_native_generic_func("qnn_quantize_strategy")
def qnn_quantize_strategy(attrs, inputs, out_type, target):
"""qnn.quantize generic strategy"""
raise RuntimeError(
"qnn.quantize is currently only supported with Hexagon. "
"Please run QNN Canonicalize pass to decompose this op into supported ops."
)
@override_native_generic_func("qnn_dequantize_strategy")
def qnn_dequantize_strategy(attrs, inputs, out_type, target):
"""qnn.dequantize generic strategy"""
raise RuntimeError(
"qnn.dequantize is currently only supported with Hexagon. "
"Please run QNN Canonicalize pass to decompose this op into supported ops."
)
@override_native_generic_func("qnn_requantize_strategy")
def qnn_requantize_strategy(attrs, inputs, out_type, target):
"""qnn.requantize generic strategy"""
raise RuntimeError(
"qnn.requantize is currently only supported with Hexagon. "
"Please run QNN Canonicalize pass to decompose this op into supported ops."
)
@override_native_generic_func("qnn_add_strategy")
def qnn_add_strategy(attrs, inputs, out_type, target):
"""qnn.add generic strategy"""
raise RuntimeError(
"qnn.add is currently only supported with Hexagon. "
"Please run QNN Canonicalize pass to decompose this op into supported ops."
)
@override_native_generic_func("qnn_concatenate_strategy")
def qnn_concatenate_strategy(attrs, inputs, out_type, target):
"""qnn.concatenate generic strategy"""
raise RuntimeError(
"qnn.concatenate is currently only supported with Hexagon. "
"Please run QNN Canonicalize pass to decompose this op into supported ops."
)
@override_native_generic_func("qnn_conv2d_strategy")
def qnn_conv2d_strategy(attrs, inputs, out_type, target):
"""qnn.conv2d generic strategy"""
raise RuntimeError(
"qnn.conv2d is currently only supported with Hexagon. "
"Please run QNN Canonicalize pass to decompose this op into supported ops."
)
@override_native_generic_func("qnn_dense_strategy")
def qnn_dense_strategy(attrs, inputs, out_type, target):
"""qnn.dense generic strategy"""
raise RuntimeError(
"qnn.dense is currently only supported with Hexagon. "
"Please run QNN Canonicalize pass to decompose this op into supported ops."
)
@override_native_generic_func("qnn_batch_matmul_strategy")
def qnn_batch_matmul_strategy(attrs, inputs, out_type, target):
"""qnn.batch_matmul generic strategy"""
raise RuntimeError(
"qnn.batch_matmul is currently only supported with Hexagon. "
"Please run QNN Canonicalize pass to decompose this op into supported ops."
)
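# A minimal sketch of how a target backend plugs into one of the generic
# strategies above (the "mytarget" name and the TOPI functions are
# hypothetical; see hexagon.py for the real registrations):
#
#   from tvm.relay import op as _op
#
#   @qnn_quantize_strategy.register("mytarget")
#   def qnn_quantize_strategy_mytarget(attrs, inputs, out_type, target):
#       strategy = _op.OpStrategy()
#       strategy.add_implementation(
#           wrap_compute_quantize(my_topi_quantize),
#           wrap_topi_schedule(my_topi_schedule),
#           name="qnn_quantize.mytarget",
#       )
#       return strategy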
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/qnn/strategy/hexagon.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of Hexagon operator strategy."""
# pylint: disable=unused-argument,wildcard-import,unused-wildcard-import
from tvm import topi
from .generic import *
from ... import op as _op
from ...op.strategy.generic import is_depthwise_conv2d
@qnn_quantize_strategy.register("hexagon")
def qnn_quantize_strategy_hexagon(attrs, inputs, out_type, target):
"""qnn.quantize strategy for Hexagon"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_quantize(topi.hexagon.qnn_quantize),
wrap_topi_schedule(topi.hexagon.schedule_qnn_quantize),
name="qnn_quantize.hexagon",
)
return strategy
@qnn_dequantize_strategy.register("hexagon")
def qnn_dequantize_strategy_hexagon(attrs, inputs, out_type, target):
"""qnn.dequantize strategy for Hexagon"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_dequantize(topi.hexagon.qnn_dequantize),
wrap_topi_schedule(topi.hexagon.schedule_qnn_dequantize),
name="qnn_dequantize.hexagon",
)
return strategy
@qnn_requantize_strategy.register("hexagon")
def qnn_requantize_strategy_hexagon(attrs, inputs, out_type, target):
"""qnn.requantize strategy for Hexagon"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_quantize(topi.hexagon.qnn_requantize),
wrap_topi_schedule(topi.hexagon.schedule_qnn_requantize),
name="qnn_requantize.hexagon",
)
return strategy
@qnn_add_strategy.register("hexagon")
def qnn_add_strategy_hexagon(attrs, inputs, out_type, target):
"""qnn.add strategy for Hexagon"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_topi_compute(topi.hexagon.qnn_add),
wrap_topi_schedule(topi.hexagon.schedule_qnn_add),
name="qnn_add.hexagon",
)
return strategy
@qnn_concatenate_strategy.register("hexagon")
def qnn_concatenate_strategy_hexagon(attrs, inputs, out_type, target):
"""qnn.concatenate strategy for Hexagon"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_topi_concatenate(topi.hexagon.qnn_concatenate),
wrap_topi_schedule(topi.hexagon.schedule_qnn_concatenate),
name="qnn_concatenate.hexagon",
)
return strategy
@qnn_conv2d_strategy.register("hexagon")
def qnn_conv2d_strategy_hexagon(attrs, inputs, out_type, target):
"""qnn.conv2d strategy for Hexagon"""
data = inputs[0]
kernel = inputs[1]
data_layout = attrs.data_layout
kernel_layout = attrs.kernel_layout
groups = attrs.groups
strategy = _op.OpStrategy()
if groups == 1:
if data_layout == "NCHW" and kernel_layout == "OIHW":
strategy.add_implementation(
wrap_topi_qnn_conv2d(topi.hexagon.qnn_conv2d),
wrap_topi_schedule(topi.hexagon.schedule_qnn_conv2d),
name="qnn_conv2d.hexagon",
)
elif is_depthwise_conv2d(data.shape, data_layout, kernel.shape, kernel_layout, groups):
if data_layout == "NCHW" and kernel_layout == "OIHW":
strategy.add_implementation(
wrap_topi_qnn_conv2d(topi.hexagon.qnn_depthwise_conv2d),
wrap_topi_schedule(topi.hexagon.schedule_qnn_depthwise_conv2d),
name="qnn_depthwise_conv2d.hexagon",
)
else:
raise RuntimeError("Unsupported strategy for group qnn.conv2d")
return strategy
@qnn_dense_strategy.register("hexagon")
def qnn_dense_strategy_hexagon(attrs, inputs, out_type, target):
"""qnn.dense strategy for Hexagon"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_topi_qnn_dense(topi.hexagon.qnn_dense),
wrap_topi_schedule(topi.hexagon.schedule_qnn_dense),
name="qnn_dense.hexagon",
)
return strategy
@qnn_batch_matmul_strategy.register("hexagon")
def qnn_batch_matmul_strategy_hexagon(attrs, inputs, out_type, target):
"""qnn.batch_matmul strategy for Hexagon"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_topi_qnn_batch_matmul(topi.hexagon.qnn_batch_matmul),
wrap_topi_schedule(topi.hexagon.schedule_qnn_batch_matmul),
name="qnn_batch_matmul.hexagon",
)
return strategy
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/qnn/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,arguments-differ,no-else-return,unused-argument,missing-docstring
"""
QNN pass transformation infrastructure.
"""
from tvm import relay
def CanonicalizeOps():
"""Converts/Lowers an expression containing QNN ops to an expression containing only core
(non-Dialect) Relay ops. Each QNN op is lowered to a sequence of existing Relay ops. This is a
target-independent pass. One can register the lowering/transformation function for a QNN op
by attaching it to that op's FTVMQnnCanonicalize attribute (the QNN counterpart of the
FTVMLegalize attribute). An example of this transformation is below.
Examples
________
.. code-block:: python
# Original expression
qnn_expr = relay.qnn.op.requantize(y,
input_scale=1,
input_zero_point=0,
output_scale=1,
output_zero_point=0,
out_dtype='int8')
# We want to utilize all the existing Relay infrastructure. So, instead of supporting this
# QNN requantize op, we convert it into a sequence of existing Relay operators.
mod = tvm.IRModule.from_expr(qnn_expr)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
relay_expr = mod['main']
print(relay_expr)
def @main(%quantized_data: Tensor[(200), int32]) -> Tensor[(200), int8] {
%0 = cast(%quantized_data, dtype="int64") /* ty=Tensor[(200), int64] */;
%1 = multiply(%0, 2 /* ty=int64 */) /* ty=Tensor[(200), int64] */;
%2 = multiply(%1, 1073741824 /* ty=int64 */) /* ty=Tensor[(200), int64] */;
%3 = add(%2, 1073741824 /* ty=int64 */) /* ty=Tensor[(200), int64] */;
%4 = right_shift(%3, 31 /* ty=int64 */) /* ty=Tensor[(200), int64] */;
%5 = add(0 /* ty=int64 */, %4) /* ty=Tensor[(200), int64] */;
%6 = clip(%5, a_min=-128f, a_max=127f) /* ty=Tensor[(200), int64] */;
cast(%6, dtype="int8") /* ty=Tensor[(200), int8] */
}
Returns
-------
ret : tvm.transform.Pass
The registered pass that canonicalizes QNN ops to Relay ops.
"""
return relay.transform.Legalize("FTVMQnnCanonicalize")
def Legalize():
"""Legalizes QNN ops. As opposed to Relay Legalize, this one legalizes only QNN ops. One can
register a transformation/legalization function for an op by using the FTVMQnnLegalize attr_name
for FTVMLegalize op attribute. The isolation of QNN and Relay Legalize gives us separation of
concerns, leading to a better software practice. The legalization can be configured to happen
per target. An example of this type of legalization is shown below.
Examples
________
Suppose the original graph is as follows
data(u8)  weight(u8)
    |        |
    |        |
   qnn.conv2d (int32)
        |
        |
   nn.relu (int32)
Now, we know that Intel Cascade Lake has VNNI instructions to speed up convolution. However,
they only work on u8 x i8 inputs. So, here, we can use QNN Legalize to transform the above
graph as follows
data(u8)  weight(u8)
    |        |
    |        |
    |    requantize(i8)
    |        |
    |        |
   qnn.conv2d (int32)
        |
        |
   nn.relu (int32)
In this legalization, since we have isolated legalization for QNN ops, it will only trigger the
transformation for qnn.conv2d (and not nn.relu). This pass can be followed by CanonicalizeOps to
further lower the qnn.requantize and qnn.conv2d into an expr containing only Relay ops.
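A minimal sketch of registering such a per-op hook and running the pass
(assuming `register_qnn_legalize` is exposed as in upstream TVM; the hook body
is illustrative):
.. code-block:: python
from tvm.relay.qnn.op import register_qnn_legalize
@register_qnn_legalize("qnn.conv2d")
def _legalize_qnn_conv2d(attrs, inputs, types):
    # Return a rewritten relay.Expr, or None to keep the op unchanged.
    return None
mod = relay.qnn.transform.Legalize()(mod)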
Returns
-------
ret : tvm.transform.Pass
The registered pass that legalizes QNN ops.
"""
return relay.transform.Legalize("FTVMQnnLegalize")
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/quantize/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, redefined-builtin
"""Automatic quantization utilities."""
from __future__ import absolute_import as _abs
from .quantize import *
from ._partition import register_partition_function
from ._annotate import register_annotate_function
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/quantize/_annotate.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument,inconsistent-return-statements
"""Internal module for registering attribute for annotation."""
import warnings
from tvm import topi
import tvm._ffi
from tvm.relay.op import op as _reg
from .. import expr as _expr
from .. import analysis as _analysis
from .. import op as _op
from . import _quantize
from .quantize import QAnnotateKind, current_qconfig, quantize_context
from .quantize import _forward_op
@_op.register_compute("relay.op.annotation.simulated_quantize")
def simulated_quantize_compute(attrs, inputs, out_type):
"""Compiler for simulated_quantize."""
assert len(inputs) == 4
assert attrs.sign
assert attrs.rounding == "round"
data, scale, clip_min, clip_max = inputs
if attrs.kind == QAnnotateKind.IDENTITY:
return [topi.identity(data)]
# simulate rounding error
scaled_data = topi.divide(data, scale)
clipped_data = topi.maximum(topi.minimum(scaled_data, clip_max), clip_min)
round_data = topi.round(clipped_data)
# recover data
rdata = topi.multiply(round_data, scale)
return [rdata]
_reg.register_injective_schedule("relay.op.annotation.simulated_quantize")
_reg.register_pattern("relay.op.annotation.simulated_quantize", _reg.OpPattern.ELEMWISE)
_reg.register_injective_schedule("annotation.cast_hint")
@tvm._ffi.register_object("relay.QAnnotateExpr")
class QAnnotateExpr(_expr.TempExpr):
"""A special kind of Expr for Annotating.
Parameters
----------
expr: Expr
the original relay ir expr.
kind: QAnnotateKind
the kind of annotation field.
"""
def __init__(self, expr, kind):
self.__init_handle_by_constructor__(_quantize.make_annotate_expr, expr, kind)
def _get_expr_kind(anno):
"""Get the expression and QAnnotateKind from QAnnotateExpr or Expr"""
if isinstance(anno, QAnnotateExpr):
return anno.expr, anno.kind
return anno, None
def register_annotate_function(op_name, frewrite=None, level=10):
"""register a rewrite function for operator, used by annotation.
Parameters
---------
op_name: str
The name of operation
frewrite : function, optional
The function to be registered.
level : int, optional
The priority level
"""
def default_rewrite(ref_call, new_args, ctx):
# recover from QAnnotateExpr
args = [_get_expr_kind(x)[0] for x in new_args]
return _forward_op(ref_call, args)
def _register(func):
"""internal register function"""
def frewrite_with_guard(ref_call, new_args, ctx):
if not current_qconfig().guard(ref_call):
return default_rewrite(ref_call, new_args, ctx)
return func(ref_call, new_args, ctx)
return tvm.ir.register_op_attr(op_name, "FQAnnotateRewrite", frewrite_with_guard, level)
return _register(frewrite) if frewrite is not None else _register
def attach_simulated_quantize(data, kind, sign=True, rounding="round"):
"""Attach a simulated quantize operation after input data expr.
Parameters
---------
data: Expr
the original data expr.
kind: QAnnotateKind
the kind of annotation field.
"""
quantize_op = _op.get("relay.op.annotation.simulated_quantize")
if isinstance(data, _expr.Call) and data.op == quantize_op:
if data.attrs.kind == kind and data.attrs.sign == sign and data.attrs.rounding == rounding:
return data
qctx = quantize_context()
key = tuple([data, kind, sign, rounding])
if key in qctx.qnode_map:
return qctx.qnode_map[key]
dom_scale = _expr.var("dom_scale")
clip_min = _expr.var("clip_min")
clip_max = _expr.var("clip_max")
qnode = _quantize.simulated_quantize(data, dom_scale, clip_min, clip_max, kind, sign, rounding)
qctx.qnode_map[key] = qnode
return qnode
tvm._ffi.register_func("relay.quantize.attach_simulated_quantize", attach_simulated_quantize)
@register_annotate_function("nn.contrib_conv2d_NCHWc")
def conv2d_nchwc_rewrite(ref_call, new_args, ctx):
warnings.warn(
"NCHWc layout Conv2D detected, please use a lower "
"optimization level before applying the quantization "
"pass as quantization will have no effect here..."
)
@register_annotate_function("nn.conv2d")
def conv2d_rewrite(ref_call, new_args, ctx):
"""Rewrite function for conv2d. Lhs of conv will be quantized to
input field, and rhs of conv will be quantized to weight field.
Output would be in activation field"""
if quantize_context().check_to_skip(ref_call):
return None
lhs_expr, lhs_kind = _get_expr_kind(new_args[0])
rhs_expr, rhs_kind = _get_expr_kind(new_args[1])
if lhs_kind is None or lhs_kind == QAnnotateKind.ACTIVATION:
lhs_expr = attach_simulated_quantize(lhs_expr, QAnnotateKind.INPUT)
assert rhs_kind is None
rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.WEIGHT)
expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)
@register_annotate_function("nn.conv1d")
def conv1d_rewrite(ref_call, new_args, ctx):
"""Rewrite function for conv1d. Lhs of conv will be quantized to
input field, and rhs of conv will be quantized to weight field.
Output would be in activation field"""
if quantize_context().check_to_skip(ref_call):
return None
lhs_expr, lhs_kind = _get_expr_kind(new_args[0])
rhs_expr, rhs_kind = _get_expr_kind(new_args[1])
if lhs_kind is None or lhs_kind == QAnnotateKind.ACTIVATION:
lhs_expr = attach_simulated_quantize(lhs_expr, QAnnotateKind.INPUT)
assert rhs_kind is None
rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.WEIGHT)
expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)
@register_annotate_function("nn.dense")
def dense_rewrite(ref_call, new_args, ctx):
"""Rewrite function for dense. Lhs of dense will be quantized to input field, and rhs of
dense will be quantized to weight field. Output would be in activation field."""
if current_qconfig().skip_dense_layer:
return None
if quantize_context().check_to_skip(ref_call):
return None
lhs_expr, lhs_kind = _get_expr_kind(new_args[0])
rhs_expr, rhs_kind = _get_expr_kind(new_args[1])
if lhs_kind is None or lhs_kind == QAnnotateKind.ACTIVATION:
lhs_expr = attach_simulated_quantize(lhs_expr, QAnnotateKind.INPUT)
assert rhs_kind is None
rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.WEIGHT)
expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)
@register_annotate_function("multiply")
def multiply_rewrite(ref_call, new_args, ctx):
"""Rewrite function for multiply."""
if quantize_context().check_to_skip(ref_call):
return None
lhs_expr, lhs_kind = _get_expr_kind(new_args[0])
rhs_expr, rhs_kind = _get_expr_kind(new_args[1])
if lhs_kind is None and rhs_kind is None:
return None
if lhs_kind in [QAnnotateKind.ACTIVATION, QAnnotateKind.INPUT] and rhs_kind is None:
# quantize lhs to INPUT field
if lhs_kind == QAnnotateKind.ACTIVATION:
lhs_expr = attach_simulated_quantize(lhs_expr, QAnnotateKind.INPUT)
if _analysis.check_constant(rhs_expr):
rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.WEIGHT)
else:
rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.INPUT)
expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)
raise ValueError
@register_annotate_function("add")
def add_rewrite(ref_call, new_args, ctx):
"""Rewrite function for add."""
if quantize_context().check_to_skip(ref_call):
return None
lhs_expr, lhs_kind = _get_expr_kind(new_args[0])
rhs_expr, rhs_kind = _get_expr_kind(new_args[1])
if lhs_kind is None and rhs_kind is None:
# trivial case
return None
if lhs_kind is None and rhs_kind is not None:
# quantize lhs to INPUT field if it is normal expression
assert rhs_kind in [QAnnotateKind.INPUT, QAnnotateKind.ACTIVATION]
lhs_expr = attach_simulated_quantize(lhs_expr, QAnnotateKind.INPUT)
expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
return QAnnotateExpr(expr, QAnnotateKind.INPUT)
if lhs_kind is not None and rhs_kind is None:
if _analysis.check_constant(rhs_expr):
# - introduced by batch_norm: add(out, const)
rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.WEIGHT)
else:
rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.INPUT)
expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)
if lhs_kind is not None and rhs_kind is not None:
if lhs_kind == QAnnotateKind.INPUT and rhs_kind == QAnnotateKind.INPUT:
expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
return QAnnotateExpr(expr, QAnnotateKind.INPUT)
if lhs_kind == QAnnotateKind.ACTIVATION and rhs_kind == QAnnotateKind.ACTIVATION:
rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.INPUT)
expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)
if (lhs_kind == QAnnotateKind.ACTIVATION and rhs_kind == QAnnotateKind.INPUT) or (
lhs_kind == QAnnotateKind.INPUT and rhs_kind == QAnnotateKind.ACTIVATION
):
expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)
raise ValueError()
def identity_rewrite(ref_call, new_args, ctx):
"""Simply forward the original operation"""
if quantize_context().check_to_skip(ref_call):
return None
x_expr, x_kind = _get_expr_kind(new_args[0])
if x_kind is None:
return None
ret_expr = _forward_op(ref_call, [x_expr])
return QAnnotateExpr(ret_expr, x_kind)
register_annotate_function("reshape", identity_rewrite)
register_annotate_function("clip", identity_rewrite)
register_annotate_function("nn.relu", identity_rewrite)
register_annotate_function("strided_slice", identity_rewrite)
register_annotate_function("nn.avg_pool2d", identity_rewrite)
register_annotate_function("nn.batch_flatten", identity_rewrite)
register_annotate_function("transpose", identity_rewrite)
register_annotate_function("annotation.stop_fusion", identity_rewrite)
def pool2d_rewrite(ref_call, new_args, ctx):
"""Rewrite function for max pool2d"""
if quantize_context().check_to_skip(ref_call):
return None
expr, x_kind = _get_expr_kind(new_args[0])
if x_kind is None:
return None
if x_kind == QAnnotateKind.ACTIVATION:
expr = attach_simulated_quantize(expr, QAnnotateKind.INPUT)
expr = _forward_op(ref_call, [expr])
return QAnnotateExpr(expr, QAnnotateKind.INPUT)
register_annotate_function("nn.max_pool2d", pool2d_rewrite)
def pool1d_rewrite(ref_call, new_args, ctx):
"""Rewrite function for max pool1d"""
if quantize_context().check_to_skip(ref_call):
return None
expr, x_kind = _get_expr_kind(new_args[0])
if x_kind is None:
return None
if x_kind == QAnnotateKind.ACTIVATION:
expr = attach_simulated_quantize(expr, QAnnotateKind.INPUT)
expr = _forward_op(ref_call, [expr])
return QAnnotateExpr(expr, QAnnotateKind.INPUT)
register_annotate_function("nn.max_pool1d", pool1d_rewrite)
@register_annotate_function("annotation.cast_hint")
def cast_hint_rewrite(ref_call, new_args, ctx):
"""Rewrite function to force cast"""
expr, x_kind = _get_expr_kind(new_args[0])
if quantize_context().check_to_skip(ref_call):
return expr
if x_kind is None:
return new_args[0]
if x_kind == QAnnotateKind.ACTIVATION:
expr = attach_simulated_quantize(expr, QAnnotateKind.INPUT)
expr = _forward_op(ref_call, [expr])
return QAnnotateExpr(expr, QAnnotateKind.INPUT)
@register_annotate_function("concatenate")
def concatenate_rewrite(ref_call, new_args, ctx):
"""Rewrite function for concatenate"""
if quantize_context().check_to_skip(ref_call):
return None
input_tuple = new_args[0]
expr_list = [_get_expr_kind(x)[0] for x in input_tuple]
kind_list = [_get_expr_kind(x)[1] for x in input_tuple]
# make sure the inputs of concatenate are all normal
# expressions or annotate expressions
if all([k is None for k in kind_list]):
return None
for i, k in enumerate(kind_list):
if k is None:
expr_list[i] = attach_simulated_quantize(expr_list[i], QAnnotateKind.ACTIVATION)
expr = _forward_op(ref_call, [_expr.Tuple(expr_list)])
return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)
@register_annotate_function("nn.global_avg_pool2d")
def global_avg_pool2d_rewrite(ref_call, new_args, ctx):
"""Rewrite function for global_avg_pool2d for stopping quantize"""
if quantize_context().check_to_skip(ref_call):
return None
expr, x_kind = _get_expr_kind(new_args[0])
if x_kind is None:
return None
expr = _forward_op(ref_call, [new_args[0].realize()])
# stop quantize after global_avg_pool2d
quantize_context().stop_quantize()
return expr
@register_annotate_function("nn.batch_matmul")
def batch_matmul_rewrite(ref_call, new_args, ctx):
"""Rewrite function for batch_matmul"""
if quantize_context().check_to_skip(ref_call):
return None
lhs_expr, lhs_kind = _get_expr_kind(new_args[0])
rhs_expr, rhs_kind = _get_expr_kind(new_args[1])
if lhs_kind is None or lhs_kind == QAnnotateKind.ACTIVATION:
if _analysis.check_constant(lhs_expr):
lhs_expr = attach_simulated_quantize(lhs_expr, QAnnotateKind.WEIGHT)
else:
lhs_expr = attach_simulated_quantize(lhs_expr, QAnnotateKind.INPUT)
if rhs_kind is None or rhs_kind == QAnnotateKind.ACTIVATION:
if _analysis.check_constant(rhs_expr):
rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.WEIGHT)
else:
rhs_expr = attach_simulated_quantize(rhs_expr, QAnnotateKind.INPUT)
expr = _forward_op(ref_call, [lhs_expr, rhs_expr])
return QAnnotateExpr(expr, QAnnotateKind.ACTIVATION)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/quantize/_calibrate.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Find scales for quantization on the dataset."""
from __future__ import absolute_import
import logging
import multiprocessing as mp
import numpy as np
import tvm
import tvm.driver
from tvm.ir import IRModule
from . import _quantize
from . import quantize
from .. import op as _op
from .. import expr as _expr
from .. import analysis as _analysis
from .. import build_module as _build_module
from ...contrib import graph_executor
from .kl_divergence import _find_scale_by_kl
def _get_profile_runtime(mod):
func = mod["main"]
func = _quantize.CreateStatsCollector(func)
if tvm.target.Target.current():
target = tvm.target.Target.current()
dev = tvm.device(target.kind.name)
else:
target = "llvm"
dev = tvm.device(target)
with tvm.transform.PassContext(opt_level=3):
lib = _build_module.build(func, target=target)
runtime = graph_executor.GraphModule(lib["default"](dev))
return runtime
def collect_stats(mod, dataset, chunk_by=-1):
"""Given an annotated graph, create a profile graph to collect profile data from the
calibration dataset. This pass collects the inputs of simulated_quantize ops into a tuple.
Simulated_quantize ops are rewritten to identity mode. The tuple is the output of the profile
graph.
Parameters
----------
mod: Module
The simulation graph after annotation.
dataset: Iterable[Dict[str, NDArray]]
The calibration dataset, given as an iterable of input-name to array mappings.
chunk_by: optional, int
The size of chunk to be returned in one iteration. It is meant to be
used for reducing memory usage. If not specified, return samples for
all layers in one chunk.
Returns
-------
ret: Iterable[list of ndarray]
List of output data of each layer, chunked by the chunk_by parameter
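Examples
--------
A minimal sketch of consuming the generator (`mod` and `dataset` are assumed
to be an annotated module and an iterable of input-dict batches):
.. code-block:: python
for chunk in collect_stats(mod, dataset, chunk_by=16):
    for layer_samples in chunk:
        print(layer_samples.size)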
"""
logging.info("collecting statistics for calibration...")
runtime = _get_profile_runtime(mod)
num_outputs = runtime.get_num_outputs()
chunk_by = num_outputs if chunk_by == -1 else chunk_by
for i in range(0, num_outputs, chunk_by):
outputs = [[] for i in range(min(chunk_by, num_outputs - i))]
for batch in dataset:
runtime.set_input(**batch)
runtime.run()
for j in range(i, min(i + chunk_by, num_outputs)):
outputs[j - i].append(runtime.get_output(j).numpy())
yield [np.concatenate(output).reshape(-1) for output in outputs]
def _kl_scale(mod, dataset):
cfg = quantize.current_qconfig()
chunk_by = cfg.calibrate_chunk_by
scales = []
for samples in collect_stats(mod, dataset, chunk_by):
logging.info("finding threshold with kl for calibration...")
with mp.Pool() as pool:
scales += list(pool.map(_find_scale_by_kl, samples))
def func(_):
scale = scales[func.scale_idx]
func.scale_idx += 1
return scale
func.scale_idx = 0
return func
def _find_scale_by_percentile(arr, percentile=0.99999):
assert isinstance(arr, np.ndarray)
x = np.abs(arr)
max_k = int(x.size * percentile)
return np.partition(x, max_k)[max_k]
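# For example, with 1,000,000 samples and the default percentile,
# max_k = int(1e6 * 0.99999) = 999990, so roughly the top 0.001% of
# magnitudes are treated as outliers and excluded from the scale.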
def _percentile_scale(mod, dataset):
cfg = quantize.current_qconfig()
chunk_by = cfg.calibrate_chunk_by
scales = []
for samples in collect_stats(mod, dataset, chunk_by):
logging.info("finding threshold with percentile for calibration...")
with mp.Pool() as pool:
scales += list(pool.map(_find_scale_by_percentile, samples))
def func(_):
scale = scales[func.scale_idx]
func.scale_idx += 1
return scale
func.scale_idx = 0
return func
def _set_params(mod, input_scale_func, weight_scale_func):
quantize_op = _op.get("relay.op.annotation.simulated_quantize")
cfg = quantize.current_qconfig()
const_params = {}
def visit_func(expr):
"""visitor function for traverse"""
if isinstance(expr, _expr.Call) and expr.op == quantize_op:
_, ndom_scale, nclip_min, nclip_max = expr.args
attrs = expr.attrs
kind = attrs.kind
nbit = cfg.get_nbit_by_kind(kind)
valid_bit = nbit - attrs.sign
# set scale
if kind == quantize.QAnnotateKind.WEIGHT:
assert isinstance(expr.args[0], _expr.Constant)
scale = weight_scale_func(expr)
else:
scale = input_scale_func(expr)
def _make_const(val):
return _expr.const(val, "float32")
valid_range = 2**valid_bit
const_params[ndom_scale] = _make_const(scale / valid_range)
const_params[nclip_min] = _make_const(-(valid_range - 1))
const_params[nclip_max] = _make_const((valid_range - 1))
main_func = mod["main"]
_analysis.post_order_visit(main_func, visit_func)
main_func = _expr.bind(main_func, const_params)
func_dict = {}
for global_var, func in mod.functions.items():
if global_var.name_hint != "main":
func_dict[global_var] = func
return IRModule.from_expr(main_func, func_dict)
# weight scale functions
def _power2_scale(sq_call): # pylint: disable=unused-argument
"""calculate weight scale with nearest mode-2 scale"""
var = sq_call.args[0]
assert isinstance(var, _expr.Constant)
val = np.amax(np.abs(var.data.numpy()))
return 2 ** np.math.ceil(np.math.log(val, 2)) if val > 0 else 1.0
def _max_scale(sq_call):
"""calculate weight scale with maximum absolute value"""
var = sq_call.args[0]
assert isinstance(var, _expr.Constant)
val = np.amax(np.abs(var.data.numpy()))
return val
# input scale functions
def _global_scale(sq_call): # pylint: disable=unused-argument
cfg = quantize.current_qconfig()
return cfg.global_scale
def calibrate(dataset=None):
"""The calibrate procedure will try to calculate the content of
dom_scale, nbit, clip_min, clip_max for every `simulated_quantize`
operator.
Parameters
----------
dataset: Optional[Iterable[Dict[str, NDArray]]]
The calibration dataset, given as an iterable of input-name to array mappings.
Returns
-------
ret: Function
The module pass function.
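Examples
--------
A minimal sketch of wiring this into quantization (`mod`, `params`, and the
input name/shape are illustrative):
.. code-block:: python
import numpy as np
from tvm import relay
def calib_dataset():
    for _ in range(8):
        yield {"data": np.random.uniform(size=(1, 3, 224, 224)).astype("float32")}
with relay.quantize.qconfig(calibrate_mode="kl_divergence"):
    qmod = relay.quantize.quantize(mod, params, dataset=calib_dataset())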
"""
def wrapped_func(mod, _):
"""make transform.module pass happy"""
cfg = quantize.current_qconfig()
if cfg.calibrate_mode == "kl_divergence":
input_scale_func = _kl_scale(mod, dataset)
elif cfg.calibrate_mode == "global_scale":
input_scale_func = _global_scale
elif cfg.calibrate_mode == "percentile":
input_scale_func = _percentile_scale(mod, dataset)
else:
raise ValueError("Unknown calibrate mode {}".format(cfg.calibrate_mode))
if cfg.weight_scale == "max":
weight_scale_func = _max_scale
elif cfg.weight_scale == "power2":
weight_scale_func = _power2_scale
else:
raise ValueError("Unknown weight scale mode {}".format(cfg.weight_scale))
return _set_params(mod, input_scale_func, weight_scale_func)
return wrapped_func
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/quantize/_partition.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument,inconsistent-return-statements
"""Internal module for registering attribute for annotation."""
import tvm
from .. import expr as _expr
from .. import analysis as _analysis
from . import _quantize
from .quantize import _forward_op
def register_partition_function(op_name, frewrite=None, level=10):
return tvm.ir.register_op_attr(op_name, "FQPartitionRewrite", frewrite, level)
@tvm._ffi.register_object("relay.QPartitionExpr")
class QPartitionExpr(_expr.TempExpr):
def __init__(self, expr):
self.__init_handle_by_constructor__(_quantize.make_partition_expr, expr)
def partition_expr_check(expr):
if isinstance(expr, QPartitionExpr):
return True, expr.expr
return False, expr
@register_partition_function("nn.conv2d")
def conv2d_partition_function(ref_call, new_args, ctx):
"""Rewrite function for conv2d for partition"""
data_cond, data = partition_expr_check(new_args[0])
kernel_cond, kernel = partition_expr_check(new_args[1])
assert not kernel_cond
if data_cond:
data = new_args[0].realize()
ret = _forward_op(ref_call, [data, kernel])
return QPartitionExpr(ret)
def identity_partition_function(ref_call, new_args, ctx):
cond, expr = partition_expr_check(new_args[0])
if cond:
return QPartitionExpr(_forward_op(ref_call, [expr]))
return None
register_partition_function("clip", identity_partition_function)
register_partition_function("nn.relu", identity_partition_function)
register_partition_function("nn.max_pool2d", identity_partition_function)
def add_partition_generic(ref_call, new_args, ctx):
"""Rewrite function for ewise add for partition for generic devices"""
lhs_cond, lhs = partition_expr_check(new_args[0])
rhs_cond, rhs = partition_expr_check(new_args[1])
if lhs_cond and rhs_cond:
# - introduced by ResNet, when for the first residual connection
# ...
# %0 = nn.conv2d(%data, %meta[relay.Constant])
# %1 = add(%0, %meta[relay.Constant])
# %2 = nn.relu(%1)
# %3 = nn.max_pool2d(%2)
# ...
# %9 = nn.conv2d(%8, %meta[relay.Constant])
# %10 = add(%9, %meta[relay.Constant])
# %11 = add(%3, %10) <- need to insert annotations for %3, %10
# ...
lhs = new_args[0].realize()
rhs = new_args[1].realize()
return QPartitionExpr(_forward_op(ref_call, [lhs, rhs]))
if not lhs_cond and rhs_cond:
# - introduced by residual connection in ResNet
# ...
# %13 = nn.conv2d(%12, %meta[relay.Constant])
# %14 = add(%13, %meta[relay.Constant])
# %15 = annotation.cast_hint(%14, 'int8')
# %16 = annotation.stop_fusion(%15)
# %17 = add(%5, %16)
# %18 = nn.relu(%17)
# ...
# %24 = nn.conv2d(%23, %meta[relay.Constant])
# %25 = add(%24, %meta[relay.Constant])
# %26 = add(%18, %25) <- need to insert annotations for %25
# ...
rhs = new_args[1].realize()
return _forward_op(ref_call, [lhs, rhs])
if lhs_cond and not rhs_cond:
if _analysis.check_constant(rhs):
# - introduced by batch_norm: add(out, bias)
return QPartitionExpr(_forward_op(ref_call, [lhs, rhs]))
# - introduced by residual connection in MobileNetV2
# ...
# %81 = add(%80, %meta[relay.Constant])
# %82 = annotation.cast_hint(%81, 'int8')
# %83 = annotation.stop_fusion(%82)
# %84 = add(%79, %83)
# ...
# %95 = nn.conv2d(%94, %meta[relay.Constant])
# %96 = add(%95, %meta[relay.Constant])
# %97 = add(%96, %84) <- need to insert annotations for %96
# ...
lhs = new_args[0].realize()
return _forward_op(ref_call, [lhs, rhs])
if not lhs_cond and not rhs_cond:
# trivial case
return None
raise ValueError
def mul_partition_generic(ref_call, new_args, ctx):
"""Rewrite function for ewise mul for partition for generic devices"""
lhs_cond, lhs = partition_expr_check(new_args[0])
rhs_cond, rhs = partition_expr_check(new_args[1])
if lhs_cond:
# introduced by bn: multiply(out, scale)
lhs = new_args[0].realize()
return QPartitionExpr(_forward_op(ref_call, [lhs, rhs]))
if not lhs_cond and not rhs_cond:
# trivial case
return None
raise ValueError
# TODO(ziheng) enhance `register_partition_function` to dispatch
# for target automatically
@register_partition_function("add")
def add_partition_function(ref_call, new_args, ctx):
"""Rewrite function for ewise add for partition"""
target = tvm.target.Target.current()
if target and "cuda" in target.keys:
# TODO(wuwei/ziheng) cuda specific rules
return add_partition_generic(ref_call, new_args, ctx)
return add_partition_generic(ref_call, new_args, ctx)
@register_partition_function("multiply")
def multiply_partition_function(ref_call, new_args, ctx):
"""Rewrite function for ewise multiply for partition"""
return mul_partition_generic(ref_call, new_args, ctx)
# add cast after the relu op to make it run on vta
@register_partition_function("nn.global_avg_pool2d")
def global_avg_pool2d_partition_function(ref_call, new_args, ctx):
cond, expr = partition_expr_check(new_args[0])
if cond:
expr = new_args[0].realize()
else:
expr = QPartitionExpr(new_args[0]).realize()
return _forward_op(ref_call, [expr])
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/quantize/_partition_conversions.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, not-context-manager
"""Utilities for partitioning input quantization and output dequantization expressions."""
import tvm
from tvm import relay
from tvm.relay.expr_functor import ExprMutator, ExprVisitor
# operators that are allowed in prefix/suffix partitions, because they are used
# to quantize/dequantize
ALLOWED_CONVERSION_OPS = ["add", "multiply", "right_shift", "clip", "round", "cast"]
def partition_conversions(mod, quantized_dtypes, ensure_fully_integral):
"""Partition mod into input quantization, core quantized inference, and output dequantization.
The resulting module includes an additional `main` that fuses all three
partitions together.
Parameters
----------
mod : tvm.IRModule
Quantized module to partition
quantized_dtypes : Set[str]
Set of data types allowed in quantized operators
ensure_fully_integral : bool
Whether to raise an exception if there are unquantized operators in the result
Returns
-------
fused_mod : tvm.IRModule
Module containing the input quantization (`quantize_inputs`), core
quantized inference (`quantized_main`), output dequantization
(`dequantize_outputs`), and full quantized inference functions
"""
# Partitioning is implemented as in the diagram below:
#
# +----------------------------+
# |Quantized Inference Function|
# +--------------+-------------+
# |
# partition_prefix
# |
# +-----+-------------------------+
# | |
# +--------v---------+ +-----------------v------------------+
# |Input Quantization| |Rest of Quantized Inference Function|
# +------------------+ +-----------------+------------------+
# |
# partition_suffix
# |
# +------+---------------------+
# | |
# +------------------+ +----------v------------+ +-----------v---------+
# |Input Quantization| |Core Quantized Function| |Output Dequantization|
# +------------------+ +-----------------------+ +---------------------+
#
# The final module contains all three partitions, as well as a
# `main` function that composes these three functions (depicted below).
#
# +--------------------+-------------------------+-----------------------+
# | Input Quantization | Core Quantized Function | Output Dequantization |
# +--------------------+-------------------------+-----------------------+
assert len(mod.functions) == 1
pre_mod, mid_mod = partition_prefix(mod, quantized_dtypes)
mid_mod, post_mod = partition_suffix(mid_mod, quantized_dtypes)
if ensure_fully_integral:
assert has_only_conversion_ops(pre_mod["main"])
assert relay.analysis.all_dtypes(mid_mod["main"]).issubset(quantized_dtypes)
assert has_only_conversion_ops(post_mod["main"])
return fuse_partitions(pre_mod, mid_mod, post_mod)
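# A minimal usage sketch (`mod`, `params`, and `calib_data` are placeholders):
# the partition is normally reached through the `partition_conversions`
# qconfig option rather than by calling this function directly.
#
#   with relay.quantize.qconfig(partition_conversions="enabled"):
#       fused_mod = relay.quantize.quantize(mod, params, dataset=calib_data)
#   # fused_mod now contains `quantize_inputs`, `quantized_main`,
#   # `dequantize_outputs`, and a composing `main`.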
def fuse_partitions(pre_mod, mid_mod, post_mod):
"""Combine prefix, middle, and suffix modules into a single module.
The combined module includes an additional `main` that fuses all three
partitions together.
Parameters
----------
pre_mod : tvm.IRModule
Module containing an input quantization function
mid_mod : tvm.IRModule
Module containing core of a quantized inference function
post_mod : tvm.IRModule
Module containing an output dequantization function
Returns
-------
fused_mod : tvm.IRModule
Module containing the input quantization, core quantized inference,
output dequantization, and full quantized inference functions
"""
pre_func = pre_mod["main"]
mid_func = mid_mod["main"]
post_func = post_mod["main"]
# create a module containing the prefix, middle, and suffix partitions
fused_mod = tvm.IRModule(
functions={
relay.GlobalVar("quantize_inputs"): pre_func,
relay.GlobalVar("quantized_main"): mid_func,
relay.GlobalVar("dequantize_outputs"): post_func,
}
)
# construct a `main` that strings together the partitions, such that its
# behaviour is equivalent to `main` in an *unpartitioned* module
scope_builder = relay.ScopeBuilder()
fused_mod_main_params = [relay.Var(param.name_hint) for param in pre_func.params]
quantized_inputs = scope_builder.let(
"quantized_inputs",
relay.Call(fused_mod.get_global_var("quantize_inputs"), fused_mod_main_params),
)
quantized_outputs = scope_builder.let(
"quantized_outputs",
relay.Call(
fused_mod.get_global_var("quantized_main"),
[relay.TupleGetItem(quantized_inputs, i) for i in range(len(pre_func.ret_type.fields))],
),
)
dequantized_outputs = scope_builder.let(
"dequantized_outputs",
relay.Call(fused_mod.get_global_var("dequantize_outputs"), [quantized_outputs]),
)
scope_builder.ret(dequantized_outputs)
fused_mod["main"] = relay.Function(fused_mod_main_params, scope_builder.get())
return relay.transform.InferType()(fused_mod)
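# In pseudocode, the `main` assembled above behaves like:
#
#   def main(*inputs):
#       q = quantize_inputs(*inputs)
#       out = quantized_main(q[0], ..., q[n - 1])
#       return dequantize_outputs(out)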
class PrefixCutter(ExprMutator):
"""A mutator for extracting input quantization expressions from a function
The result of `visit` is the core function, and the input quantization
expressions are stored in the `prefix_sb` scope builder.
"""
def __init__(self, params, quantized_dtypes):
ExprMutator.__init__(self)
self.params = set(params)
self.quantized_dtypes = quantized_dtypes
self.subtree_params = set()
self.new_func_params = []
self.prefix_sb = relay.ScopeBuilder()
self.prefix_binding_map = {}
def visit_var(self, var):
if var in self.params:
self.subtree_params.add(var)
return var
def visit_call(self, call):
# TODO(weberlo) use graph pattern matching?
if not hasattr(call.op, "name") or call.op.name not in ALLOWED_CONVERSION_OPS:
new_args = []
for arg in call.args:
new_arg = self.visit(arg)
if len(self.subtree_params) == 0:
new_args.append(new_arg)
else:
assert len(self.subtree_params) == 1
param = next(iter(self.subtree_params))
pre_param = self.prefix_sb.let(param.name_hint, new_arg)
self.subtree_params.clear()
mid_param = relay.Var(param.name_hint, arg.checked_type)
self.prefix_binding_map[mid_param] = pre_param
# return new parameter, then we can use
# relay.analysis.free_vars at the end of the pass to generate
# new `mid_func` type signature
new_args.append(mid_param)
return relay.Call(call.op, new_args, call.attrs)
return super().visit_call(call)
def partition_prefix(mod, quantized_dtypes):
"""Extract input quantization expressions from `mod['main']`.
Parameters
----------
mod : tvm.IRModule
Module containing a quantized inference function
quantized_dtypes : Set[str]
Set of data types allowed in quantized operators
Returns
-------
pre_mod : tvm.IRModule
Module containing the input quantization function
mid_mod : tvm.IRModule
Module containing a function with everything except for input quantization
"""
assert len(mod.functions) == 1
func = mod["main"]
prefix_cutter = PrefixCutter(func.params, quantized_dtypes)
mid_body = prefix_cutter.visit(func.body)
assert not func.type_params, "unimplemented"
assert func.attrs is None, "unimplemented"
mid_func = relay.Function(relay.analysis.free_vars(mid_body), mid_body)
mid_mod = tvm.IRModule.from_expr(mid_func)
mid_mod = relay.transform.InferType()(mid_mod)
scope_builder = prefix_cutter.prefix_sb
# make sure we pass through all inputs in the prefix function's return expr
# (even those that don't require quantization)
ret_expr = []
for param in mid_func.params:
if param in prefix_cutter.prefix_binding_map:
# this param required a conversion, so we collected it in the
# prefix cutter pass, and we can use the pass's mapping from mid
# func params to pre func params
ret_expr.append(prefix_cutter.prefix_binding_map[param])
else:
# there was no detected conversion for this argument, so we thread
# it through the prefix function untouched
ret_expr.append(relay.Var(param.name_hint, param.checked_type))
ret_expr = relay.Tuple(ret_expr)
scope_builder.ret(ret_expr)
pre_func_body = scope_builder.get()
pre_func = relay.Function(relay.analysis.free_vars(pre_func_body), pre_func_body)
pre_mod = tvm.IRModule.from_expr(pre_func)
pre_mod = relay.transform.InferType()(pre_mod)
return pre_mod, mid_mod
class SuffixCutter(ExprMutator):
"""A mutator for extracting output dequantization expressions from a function
The result of `visit` is a function containing the output dequantization
expressions, and the middle of the function is stored in `mid_body`.
"""
def __init__(self, quantized_dtypes):
ExprMutator.__init__(self)
self.mid_body = None
self.quantized_dtypes = quantized_dtypes
def visit(self, expr):
if hasattr(expr, "checked_type") and expr.checked_type.dtype in self.quantized_dtypes:
self.mid_body = expr
return relay.Var("input", expr.checked_type)
return super().visit(expr)
def partition_suffix(mod, quantized_dtypes):
"""Extract output dequantization expressions from `mod['main']`.
Parameters
----------
mod : tvm.IRModule
Module containing a quantized inference function
quantized_dtypes : Set[str]
Set of data types allowed in quantized operators
Returns
-------
mid_mod : tvm.IRModule
Module containing a function with everything except for output dequantization
post_mod : tvm.IRModule
Module containing the output dequantization function
"""
assert len(mod.functions) == 1
func = mod["main"]
suffix_cutter = SuffixCutter(quantized_dtypes)
post_body = suffix_cutter.visit(func.body)
assert not func.type_params, "unimplemented"
assert func.attrs is None, "unimplemented"
post_func = relay.Function(relay.analysis.free_vars(post_body), post_body, func.ret_type)
post_mod = tvm.IRModule.from_expr(post_func)
post_mod = relay.transform.InferType()(post_mod)
mid_body = suffix_cutter.mid_body
if mid_body is None:
# The suffix contains the entire function, meaning there was no
# quantization boundary in the given mod. In this case, we use the
# suffix mod as the middle mod and make the suffix an identity function.
mid_mod = post_mod
post_body = relay.Var("input", mid_mod["main"].ret_type)
post_func = relay.Function([post_body], post_body)
post_mod = tvm.IRModule.from_expr(post_func)
post_mod = relay.transform.InferType()(post_mod)
else:
mid_func = relay.Function(func.params, mid_body)
mid_mod = tvm.IRModule.from_expr(mid_func)
mid_mod = relay.transform.InferType()(mid_mod)
return mid_mod, post_mod
class ConversionOpChecker(ExprVisitor):
"""A pass for checking that the visited function contains only conversion ops"""
def __init__(self):
ExprVisitor.__init__(self)
self.valid = True
def visit_call(self, call):
if not hasattr(call.op, "name") or call.op.name not in ALLOWED_CONVERSION_OPS:
self.valid = False
super().visit_call(call)
def has_only_conversion_ops(func):
"""Return true iff the given function contains only quantization/dequantization ops.
Parameters
----------
func : relay.Function
Function being checked
Returns
-------
valid : bool
Whether the function contains only conversion ops
"""
checker = ConversionOpChecker()
checker.visit(func)
return checker.valid
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/quantize/_quantize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Internal module for quantization."""
import tvm._ffi
tvm._ffi._init_api("relay._quantize", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/quantize/kl_divergence.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Find optimal scale for quantization by minimizing KL-divergence"""
import ctypes
import numpy as np
from . import _quantize
def _find_scale_by_kl(arr, quantized_dtype="int8", num_bins=8001, num_quantized_bins=255):
"""Given a tensor, find the optimal threshold for quantizing it.
The reference distribution is `q`, and the candidate distribution is `p`.
`q` is a truncated version of the original distribution.
Ref:
http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
"""
assert isinstance(arr, np.ndarray)
min_val = np.min(arr)
max_val = np.max(arr)
thres = max(abs(min_val), abs(max_val))
if min_val >= 0 and quantized_dtype in ["uint8"]:
# We need to move negative bins to positive bins to fit uint8 range.
num_quantized_bins = num_quantized_bins * 2 + 1
def get_pointer(arr, ctypes_type):
ptr = arr.ctypes.data_as(ctypes.POINTER(ctypes_type))
return ctypes.cast(ptr, ctypes.c_void_p)
hist, hist_edges = np.histogram(arr, bins=num_bins, range=(-thres, thres))
hist_ptr = get_pointer(hist.astype(np.int32), ctypes.c_int)
hist_edges_ptr = get_pointer(hist_edges, ctypes.c_float)
return _quantize.FindScaleByKLMinimization(
hist_ptr, hist_edges_ptr, num_bins, num_quantized_bins
)
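# A short usage sketch (`acts` is a placeholder for a layer's collected
# activations; real calibration gathers them from the dataset per layer):
#
#   acts = np.random.randn(64, 56, 56).astype("float32")
#   thres = _find_scale_by_kl(acts, quantized_dtype="int8")
#   # `thres` is the optimal clipping threshold that minimizes KL divergence
#   # between the original and the quantized distribution.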
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/quantize/quantize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, not-context-manager
"""Automatic quantization toolkit."""
import tvm.ir
import tvm
from tvm.runtime import Object
from . import _quantize
from ._calibrate import calibrate
from ._partition_conversions import partition_conversions
from .. import expr as _expr
from .. import transform as _transform
class QAnnotateKind(object):
"""Denote the kind of annotation field, corresponding
to different nbit configure."""
IDENTITY = 0
INPUT = 1
WEIGHT = 2
ACTIVATION = 3
def kind2str(kind):
"""Convert a `QAnnotateKind` to string"""
str_map = {
QAnnotateKind.INPUT: "input",
QAnnotateKind.WEIGHT: "weight",
QAnnotateKind.ACTIVATION: "activation",
QAnnotateKind.IDENTITY: "identity",
}
assert kind in str_map
return str_map[kind]
def _forward_op(ref_call, args):
"""forward the operator of ref_call with provided arguments"""
return _expr.Call(ref_call.op, args, ref_call.attrs, ref_call.type_args, ref_call.span)
@tvm._ffi.register_object("relay.quantize.QConfig")
class QConfig(Object):
"""Configure the quantization behavior by setting config variables.
Note
----
This object is backed by node system in C++, with arguments that can be
exchanged between python and C++.
Do not construct directly, use qconfig instead.
The fields that are backed by the C++ node are immutable once an instance
is constructed. See _node_defaults for the fields.
"""
_node_defaults = {
"nbit_input": 8,
"nbit_weight": 8,
"nbit_activation": 32,
"dtype_input": "int8",
"dtype_weight": "int8",
"dtype_activation": "int32",
"calibrate_mode": "global_scale",
"global_scale": 8.0,
"weight_scale": "power2",
"skip_dense_layer": True,
"skip_conv_layers": [0],
"do_simulation": False,
"round_for_shift": True,
"debug_enabled_ops": None,
"rounding": "UPWARD",
"calibrate_chunk_by": -1,
"partition_conversions": "disabled",
}
# pylint: disable=no-member
def __init__(self, handle):
"""Initialize the function with handle
Parameters
----------
handle : SymbolHandle
the handle to the underlying C++ Symbol
"""
super(QConfig, self).__init__(handle)
self.handle = handle
def guard(self, ref_call):
"""Return true if op is enabled, otherwise return false"""
op_name = ref_call.op.name
if self.debug_enabled_ops is not None:
name_list = [x.value for x in self.debug_enabled_ops]
if op_name not in name_list:
return False
return True
def get_nbit_by_kind(self, kind):
name = kind2str(kind)
return getattr(self, "nbit_" + name)
def get_dtype_by_kind(self, kind):
name = kind2str(kind)
return getattr(self, "dtype_" + name)
def __enter__(self):
# pylint: disable=protected-access
_quantize._EnterQConfigScope(self)
return self
def __exit__(self, ptype, value, trace):
_quantize._ExitQConfigScope()
def __setattr__(self, name, value):
if name in QConfig._node_defaults:
raise AttributeError("'%s' object cannot set attribute '%s'" % (str(type(self)), name))
return super(QConfig, self).__setattr__(name, value)
def current_qconfig():
"""Get the current quantization configuration."""
return _quantize._GetCurrentQConfig()
def qconfig(**kwargs):
"""Configure the quantization behavior by setting config variables.
Parameters
----------
nbit_dict: dict of QAnnotateKind -> int
Number of bits for every kind of annotate field.
calibrate_mode: str
The calibration mode: 'global_scale', 'kl_divergence', or 'percentile'.
global_scale: use the user-provided global scale.
kl_divergence: find scales by KL divergence on the dataset.
percentile: find scales from a percentile of the calibration data's absolute values.
global_scale: float
The global scale for calibration.
weight_scale: str
The way to calculate scales for weights (annotated with QAnnotateKind.WEIGHT).
power2: Find the maximum of the absolute value of the tensor, and then round up to power
of two.
max: Find the maximum of the absolute value of the tensor
skip_dense_layer: boolean
Whether to skip all nn.dense layers. They are skipped by default.
skip_conv_layers: list
Specifies which conv2d layers to skip. Provide a list of indices
that indicate which conv2d layers to leave untouched. Indices start from 0.
do_simulation: boolean
Whether to do simulation with float operation only.
round_for_shift: boolean
Whether to add bias for rounding during shift.
debug_enabled_ops: None or list of str
Partially quantize specified operators for debugging. The default value
is None, which means it will try to call all operators' annotate rewrite
functions.
rounding: "UPWARD" or "TONEAREST"
Rounding direction for fixed point multiplications.
partition_conversions: 'disabled', 'enabled', or 'fully_integral'
If set to 'enabled' or 'fully_integral', partitions a quantized
result into a module containing
a prefix function (consisting of input conversion into the quantized data space),
a middle function (consisting of the core quantized network),
a suffix function (consisting of output dequantization),
and a main function (that calls the prefix, middle, and suffix functions in succession).
If set to 'fully_integral' and there are unquantized operators in the result,
an exception is raised.
The default value is 'disabled'.
Returns
-------
config: QConfig
The quantization configuration
"""
node_args = {k: v if k not in kwargs else kwargs[k] for k, v in QConfig._node_defaults.items()}
return tvm.ir.make_node("relay.quantize.QConfig", **node_args)
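# A short usage sketch (`mod` and `params` are placeholders for an imported
# model):
#
#   with qconfig(calibrate_mode="global_scale", global_scale=8.0,
#                skip_conv_layers=[0]):
#       qmod = quantize(mod, params)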
class QuantizeContext(object):
"""An internal used global context object for annotation,
for putting some state variables like `conv2d_counter`."""
Current = None
def __init__(self):
self.qnode_map = dict()
self._conv2d_counter = 0
self._stop_quantize = False
def check_to_skip(self, ref_call):
"""Check the index of conv2d layer to decide whether to
skip the current operator."""
if self._stop_quantize:
return True
if current_qconfig().skip_conv_layers is not None:
# check skip conv layers
skipped_indices = [int(x) for x in current_qconfig().skip_conv_layers]
if self._conv2d_counter in skipped_indices and ref_call.op.name == "nn.conv2d":
self._conv2d_counter += 1
return True
if ref_call.op.name == "nn.conv2d":
self._conv2d_counter += 1
return False
def stop_quantize(self):
self._stop_quantize = True
def reset(self):
self._conv2d_counter = 0
self._stop_quantize = False
def __enter__(self):
self.reset()
return self
def __exit__(self, ptype, value, traceback):
pass
def quantize_context():
"""Get the global singleton scope"""
if QuantizeContext.Current is None:
QuantizeContext.Current = QuantizeContext()
return QuantizeContext.Current
def partition():
"""Partition graph into small low-precision sections by `cast_hint` and
`stop_fusion`.
Returns
-------
ret: tvm.transform.Pass
The registered pass for partitioning.
"""
return _quantize.QuantizePartition()
def annotate():
"""Given a float32 graph, this pass will rewrite the graph and return
a graph which simulates the error brought by the current quantization
scheme.
Returns
-------
ret: tvm.transform.Pass
The registered pass for quantization annotation.
"""
return _quantize.QuantizeAnnotate()
def realize():
"""The realize pass will transform the simulated quantized graph, which
actually computes with float32, to a real low-bit integer graph. It will
replace the `simulated_quantize` with several fine-grained operators like
add, multiply, and shift as much as possible for better performance.
Returns
-------
ret: tvm.transform.Pass
The registered pass for quantization realization.
"""
return _quantize.QuantizeRealize()
def _bind_params(func, params):
"""Bind the params to the expression."""
name_dict = {}
for arg in func.params:
name = arg.name_hint
if name in name_dict:
name_dict[name] = None
else:
name_dict[name] = arg
bind_dict = {}
for k, v in params.items():
if k not in name_dict:
continue
arg = name_dict[k]
if arg is None:
raise ValueError("Multiple args in the function have name %s" % k)
bind_dict[arg] = _expr.const(v)
return _expr.bind(func, bind_dict)
def prerequisite_optimize(mod, params=None):
"""Prerequisite optimization passes for quantization. Perform
"SimplifyInference", "FoldScaleAxis", "FoldConstant", and
"CanonicalizeOps" optimization before quantization."""
optimize = tvm.transform.Sequential(
[
_transform.SimplifyInference(),
_transform.FoldConstant(),
_transform.FoldScaleAxis(),
_transform.CanonicalizeOps(),
_transform.FoldConstant(),
]
)
if params:
mod["main"] = _bind_params(mod["main"], params)
mod = optimize(mod)
return mod
def quantize(mod, params=None, dataset=None):
"""The quantization procedure. Before running the three main
procedure of quantization, "annotate", "calibrate" and "realize"
, we need to do "SimplifyInference", "FoldScaleAxis", "FoldConstant"
first for optimizing.
Parameters
----------
mod: Module
The original module.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
dataset: list of dict of Var -> NDArray
The calibration dataset.
Returns
-------
ret: Function
The graph after quantization
"""
mod = prerequisite_optimize(mod, params)
calibrate_pass = tvm.transform.module_pass(
calibrate(dataset), opt_level=1, name="QuantizeCalibrate"
)
quant_passes = [partition(), annotate(), calibrate_pass, tvm.relay.transform.InferType()]
if not current_qconfig().do_simulation:
quant_passes.append(realize())
quant_passes.append(_transform.FoldConstant())
quantize_seq = tvm.transform.Sequential(quant_passes)
with tvm.transform.PassContext(
opt_level=3, required_pass=["QuantizeAnnotate", "QuantizeCalibrate", "QuantizeRealize"]
):
with quantize_context():
mod = quantize_seq(mod)
q_cfg = current_qconfig()
assert q_cfg.partition_conversions in ["disabled", "enabled", "fully_integral"]
if q_cfg.partition_conversions != "disabled":
quantized_dtypes = {q_cfg.dtype_input, q_cfg.dtype_weight, q_cfg.dtype_activation}
ensure_fully_integral = q_cfg.partition_conversions == "fully_integral"
return partition_conversions(mod, quantized_dtypes, ensure_fully_integral)
return mod
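# An end-to-end sketch with dataset calibration (`mod`/`params` come from a
# frontend import and `calib_data` is a list of input dicts; all are
# placeholders):
#
#   with qconfig(calibrate_mode="kl_divergence", weight_scale="max"):
#       qmod = quantize(mod, params, dataset=calib_data)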
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/scope_builder.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The scope builder interface."""
from __future__ import absolute_import
from . import ty as _ty
from . import expr as _expr
from .._ffi import base as _base
class WithScope(object):
"""A wrapper for builder methods which introduce scoping.
Parameters
----------
enter_value: object
The value returned by enter.
"""
def __init__(self, enter_value, exit_cb):
self._enter_value = enter_value
self._exit_cb = exit_cb
def __enter__(self):
return self._enter_value
def __exit__(self, ptype, value, trace):
if value:
raise value
self._exit_cb()
def _make_lets(bindings, ret_value):
"""Make a nested let expressions.
Parameters
----------
bindings: List[Tuple[tvm.relay.Var,tvm.relay.Expr]]
The sequence of let bindings
ret_value: tvm.relay.Expr
The final value of the expression.
Returns
-------
lets: tvm.relay.Expr
A nested let expression.
"""
if ret_value is None:
raise RuntimeError("ret is not called in this scope")
if isinstance(ret_value, _expr.If) and ret_value.false_branch is None:
raise RuntimeError("Creating an If expression without else.")
let_expr = ret_value
for var, value in reversed(bindings):
let_expr = _expr.Let(var, value, let_expr)
return let_expr
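# For example, bindings [(x, e1), (y, e2)] with return value `r` produce
# `Let(x, e1, Let(y, e2, r))`: the last binding ends up innermost, closest
# to the return value.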
class ScopeBuilder(object):
"""Scope builder class.
Enables users to build up a nested
scope(let, if) expression easily.
Examples
--------
.. code-block:: python
sb = relay.ScopeBuilder()
cond = relay.var("cond", 'bool')
x = relay.var("x")
y = relay.var("y")
with sb.if_scope(cond):
one = relay.const(1, "float32")
t1 = sb.let("t1", relay.add(x, one))
sb.ret(t1)
with sb.else_scope():
sb.ret(y)
print(sb.get().astext())
"""
def __init__(self):
self._bindings = [[]]
self._ret_values = [None]
def _enter_scope(self):
self._bindings.append([])
self._ret_values.append(None)
def _exit_scope(self):
bindings = self._bindings.pop()
ret_value = self._ret_values.pop()
return bindings, ret_value
def let(self, var, value):
"""Create a new let binding.
Parameters
----------
var: Union[Tuple[str, relay.Type], tvm.relay.Var]
The variable or name of variable.
value: tvm.relay.Expr
The value to be bound
"""
if isinstance(var, (tuple, list)):
if len(var) > 2:
raise ValueError("Expect var to be Tuple[str, relay.Type]")
var = _expr.var(*var)
elif isinstance(var, _base.string_types):
var = _expr.var(var)
self._bindings[-1].append((var, value))
return var
def if_scope(self, cond):
"""Create a new if scope.
Parameters
----------
cond: tvm.relay.expr.Expr
The condition
Returns
-------
scope: WithScope
The if scope.
Note
----
The user must follow with an else scope.
"""
self._enter_scope()
def _on_exit():
bindings, ret_value = self._exit_scope()
if self._ret_values[-1] is not None:
raise RuntimeError("result already returned before if scope")
true_branch = _make_lets(bindings, ret_value)
self._ret_values[-1] = _expr.If(cond, true_branch, None)
return WithScope(None, _on_exit)
def else_scope(self):
"""Create a new else scope.
Returns
-------
scope: WithScope
The else scope.
"""
self._enter_scope()
def _on_exit():
bindings, ret_value = self._exit_scope()
partial_if = self._ret_values[-1]
no_else = not isinstance(partial_if, _expr.If) or partial_if.false_branch is not None
if no_else:
raise RuntimeError("else scope must follows")
false_branch = _make_lets(bindings, ret_value)
self._ret_values[-1] = _expr.If(partial_if.cond, partial_if.true_branch, false_branch)
return WithScope(None, _on_exit)
def type_of(self, expr):
"""
Compute the type of an expression.
Parameters
----------
expr: relay.Expr
The expression to compute the type of.
"""
if isinstance(expr, _expr.Var):
return expr.type_annotation
ity = _ty.IncompleteType()
var = _expr.var("unify", ity)
self.let(var, expr)
return ity
def ret(self, value):
"""Set the return value of this scope.
Parameters
----------
value: tvm.relay.expr.Expr
The return value.
"""
if self._ret_values[-1] is not None:
raise RuntimeError("ret value is already set in this scope.")
self._ret_values[-1] = value
def get(self):
"""Get the generated result.
Returns
-------
value: tvm.relay.expr.Expr
The final result of the expression.
"""
if len(self._bindings) != 1:
raise RuntimeError("can only call get at the outmost scope")
return _make_lets(self._bindings[-1], self._ret_values[-1])
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Utilities for testing and benchmarks"""
from __future__ import absolute_import as _abs
import collections
import numpy as np
import tvm
from tvm import te
from tvm import relay
from tvm.relay import op
from tvm.relay.prelude import Prelude
from tvm.testing import enabled_targets
from . import mlp
from . import resnet
from . import resnet_3d
from . import dqn
from . import dcgan
from . import mobilenet
from . import lstm
from . import inception_v3
from . import squeezenet
from . import vgg
from . import densenet
from . import yolo_detection
from . import temp_op_attr
from . import synthetic
from .init import create_workload
from .nat import count, make_nat_value, make_nat_expr
from .py_converter import to_python, run_as_python
from ..transform import gradient
def run_opt_pass(expr, opt_pass, import_prelude=False):
assert isinstance(opt_pass, tvm.transform.Pass)
mod = tvm.IRModule.from_expr(expr)
if import_prelude:
Prelude(mod)
mod = relay.transform.InferType()(mod)
mod = opt_pass(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def run_infer_type(expr):
return run_opt_pass(expr, relay.transform.InferType())
def _np_randn_from_type(t, scale=1, mean=0):
res = mean + (scale * np.random.randn(*(int(d) for d in t.shape)))
# if t.shape == (), then randn returns a scalar so we need to wrap for dtype conversion
if np.isscalar(res):
res = np.array(res)
return res.astype(t.dtype)
def check_grad(
func,
inputs=None,
test_inputs=None,
eps=1e-6,
atol=1e-5,
rtol=1e-3,
scale=None,
mean=0,
mode="higher_order",
target_devices=None,
executor_kind="debug",
):
"""Perform numerical gradient checking given a relay function.
Compare analytical gradients to numerical gradients derived from two-sided approximation. Note
that this test may fail if your function input types are not of high enough precision.
Parameters
----------
func : tvm.relay.Function
The relay function to test.
inputs: List[np.array]
Optional user-provided input parameters to use. If not given, will generate random normal
inputs scaled to be close to the chosen epsilon value to avoid numerical precision loss.
test_inputs: List[np.array]
The inputs to test for gradient matching. Useful in cases where some inputs are not
differentiable, such as symbolic inputs to dynamic ops. If not given, all inputs are
tested.
eps: float
The epsilon value to use for computing numerical gradient approximation.
atol: float
The absolute tolerance on difference between numerical and analytical gradients. Note that
this needs to be scaled appropriately relative to the chosen eps and inputs.
rtol: float
The relative tolerance on difference between numerical and analytical gradients. Note that
this needs to be scaled appropriately relative to the chosen eps.
scale: float
The standard deviation of the inputs.
mean: float
The mean of the inputs.
target_devices: Optional[List[Tuple[tvm.target.Target, tvm.runtime.Device]]]
A list of targets/devices on which the gradient should be
tested. If not specified, will default to `tvm.testing.enabled_targets()`.
"""
fwd_func = run_infer_type(func)
bwd_func = run_infer_type(gradient(fwd_func, mode=mode))
bwd_func = run_opt_pass(bwd_func, relay.transform.Legalize())
if scale is None:
scale = 10 * eps
if inputs is None:
params = fwd_func.params
# Generate random inputs on the same scale as epsilon to avoid numerical precision loss.
inputs = [_np_randn_from_type(x.checked_type, scale=scale, mean=mean) for x in params]
if test_inputs is None:
test_inputs = inputs
if target_devices is None:
target_devices = enabled_targets()
for target, dev in target_devices:
# Eval the backward and forward functions
# TODO(mbs): Evaluate a pair of functions so can share preparation between them.
bwd_func_compiled = relay.create_executor(
executor_kind, device=dev, target=target
).evaluate(bwd_func)
fwd_func_compiled = relay.create_executor(
executor_kind, device=dev, target=target
).evaluate(fwd_func)
# Get analytic gradients.
_, grads = bwd_func_compiled(*inputs)
grads = [grad.numpy().astype("float64") for grad in grads]
# Throw out gradients we aren't testing
if inputs != test_inputs:
tmp = []
# find the gradient that corresponds to every test input
for test_input in test_inputs:
for i, grad in enumerate(grads):
if inputs[i] is test_input:
tmp.append(grad)
break
grads = tmp
assert len(grads) > 0, "You must test at least one gradient."
# Get numeric gradients for each dimension of each param, using two-sided approximation.
approx_grads = []
for x in test_inputs:
approx_grad = np.zeros(x.shape)
for i in np.ndindex(*x.shape):
x_i = x[i]
x[i] = x_i + eps
fwd_plus = fwd_func_compiled(*inputs).numpy().astype("float64")
x[i] = x_i - eps
fwd_minus = fwd_func_compiled(*inputs).numpy().astype("float64")
x[i] = x_i
approx_grad[i] = np.sum((fwd_plus - fwd_minus) / (2 * eps))
approx_grads.append(approx_grad)
# Compare gradients by checking that relative difference is below tolerance.
for grad, approx_grad in zip(grads, approx_grads):
np.testing.assert_allclose(grad, approx_grad, atol=atol, rtol=rtol)
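# A minimal usage sketch (runs on whatever targets are enabled):
#
#   x = relay.var("x", shape=(3, 4))
#   check_grad(relay.Function([x], relay.sum(x * x)))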
def rand(dtype, *shape):
return tvm.nd.array(np.random.rand(*shape).astype(dtype))
def count_ops(expr):
"""count number of times a given op is called in the graph"""
class OpCounter(tvm.relay.ExprVisitor):
"""OpCounter"""
def visit_call(self, call):
if hasattr(call, "op"):
self.node_counter[call.op.name] += 1
return super().visit_call(call)
def count(self, expr):
self.node_set = {}
self.node_counter = collections.Counter()
self.visit(expr)
return self.node_counter
return OpCounter().count(expr)
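# For example (a sketch):
#
#   x = relay.var("x", shape=(2, 2))
#   count_ops(relay.add(relay.add(x, x), x))  # -> Counter({'add': 2})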
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/byoc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines test utilties useful for testing BYOC flows."""
from tvm import relay
from tvm.relay.expr_functor import ExprMutator
from tvm.relay.op.annotation import compiler_begin, compiler_end
class CcompilerAnnotator(ExprMutator):
"""
This is used to create external functions for ccompiler.
A simple annotator that creates the following program:
|
-- begin --
|
add
|
subtract
|
multiply
|
-- end --
|
"""
def __init__(self):
super(CcompilerAnnotator, self).__init__()
self.in_compiler = 0
def visit_call(self, call):
if call.op.name == "add": # Annotate begin at args
if self.in_compiler == 1:
lhs = compiler_begin(super().visit(call.args[0]), "ccompiler")
rhs = compiler_begin(super().visit(call.args[1]), "ccompiler")
op = relay.add(lhs, rhs)
self.in_compiler = 2
return op
elif call.op.name == "subtract":
if self.in_compiler == 1:
lhs = super().visit(call.args[0])
rhs = super().visit(call.args[1])
if isinstance(lhs, relay.expr.Var):
lhs = compiler_begin(lhs, "ccompiler")
if isinstance(rhs, relay.expr.Var):
rhs = compiler_begin(rhs, "ccompiler")
return relay.subtract(lhs, rhs)
elif call.op.name == "multiply": # Annotate end at output
self.in_compiler = 1
lhs = super().visit(call.args[0])
rhs = super().visit(call.args[1])
if isinstance(lhs, relay.expr.Var):
lhs = compiler_begin(lhs, "ccompiler")
if isinstance(rhs, relay.expr.Var):
rhs = compiler_begin(rhs, "ccompiler")
op = relay.multiply(lhs, rhs)
if self.in_compiler == 2:
op = compiler_end(op, "ccompiler")
self.in_compiler = 0
return op
return super().visit_call(call)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/darknet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument, no-init
"""
Compile DarkNet Models
======================
DarkNet helper functions for darknet model parsing and image loading.
These functions are not loaded by default.
They are utility functions used for testing and tutorial files.
"""
from __future__ import division
from cffi import FFI
import numpy as np
import cv2
def convert_image(image):
"""Convert the image with numpy."""
imagex = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
imagex = np.array(imagex)
imagex = imagex.transpose((2, 0, 1))
imagex = np.divide(imagex, 255.0)
imagex = np.flip(imagex, 0)
return imagex
def load_image_color(test_image):
"""To load the image using opencv api and do preprocessing."""
imagex = cv2.imread(test_image)
return convert_image(imagex)
def _letterbox_image(img, w_in, h_in):
"""To get the image in boxed format."""
imh, imw, imc = img.shape
if (w_in / imw) < (h_in / imh):
new_w = w_in
new_h = imh * w_in // imw
else:
new_h = h_in
new_w = imw * h_in // imh
dim = (new_w, new_h)
# Default interpolation method is INTER_LINEAR
# Other methods are INTER_AREA, INTER_NEAREST, INTER_CUBIC and INTER_LANCZOS4
# For more information see:
# https://docs.opencv.org/2.4/modules/imgproc/doc/geometric_transformations.html#resize
resized = cv2.resize(src=img, dsize=dim, interpolation=cv2.INTER_CUBIC)
resized = convert_image(resized)
boxed = np.full((imc, h_in, w_in), 0.5, dtype=float)
_, resizedh, resizedw = resized.shape
boxed[
:,
int((h_in - new_h) / 2) : int((h_in - new_h) / 2) + resizedh,
int((w_in - new_w) / 2) : int((w_in - new_w) / 2) + resizedw,
] = resized
return boxed
def load_image(img, resize_width, resize_height):
"""Load the image and convert to the darknet model format.
The image processing of darknet is different from normal.
Parameters
----------
img : string
The image file name with path
resize_width : integer
The width to which the image needs to be resized
resize_height : integer
The height to which the image needs to be resized
Returns
-------
img : Float array
Array of processed image
"""
imagex = cv2.imread(img)
return _letterbox_image(imagex, resize_width, resize_height)
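# A short usage sketch (the file name is a placeholder):
#
#   data = load_image("dog.jpg", 416, 416)  # CHW float array in [0, 1]
#   batch = np.expand_dims(data, 0).astype("float32")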
class LAYERTYPE(object):
"""Darknet LAYERTYPE Class constant."""
CONVOLUTIONAL = 0
DECONVOLUTIONAL = 1
CONNECTED = 2
MAXPOOL = 3
SOFTMAX = 4
DETECTION = 5
DROPOUT = 6
CROP = 7
ROUTE = 8
COST = 9
NORMALIZATION = 10
AVGPOOL = 11
LOCAL = 12
SHORTCUT = 13
ACTIVE = 14
RNN = 15
GRU = 16
LSTM = 17
CRNN = 18
BATCHNORM = 19
NETWORK = 20
XNOR = 21
REGION = 22
YOLO = 23
REORG = 24
UPSAMPLE = 25
LOGXENT = 26
L2NORM = 27
BLANK = 28
class ACTIVATION(object):
"""Darknet ACTIVATION Class constant."""
LOGISTIC = 0
RELU = 1
RELIE = 2
LINEAR = 3
RAMP = 4
TANH = 5
PLSE = 6
LEAKY = 7
ELU = 8
LOGGY = 9
STAIR = 10
HARDTAN = 11
LHTAN = 12
__darknetffi__ = FFI()
__darknetffi__.cdef(
"""
typedef struct network network;
typedef struct layer layer;
typedef struct{
int *leaf;
int n;
int *parent;
int *child;
int *group;
char **name;
int groups;
int *group_size;
int *group_offset;
} tree;
typedef enum{
LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN
} ACTIVATION;
typedef enum {
CONVOLUTIONAL,
DECONVOLUTIONAL,
CONNECTED,
MAXPOOL,
SOFTMAX,
DETECTION,
DROPOUT,
CROP,
ROUTE,
COST,
NORMALIZATION,
AVGPOOL,
LOCAL,
SHORTCUT,
ACTIVE,
RNN,
GRU,
LSTM,
CRNN,
BATCHNORM,
NETWORK,
XNOR,
REGION,
YOLO,
REORG,
UPSAMPLE,
LOGXENT,
L2NORM,
BLANK
} LAYERTYPE;
typedef enum{
SSE, MASKED, L1, SEG, SMOOTH, WGAN
} COSTTYPE;
struct layer{
LAYERTYPE type;
ACTIVATION activation;
COSTTYPE cost_type;
void (*forward);
void (*backward);
void (*update);
void (*forward_gpu);
void (*backward_gpu);
void (*update_gpu);
int batch_normalize;
int shortcut;
int batch;
int forced;
int flipped;
int inputs;
int outputs;
int nweights;
int nbiases;
int extra;
int truths;
int h,w,c;
int out_h, out_w, out_c;
int n;
int max_boxes;
int groups;
int size;
int side;
int stride;
int reverse;
int flatten;
int spatial;
int pad;
int sqrt;
int flip;
int index;
int binary;
int xnor;
int steps;
int hidden;
int truth;
float smooth;
float dot;
float angle;
float jitter;
float saturation;
float exposure;
float shift;
float ratio;
float learning_rate_scale;
float clip;
int softmax;
int classes;
int coords;
int background;
int rescore;
int objectness;
int joint;
int noadjust;
int reorg;
int log;
int tanh;
int *mask;
int total;
float alpha;
float beta;
float kappa;
float coord_scale;
float object_scale;
float noobject_scale;
float mask_scale;
float class_scale;
int bias_match;
int random;
float ignore_thresh;
float truth_thresh;
float thresh;
float focus;
int classfix;
int absolute;
int onlyforward;
int stopbackward;
int dontload;
int dontsave;
int dontloadscales;
float temperature;
float probability;
float scale;
char * cweights;
int * indexes;
int * input_layers;
int * input_sizes;
int * map;
float * rand;
float * cost;
float * state;
float * prev_state;
float * forgot_state;
float * forgot_delta;
float * state_delta;
float * combine_cpu;
float * combine_delta_cpu;
float * concat;
float * concat_delta;
float * binary_weights;
float * biases;
float * bias_updates;
float * scales;
float * scale_updates;
float * weights;
float * weight_updates;
float * delta;
float * output;
float * loss;
float * squared;
float * norms;
float * spatial_mean;
float * mean;
float * variance;
float * mean_delta;
float * variance_delta;
float * rolling_mean;
float * rolling_variance;
float * x;
float * x_norm;
float * m;
float * v;
float * bias_m;
float * bias_v;
float * scale_m;
float * scale_v;
float *z_cpu;
float *r_cpu;
float *h_cpu;
float * prev_state_cpu;
float *temp_cpu;
float *temp2_cpu;
float *temp3_cpu;
float *dh_cpu;
float *hh_cpu;
float *prev_cell_cpu;
float *cell_cpu;
float *f_cpu;
float *i_cpu;
float *g_cpu;
float *o_cpu;
float *c_cpu;
float *dc_cpu;
float * binary_input;
struct layer *input_layer;
struct layer *self_layer;
struct layer *output_layer;
struct layer *reset_layer;
struct layer *update_layer;
struct layer *state_layer;
struct layer *input_gate_layer;
struct layer *state_gate_layer;
struct layer *input_save_layer;
struct layer *state_save_layer;
struct layer *input_state_layer;
struct layer *state_state_layer;
struct layer *input_z_layer;
struct layer *state_z_layer;
struct layer *input_r_layer;
struct layer *state_r_layer;
struct layer *input_h_layer;
struct layer *state_h_layer;
struct layer *wz;
struct layer *uz;
struct layer *wr;
struct layer *ur;
struct layer *wh;
struct layer *uh;
struct layer *uo;
struct layer *wo;
struct layer *uf;
struct layer *wf;
struct layer *ui;
struct layer *wi;
struct layer *ug;
struct layer *wg;
tree *softmax_tree;
size_t workspace_size;
};
typedef enum {
CONSTANT, STEP, EXP, POLY, STEPS, SIG, RANDOM
} LEARNINGRATEPOLICY;
typedef struct network{
int n;
int batch;
size_t *seen;
int *t;
float epoch;
int subdivisions;
layer *layers;
float *output;
LEARNINGRATEPOLICY policy;
float learning_rate;
float momentum;
float decay;
float gamma;
float scale;
float power;
int time_steps;
int step;
int max_batches;
float *scales;
int *steps;
int num_steps;
int burn_in;
int adam;
float B1;
float B2;
float eps;
int inputs;
int outputs;
int truths;
int notruth;
int h, w, c;
int max_crop;
int min_crop;
float max_ratio;
float min_ratio;
int center;
float angle;
float aspect;
float exposure;
float saturation;
float hue;
int random;
int gpu_index;
tree *hierarchy;
float *input;
float *truth;
float *delta;
float *workspace;
int train;
int index;
float *cost;
float clip;
} network;
typedef struct {
int w;
int h;
int c;
float *data;
} image;
network *load_network(char *cfg, char *weights, int clear);
image letterbox_image(image im, int w, int h);
int resize_network(network *net, int w, int h);
void top_predictions(network *net, int n, int *index);
void free_image(image m);
image load_image_color(char *filename, int w, int h);
float *network_predict_image(network *net, image im);
float *network_predict(network *net, float *input);
network *make_network(int n);
layer make_convolutional_layer(
int batch,
int h, int w, int c, int n,
int groups, int size, int stride, int padding,
ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam);
layer make_connected_layer(int batch, int inputs, int outputs,
ACTIVATION activation, int batch_normalize, int adam);
layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding);
layer make_avgpool_layer(int batch, int w, int h, int c);
layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2);
layer make_batchnorm_layer(int batch, int w, int h, int c);
layer make_reorg_layer(
int batch, int w, int h, int c,
int stride, int reverse, int flatten, int extra);
layer make_region_layer(int batch, int w, int h, int n, int classes, int coords);
layer make_softmax_layer(int batch, int inputs, int groups);
layer make_rnn_layer(int batch, int inputs, int outputs,
int steps, ACTIVATION activation, int batch_normalize, int adam);
layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes);
layer make_crnn_layer(
int batch, int h, int w, int c,
int hidden_filters, int output_filters, int steps,
ACTIVATION activation, int batch_normalize);
layer make_lstm_layer(
int batch, int inputs, int outputs, int steps,
int batch_normalize, int adam);
layer make_gru_layer(int batch, int inputs,
int outputs, int steps, int batch_normalize, int adam);
layer make_upsample_layer(int batch, int w, int h, int c, int stride);
layer make_l2norm_layer(int batch, int inputs);
void free_network(network *net);
"""
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/dcgan.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""
Net of the generator of DCGAN
Adopted from:
https://github.com/tqchen/mxnet-gan/blob/main/mxgan/generator.py
Reference:
Radford, Alec, Luke Metz, and Soumith Chintala.
"Unsupervised representation learning with deep convolutional generative adversarial networks."
arXiv preprint arXiv:1511.06434 (2015).
"""
from tvm import relay
from . import layers
from .init import create_workload
def deconv2d(data, ishape, oshape, kshape, layout, name, stride=(2, 2)):
"""a deconv layer that enlarges the feature map"""
target_shape = (oshape[-2], oshape[-1])
pad_y = (kshape[0] - 1) // 2
pad_x = (kshape[1] - 1) // 2
adj_y = (target_shape[0] + 2 * pad_y - kshape[0]) % stride[0]
adj_x = (target_shape[1] + 2 * pad_x - kshape[1]) % stride[1]
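# Example of the arithmetic: with kshape=(4, 4) and stride=(2, 2), pad is
# (1, 1), and for an 8x8 target adj = (8 + 2 - 4) % 2 = 0, so a 4x4 input
# maps exactly onto the 8x8 output.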
if layout == "NCHW":
kernel_layout = "IOHW"
elif layout == "NHWC":
kernel_layout = "HWOI"
else:
raise ValueError("Invalid layout: " + layout)
net = layers.conv2d_transpose(
data,
kernel_size=kshape,
strides=stride,
channels=oshape[0],
padding=(pad_y, pad_x),
output_padding=(adj_y, adj_x),
data_layout=layout,
kernel_layout=kernel_layout,
name=name,
)
return net
def deconv2d_bn_relu(data, prefix, **kwargs):
"""a block of deconv + batch norm + relu"""
eps = 1e-5 + 1e-12
net = deconv2d(data, name="%s_deconv" % prefix, **kwargs)
bn_axis = kwargs.get("layout", "NCHW").index("C")
net = layers.batch_norm_infer(
net, epsilon=eps, scale=False, axis=bn_axis, name="%s_batch_norm" % prefix
)
net = relay.nn.relu(net)
return net
def get_net(
batch_size,
random_len=100,
oshape=(3, 64, 64),
ngf=128,
code=None,
layout="NCHW",
dtype="float32",
):
"""get net of dcgan generator"""
assert oshape[-1] == 64, "Only support 64x64 image"
assert oshape[-2] == 64, "Only support 64x64 image"
code = relay.var("data", dtype=dtype, shape=(batch_size, random_len)) if code is None else code
dense_weight = relay.var("dense_weight")
dense = relay.nn.dense(code, weight=dense_weight, units=4 * 4 * ngf * 8)
relu = relay.nn.relu(dense)
# 4 x 4
if layout == "NCHW":
reshape = relay.reshape(relu, newshape=(-1, ngf * 8, 4, 4))
elif layout == "NHWC":
reshape = relay.reshape(relu, newshape=(-1, 4, 4, ngf * 8))
else:
raise ValueError("Invalid layout: " + layout)
# 8 x 8
dc8 = deconv2d_bn_relu(
reshape,
ishape=(ngf * 8, 4, 4),
oshape=(ngf * 4, 8, 8),
kshape=(4, 4),
layout=layout,
prefix="g2",
)
# 16x16
dc16 = deconv2d_bn_relu(
dc8,
ishape=(ngf * 4, 8, 8),
oshape=(ngf * 2, 16, 16),
kshape=(4, 4),
layout=layout,
prefix="g3",
)
# 32x32
dc32 = deconv2d_bn_relu(
dc16,
ishape=(ngf * 2, 16, 16),
oshape=(ngf, 32, 32),
kshape=(4, 4),
layout=layout,
prefix="g4",
)
# 64x64
dc64 = deconv2d(
dc32,
ishape=(ngf, 32, 32),
oshape=oshape[-3:],
kshape=(4, 4),
layout=layout,
name="g5_deconv",
)
tanh = relay.tanh(dc64)
args = relay.analysis.free_vars(tanh)
return relay.Function(args, tanh)
def get_workload(
batch_size, oshape=(3, 64, 64), ngf=128, random_len=100, layout="NCHW", dtype="float32"
):
"""Get benchmark workload for a DCGAN generator
Parameters
----------
batch_size : int
The batch size used in the model
oshape : tuple, optional
The shape of output image, layout="CHW"
ngf: int, optional
The number of final feature maps in the generator
random_len : int, optional
The length of random input
layout: str, optional
The layout of conv2d transpose
dtype : str, optional
The data type
Returns
-------
mod : tvm.IRModule
The relay module that contains a DCGAN network.
params : dict of str to NDArray
The parameters.
"""
net = get_net(batch_size, random_len, oshape=oshape, ngf=ngf, layout=layout, dtype=dtype)
return create_workload(net)
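# A short usage sketch:
#
#   mod, params = get_workload(batch_size=1, layout="NCHW")
#   # `mod` holds the generator graph; `params` its randomly initialized weights.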
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/densenet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, line-too-long
"""
Port of the MXNet version of DenseNet to Relay.
https://github.com/apache/incubator-mxnet/blob/master/python/mxnet/gluon/model_zoo/vision/densenet.py
"""
# pylint: enable=line-too-long
from tvm import relay
from . import layers
from .init import create_workload
def _make_dense_layer(data, growth_rate, bn_size, index):
"""Single densenet layer."""
bn1 = layers.batch_norm_infer(data, name="batch_1_%s" % index)
relu1 = relay.nn.relu(bn1)
conv1 = layers.conv2d(
relu1, channels=bn_size * growth_rate, kernel_size=(1, 1), name="conv2d_1_%s" % index
)
bn2 = layers.batch_norm_infer(conv1, name="batch_2_%s" % index)
relu2 = relay.nn.relu(bn2)
conv2 = layers.conv2d(
relu2, channels=growth_rate, kernel_size=(3, 3), padding=(1, 1), name="conv2d_2_%s" % index
)
return conv2
def _make_dense_block(data, num_layers, bn_size, growth_rate, index):
"""Makes a block of dense layers of the specified size."""
layer_out = data
blocks = []
for i in range(num_layers):
layer_out = _make_dense_layer(layer_out, growth_rate, bn_size, "%s_%s" % (index, i))
blocks.append(layer_out)
block_out = relay.concatenate(blocks, 1)
return block_out
def _make_transition(data, num_output_features, index):
"""Transition between layers."""
bn = layers.batch_norm_infer(data, name="batch_t_%s" % index)
relu = relay.nn.relu(bn)
conv = layers.conv2d(
relu, channels=num_output_features, kernel_size=(1, 1), name="conv_t_%s" % index
)
return relay.nn.avg_pool2d(conv, pool_size=(2, 2), strides=(2, 2))
def _make_dense_net(
num_init_features, growth_rate, block_config, data_shape, data_dtype, bn_size=4, classes=1000
):
"""Builds up a densenet."""
data = relay.Var(
"data", relay.TensorType(data_shape, data_dtype)
) # (batch_size, 3, 224, 224)))
conv1 = layers.conv2d(
data,
channels=num_init_features,
kernel_size=(7, 7),
strides=(2, 2),
padding=(3, 3),
name="conv1",
)
bn1 = layers.batch_norm_infer(conv1, name="batch1")
relu1 = relay.nn.relu(bn1)
mp = relay.nn.max_pool2d(relu1, pool_size=(3, 3), strides=(2, 2), padding=(1, 1))
num_features = num_init_features
layer_out = mp
for i, num_layers in enumerate(block_config):
layer_out = _make_dense_block(layer_out, num_layers, bn_size, growth_rate, i)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
layer_out = _make_transition(layer_out, num_features // 2, i)
num_features = num_features // 2
bn2 = layers.batch_norm_infer(layer_out, name="batch2")
relu2 = relay.nn.relu(bn2)
avg = relay.nn.avg_pool2d(relu2, pool_size=(7, 7))
flat = relay.nn.batch_flatten(avg)
ret = layers.dense_add_bias(flat, units=classes, name="dense")
return relay.Function(relay.analysis.free_vars(ret), ret)
def get_workload(
densenet_size=121, classes=1000, batch_size=4, image_shape=(3, 224, 224), dtype="float32"
):
"""Gets benchmark workload for densenet.
Parameters
----------
densenet_size : int, optional (default 121)
Parameter for the network size. The supported sizes
are 121, 161, 169, and 201.
classes : int, optional (default 1000)
The number of classes.
batch_size : int, optional (default 4)
The batch size for the network.
image_shape : shape, optional (default (3, 224, 224))
The shape of the input data.
dtype : data type, optional (default 'float32')
The data type of the input data.
Returns
-------
mod: tvm.IRModule
The relay module that contains a DenseNet network.
params : dict of str to NDArray
The benchmark parameters.
"""
specs = {
121: (64, 32, [6, 12, 24, 16]),
161: (96, 48, [6, 12, 36, 24]),
169: (64, 32, [6, 12, 32, 32]),
201: (64, 32, [6, 12, 48, 32]),
}
bn_size = 4
num_init_features, growth_rate, block_config = specs[densenet_size]
data_shape = tuple([batch_size] + list(image_shape))
net = _make_dense_net(
num_init_features, growth_rate, block_config, data_shape, dtype, bn_size, classes
)
return create_workload(net)
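# Example usage (sketch, not part of the original file):
#   mod, params = get_workload(densenet_size=121, batch_size=4)
#   # mod["main"] expects a (4, 3, 224, 224) float32 "data" input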
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/dqn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Net of Nature DQN
Reference:
Mnih, Volodymyr, et al. "Human-level control through deep reinforcement learning."
Nature 518.7540 (2015): 529.
"""
from tvm import relay
from . import layers
from .init import create_workload
def get_net(batch_size, num_actions=18, image_shape=(4, 84, 84), dtype="float32", layout="NCHW"):
"""get symbol of nature dqn"""
data_shape = (batch_size,) + image_shape
data = relay.var("data", shape=data_shape, dtype=dtype)
bias_axis = layout.index("C")
conv1_bias = relay.var("conv1_bias")
conv1 = layers.conv2d(
data,
kernel_size=(8, 8),
strides=(4, 4),
padding=(0, 0),
channels=32,
name="conv1",
data_layout=layout,
kernel_layout=layers.conv_kernel_layout(layout),
)
conv1 = relay.nn.bias_add(conv1, conv1_bias, bias_axis)
relu1 = relay.nn.relu(conv1)
conv2_bias = relay.var("conv2_bias")
conv2 = layers.conv2d(
relu1,
kernel_size=(4, 4),
strides=(2, 2),
padding=(0, 0),
channels=64,
name="conv2",
data_layout=layout,
kernel_layout=layers.conv_kernel_layout(layout),
)
conv2 = relay.nn.bias_add(conv2, conv2_bias, bias_axis)
relu2 = relay.nn.relu(conv2)
conv3_bias = relay.var("conv3_bias")
conv3 = layers.conv2d(
relu2,
kernel_size=(3, 3),
strides=(1, 1),
padding=(0, 0),
channels=64,
name="conv3",
data_layout=layout,
kernel_layout=layers.conv_kernel_layout(layout),
)
conv3 = relay.nn.bias_add(conv3, conv3_bias, bias_axis)
relu3 = relay.nn.relu(conv3)
bf1 = relay.nn.batch_flatten(relu3)
dense1 = layers.dense_add_bias(bf1, units=512, name="dense1")
relu4 = relay.nn.relu(dense1)
dense2 = layers.dense_add_bias(relu4, units=num_actions, name="dense2")
args = relay.analysis.free_vars(dense2)
return relay.Function(args, dense2)
def get_workload(
batch_size, num_actions=18, image_shape=(4, 84, 84), dtype="float32", layout="NCHW"
):
"""Get benchmark workload for a Deep Q Network
Parameters
----------
batch_size : int
The batch size used in the model
num_actions : int, optional
Number of actions
image_shape : tuple, optional
The input image shape
dtype : str, optional
The data type
layout : str, optional
The layout of the data and operators, "NCHW" or "NHWC"
Returns
-------
mod : tvm.IRModule
The relay module that contains a DQN network.
params : dict of str to NDArray
The parameters.
"""
net = get_net(
batch_size, num_actions=num_actions, image_shape=image_shape, dtype=dtype, layout=layout
)
return create_workload(net)
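# Example usage (sketch, not part of the original file):
#   mod, params = get_workload(batch_size=1, num_actions=18)
#   # maps a (1, 4, 84, 84) observation to 18 Q-values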
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/inception_v3.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Inception V3, suitable for images with around 299 x 299
Reference:
Szegedy, Christian, et al. "Rethinking the Inception Architecture for Computer Vision."
arXiv preprint arXiv:1512.00567 (2015).
Adapted from https://github.com/apache/incubator-mxnet/blob/master/
example/image-classification/symbols/inception-v3.py
"""
# pylint: disable=invalid-name,missing-docstring,unused-argument
from tvm import relay
from .init import create_workload
from . import layers
def Conv(data, num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=None, suffix=""):
conv = layers.conv2d(
data=data,
channels=int(num_filter),
kernel_size=kernel,
strides=stride,
padding=pad,
name="%s%s_conv1" % (name, suffix),
)
bn = layers.batch_norm_infer(
data=conv, epsilon=2e-5, scale=False, name="%s%s_bn" % (name, suffix)
)
act = relay.nn.relu(data=bn)
return act
def Pooling(data, kernel, stride, pad, pool_type, name):
if pool_type == "max":
return relay.nn.max_pool2d(data=data, pool_size=kernel, strides=stride, padding=pad)
if pool_type == "avg":
return relay.nn.avg_pool2d(
data=data, pool_size=kernel, strides=stride, padding=pad, count_include_pad=True
)
raise ValueError("Invalid pooling type: " + pool_type)
def Inception7A(
data, num_1x1, num_3x3_red, num_3x3_1, num_3x3_2, num_5x5_red, num_5x5, pool, proj, name
):
tower_1x1 = Conv(data, num_1x1, name=("%s_conv" % name))
tower_5x5 = Conv(data, num_5x5_red, name=("%s_tower" % name), suffix="_conv")
tower_5x5 = Conv(
tower_5x5, num_5x5, kernel=(5, 5), pad=(2, 2), name=("%s_tower" % name), suffix="_conv_1"
)
tower_3x3 = Conv(data, num_3x3_red, name=("%s_tower_1" % name), suffix="_conv")
tower_3x3 = Conv(
tower_3x3,
num_3x3_1,
kernel=(3, 3),
pad=(1, 1),
name=("%s_tower_1" % name),
suffix="_conv_1",
)
tower_3x3 = Conv(
tower_3x3,
num_3x3_2,
kernel=(3, 3),
pad=(1, 1),
name=("%s_tower_1" % name),
suffix="_conv_2",
)
pooling = Pooling(
data=data,
kernel=(3, 3),
stride=(1, 1),
pad=(1, 1),
pool_type=pool,
name=("%s_pool_%s_pool" % (pool, name)),
)
cproj = Conv(pooling, proj, name=("%s_tower_2" % name), suffix="_conv")
concat = relay.concatenate((tower_1x1, tower_5x5, tower_3x3, cproj), axis=1)
return concat
# First Downsample
def Inception7B(data, num_3x3, num_d3x3_red, num_d3x3_1, num_d3x3_2, pool, name):
tower_3x3 = Conv(
data, num_3x3, kernel=(3, 3), pad=(0, 0), stride=(2, 2), name=("%s_conv" % name)
)
tower_d3x3 = Conv(data, num_d3x3_red, name=("%s_tower" % name), suffix="_conv")
tower_d3x3 = Conv(
tower_d3x3,
num_d3x3_1,
kernel=(3, 3),
pad=(1, 1),
stride=(1, 1),
name=("%s_tower" % name),
suffix="_conv_1",
)
tower_d3x3 = Conv(
tower_d3x3,
num_d3x3_2,
kernel=(3, 3),
pad=(0, 0),
stride=(2, 2),
name=("%s_tower" % name),
suffix="_conv_2",
)
pooling = Pooling(
data=data,
kernel=(3, 3),
stride=(2, 2),
pad=(0, 0),
pool_type="max",
name=("max_pool_%s_pool" % name),
)
concat = relay.concatenate((tower_3x3, tower_d3x3, pooling), axis=1)
return concat
def Inception7C(
data,
num_1x1,
num_d7_red,
num_d7_1,
num_d7_2,
num_q7_red,
num_q7_1,
num_q7_2,
num_q7_3,
num_q7_4,
pool,
proj,
name,
):
tower_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=("%s_conv" % name))
tower_d7 = Conv(data=data, num_filter=num_d7_red, name=("%s_tower" % name), suffix="_conv")
tower_d7 = Conv(
data=tower_d7,
num_filter=num_d7_1,
kernel=(1, 7),
pad=(0, 3),
name=("%s_tower" % name),
suffix="_conv_1",
)
tower_d7 = Conv(
data=tower_d7,
num_filter=num_d7_2,
kernel=(7, 1),
pad=(3, 0),
name=("%s_tower" % name),
suffix="_conv_2",
)
tower_q7 = Conv(data=data, num_filter=num_q7_red, name=("%s_tower_1" % name), suffix="_conv")
tower_q7 = Conv(
data=tower_q7,
num_filter=num_q7_1,
kernel=(7, 1),
pad=(3, 0),
name=("%s_tower_1" % name),
suffix="_conv_1",
)
tower_q7 = Conv(
data=tower_q7,
num_filter=num_q7_2,
kernel=(1, 7),
pad=(0, 3),
name=("%s_tower_1" % name),
suffix="_conv_2",
)
tower_q7 = Conv(
data=tower_q7,
num_filter=num_q7_3,
kernel=(7, 1),
pad=(3, 0),
name=("%s_tower_1" % name),
suffix="_conv_3",
)
tower_q7 = Conv(
data=tower_q7,
num_filter=num_q7_4,
kernel=(1, 7),
pad=(0, 3),
name=("%s_tower_1" % name),
suffix="_conv_4",
)
pooling = Pooling(
data=data,
kernel=(3, 3),
stride=(1, 1),
pad=(1, 1),
pool_type=pool,
name=("%s_pool_%s_pool" % (pool, name)),
)
cproj = Conv(
data=pooling, num_filter=proj, kernel=(1, 1), name=("%s_tower_2" % name), suffix="_conv"
)
# concat
concat = relay.concatenate((tower_1x1, tower_d7, tower_q7, cproj), axis=1)
return concat
def Inception7D(
data, num_3x3_red, num_3x3, num_d7_3x3_red, num_d7_1, num_d7_2, num_d7_3x3, pool, name
):
tower_3x3 = Conv(data=data, num_filter=num_3x3_red, name=("%s_tower" % name), suffix="_conv")
tower_3x3 = Conv(
data=tower_3x3,
num_filter=num_3x3,
kernel=(3, 3),
pad=(0, 0),
stride=(2, 2),
name=("%s_tower" % name),
suffix="_conv_1",
)
tower_d7_3x3 = Conv(
data=data, num_filter=num_d7_3x3_red, name=("%s_tower_1" % name), suffix="_conv"
)
tower_d7_3x3 = Conv(
data=tower_d7_3x3,
num_filter=num_d7_1,
kernel=(1, 7),
pad=(0, 3),
name=("%s_tower_1" % name),
suffix="_conv_1",
)
tower_d7_3x3 = Conv(
data=tower_d7_3x3,
num_filter=num_d7_2,
kernel=(7, 1),
pad=(3, 0),
name=("%s_tower_1" % name),
suffix="_conv_2",
)
tower_d7_3x3 = Conv(
data=tower_d7_3x3,
num_filter=num_d7_3x3,
kernel=(3, 3),
stride=(2, 2),
name=("%s_tower_1" % name),
suffix="_conv_3",
)
pooling = Pooling(
data=data,
kernel=(3, 3),
stride=(2, 2),
pool_type=pool,
pad=(0, 0),
name=("%s_pool_%s_pool" % (pool, name)),
)
# concat
concat = relay.concatenate((tower_3x3, tower_d7_3x3, pooling), axis=1)
return concat
def Inception7E(
data,
num_1x1,
num_d3_red,
num_d3_1,
num_d3_2,
num_3x3_d3_red,
num_3x3,
num_3x3_d3_1,
num_3x3_d3_2,
pool,
proj,
name,
):
tower_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=("%s_conv" % name))
tower_d3 = Conv(data=data, num_filter=num_d3_red, name=("%s_tower" % name), suffix="_conv")
tower_d3_a = Conv(
data=tower_d3,
num_filter=num_d3_1,
kernel=(1, 3),
pad=(0, 1),
name=("%s_tower" % name),
suffix="_mixed_conv",
)
tower_d3_b = Conv(
data=tower_d3,
num_filter=num_d3_2,
kernel=(3, 1),
pad=(1, 0),
name=("%s_tower" % name),
suffix="_mixed_conv_1",
)
tower_3x3_d3 = Conv(
data=data, num_filter=num_3x3_d3_red, name=("%s_tower_1" % name), suffix="_conv"
)
tower_3x3_d3 = Conv(
data=tower_3x3_d3,
num_filter=num_3x3,
kernel=(3, 3),
pad=(1, 1),
name=("%s_tower_1" % name),
suffix="_conv_1",
)
tower_3x3_d3_a = Conv(
data=tower_3x3_d3,
num_filter=num_3x3_d3_1,
kernel=(1, 3),
pad=(0, 1),
name=("%s_tower_1" % name),
suffix="_mixed_conv",
)
tower_3x3_d3_b = Conv(
data=tower_3x3_d3,
num_filter=num_3x3_d3_2,
kernel=(3, 1),
pad=(1, 0),
name=("%s_tower_1" % name),
suffix="_mixed_conv_1",
)
pooling = Pooling(
data=data,
kernel=(3, 3),
stride=(1, 1),
pad=(1, 1),
pool_type=pool,
name=("%s_pool_%s_pool" % (pool, name)),
)
cproj = Conv(
data=pooling, num_filter=proj, kernel=(1, 1), name=("%s_tower_2" % name), suffix="_conv"
)
# concat
concat = relay.concatenate(
(tower_1x1, tower_d3_a, tower_d3_b, tower_3x3_d3_a, tower_3x3_d3_b, cproj), axis=1
)
return concat
def get_net(batch_size, num_classes, image_shape, dtype):
"""Get network a Inception v3 network.
batch_size : int
The batch size used in the model
num_classes : int, optional
Number of classes
image_shape : tuple, optional
The input image shape
dtype : str, optional
The data type
Returns
-------
net : relay.Function
The dataflow.
"""
data_shape = (batch_size,) + image_shape
data = relay.var("data", shape=data_shape, dtype=dtype)
# stage 1
conv = Conv(data, 32, kernel=(3, 3), stride=(2, 2), name="conv")
conv_1 = Conv(conv, 32, kernel=(3, 3), name="conv_1")
conv_2 = Conv(conv_1, 64, kernel=(3, 3), pad=(1, 1), name="conv_2")
pool = Pooling(
data=conv_2, kernel=(3, 3), stride=(2, 2), pool_type="max", pad=(0, 0), name="pool"
)
# stage 2
conv_3 = Conv(pool, 80, kernel=(1, 1), name="conv_3")
conv_4 = Conv(conv_3, 192, kernel=(3, 3), name="conv_4")
pool1 = Pooling(
data=conv_4, kernel=(3, 3), stride=(2, 2), pool_type="max", pad=(0, 0), name="pool1"
)
# stage 3
in3a = Inception7A(pool1, 64, 64, 96, 96, 48, 64, "avg", 32, "mixed")
in3b = Inception7A(in3a, 64, 64, 96, 96, 48, 64, "avg", 64, "mixed_1")
in3c = Inception7A(in3b, 64, 64, 96, 96, 48, 64, "avg", 64, "mixed_2")
in3d = Inception7B(in3c, 384, 64, 96, 96, "max", "mixed_3")
# stage 4
in4a = Inception7C(in3d, 192, 128, 128, 192, 128, 128, 128, 128, 192, "avg", 192, "mixed_4")
in4b = Inception7C(in4a, 192, 160, 160, 192, 160, 160, 160, 160, 192, "avg", 192, "mixed_5")
in4c = Inception7C(in4b, 192, 160, 160, 192, 160, 160, 160, 160, 192, "avg", 192, "mixed_6")
in4d = Inception7C(in4c, 192, 192, 192, 192, 192, 192, 192, 192, 192, "avg", 192, "mixed_7")
in4e = Inception7D(in4d, 192, 320, 192, 192, 192, 192, "max", "mixed_8")
# stage 5
in5a = Inception7E(in4e, 320, 384, 384, 384, 448, 384, 384, 384, "avg", 192, "mixed_9")
in5b = Inception7E(in5a, 320, 384, 384, 384, 448, 384, 384, 384, "max", 192, "mixed_10")
# pool
pool = Pooling(
data=in5b, kernel=(8, 8), stride=(1, 1), pool_type="avg", pad=(0, 0), name="global_pool"
)
flatten = relay.nn.batch_flatten(pool)
fc1 = relay.nn.dense(flatten, relay.var("fc1_weight"), units=num_classes)
fc1 = relay.nn.bias_add(fc1, relay.var("fc1_bias"), axis=-1)
inception_v3 = relay.nn.softmax(data=fc1)
args = relay.analysis.free_vars(inception_v3)
return relay.Function(args, inception_v3)
def get_workload(batch_size=1, num_classes=1000, image_shape=(3, 299, 299), dtype="float32"):
"""Get benchmark workload for InceptionV3
Parameters
----------
batch_size : int
The batch size used in the model
num_classes : int, optional
Number of classes
image_shape : tuple, optional
The input image shape
dtype : str, optional
The data type
Returns
-------
mod : tvm.IRModule
The relay module that contains an Inception V3 network.
params : dict of str to NDArray
The parameters.
"""
net = get_net(batch_size, num_classes, image_shape, dtype)
return create_workload(net)
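# Example usage (sketch, not part of the original file):
#   mod, params = get_workload(batch_size=1)
#   # expects a (1, 3, 299, 299) float32 "data" input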
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/init.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Initializer of parameters."""
from functools import reduce
import numpy as np
import tvm
from tvm import relay
class Initializer(object):
"""The base class of an initializer."""
def __init__(self, **kwargs):
self._kwargs = kwargs
def __call__(self, desc, arr):
"""Initialize an array
Parameters
----------
desc : str
Initialization pattern descriptor.
arr : NDArray
The array to be initialized.
"""
if desc.endswith("weight"):
self._init_weight(desc, arr)
elif desc.endswith("bias"):
self._init_bias(desc, arr)
elif desc.endswith("gamma"):
self._init_gamma(desc, arr)
elif desc.endswith("beta"):
self._init_beta(desc, arr)
elif desc.endswith("mean"):
self._init_mean(desc, arr)
elif desc.endswith("var"):
self._init_var(desc, arr)
else:
self._init_default(desc, arr)
def _init_bias(self, _, arr):
arr[:] = 0.0
def _init_gamma(self, _, arr):
arr[:] = 1.0
def _init_beta(self, _, arr):
arr[:] = 0.0
def _init_mean(self, _, arr):
arr[:] = 0.0
def _init_var(self, _, arr):
arr[:] = 1.0
def _init_weight(self, name, arr):
"""Abstract method to Initialize weight."""
raise NotImplementedError("Must override it")
def _init_default(self, name, _):
raise ValueError(
"Unknown initialization pattern for %s. "
"Default initialization is now limited to "
'"weight", "bias", "gamma" (1.0), and "beta" (0.0).'
"Please use mx.sym.Variable(init=mx.init.*) to set initialization pattern" % name
)
class Xavier(Initializer):
""" "Xavier" initialization for weights
Parameters
----------
rnd_type: str, optional
Random generator type, can be ``'gaussian'`` or ``'uniform'``.
factor_type: str, optional
Can be ``'avg'``, ``'in'``, or ``'out'``.
magnitude: float, optional
Scale of random number.
"""
def __init__(self, rnd_type="uniform", factor_type="avg", magnitude=3):
super(Xavier, self).__init__(
rnd_type=rnd_type, factor_type=factor_type, magnitude=magnitude
)
self.rnd_type = rnd_type
self.factor_type = factor_type
self.magnitude = float(magnitude)
def _init_weight(self, name, arr):
shape = arr.shape
hw_scale = 1.0
if len(shape) < 2:
raise ValueError(
"Xavier initializer cannot be applied to vector {0}. It requires at"
" least 2D.".format(name)
)
if len(shape) > 2:
hw_scale = np.prod(shape[2:])
fan_in, fan_out = shape[1] * hw_scale, shape[0] * hw_scale
factor = 1.0
if self.factor_type == "avg":
factor = (fan_in + fan_out) / 2.0
elif self.factor_type == "in":
factor = fan_in
elif self.factor_type == "out":
factor = fan_out
else:
raise ValueError("Incorrect factor type")
# Hack for mobilenet, because there is less connectivity
if "depthwise" in name:
factor = hw_scale
scale = np.sqrt(self.magnitude / factor)
if self.rnd_type == "uniform":
arr[:] = np.random.uniform(-scale, scale, size=arr.shape)
else:
raise ValueError("Unknown random type")
class Constant(Initializer):
"""Constant initialization of weights. Sum of weights in the matrix is 1."""
def _init_weight(self, name, arr):
num_elements = reduce(lambda x, y: x * y, arr.shape)
arr[:] = 1.0 / num_elements
def create_workload(net, initializer=None, seed=0):
"""Helper function to create benchmark image classification workload.
Parameters
----------
net : tvm.relay.Function
The selected function of the network.
initializer : Initializer
The initializer used
seed : int
The seed used in initialization.
Returns
-------
mod : tvm.IRModule
The created relay module.
params : dict of str to NDArray
The parameters.
"""
mod = tvm.IRModule.from_expr(net)
mod = relay.transform.InferType()(mod)
shape_dict = {v.name_hint: v.checked_type for v in mod["main"].params}
np.random.seed(seed)
initializer = initializer if initializer else Xavier()
params = {}
for k, v in shape_dict.items():
if k == "data":
continue
init_value = np.zeros(v.concrete_shape).astype(v.dtype)
initializer(k, init_value)
params[k] = tvm.nd.array(init_value, device=tvm.cpu(0))
return mod, params
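# Example usage (sketch; `mlp` is the sibling testing module, and the keyword
# values below are illustrative, not part of the original file):
#   from tvm.relay.testing import mlp
#   net = mlp.get_net(batch_size=1)
#   mod, params = create_workload(net, initializer=Xavier(magnitude=2.0), seed=42)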
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/layers.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Simple Layer DSL wrapper to ease creation of neural nets."""
from tvm import relay
def batch_norm_infer(data, gamma=None, beta=None, moving_mean=None, moving_var=None, **kwargs):
"""Wrapper of batch_norm.
This function automatically creates weights and return
the first output(normalized result).
Parameters
----------
data : relay.Expr
The input expression.
gamma : relay.Expr
The gamma scale factor.
beta : relay.Expr
The beta offset factor.
moving_mean : relay.Expr
Running mean of input.
moving_var : relay.Expr
Running variance of input.
kwargs : dict
Additional arguments.
Returns
-------
result : relay.Expr
The result.
"""
name = kwargs.get("name")
kwargs.pop("name")
if not gamma:
gamma = relay.var(name + "_gamma")
if not beta:
beta = relay.var(name + "_beta")
if not moving_mean:
moving_mean = relay.var(name + "_moving_mean")
if not moving_var:
moving_var = relay.var(name + "_moving_var")
return relay.nn.batch_norm(
data, gamma=gamma, beta=beta, moving_mean=moving_mean, moving_var=moving_var, **kwargs
)[0]
def conv2d(data, weight=None, **kwargs):
"""Wrapper of conv2d which automatically creates weights if not given.
Parameters
----------
data : relay.Expr
The input expression.
weight : relay.Expr
The weight to conv2d.
kwargs : dict
Additional arguments.
Returns
-------
result : relay.Expr
The result.
"""
name = kwargs.get("name")
kwargs.pop("name")
if not weight:
weight = relay.var(name + "_weight")
return relay.nn.conv2d(data, weight, **kwargs)
def conv3d(data, weight=None, **kwargs):
"""Wrapper of conv3d which automatically creates weights if not given.
Parameters
----------
data : relay.Expr
The input expression.
weight : relay.Expr
The weight to conv3d.
kwargs : dict
Additional arguments.
Returns
-------
result : relay.Expr
The result.
"""
name = kwargs.get("name")
kwargs.pop("name")
if not weight:
weight = relay.var(name + "_weight")
return relay.nn.conv3d(data, weight, **kwargs)
def conv2d_transpose(data, weight=None, **kwargs):
"""Wrapper of conv2d_transpose which automatically creates weights if not given.
Parameters
----------
data : relay.Expr
The input expression.
weight : relay.Expr
The weight to conv2d_transpose.
kwargs : dict
Additional arguments.
Returns
-------
result : relay.Expr
The result.
"""
name = kwargs.get("name")
kwargs.pop("name")
if not weight:
weight = relay.var(name + "_weight")
return relay.nn.conv2d_transpose(data, weight, **kwargs)
def dense_add_bias(data, weight=None, bias=None, units=None, **kwargs):
"""Wrapper of dense which automatically creates weights if not given.
Parameters
----------
data : relay.Expr
The input expression.
weight : relay.Expr
The weight to dense.
bias : relay.Expr
The bias.
kwargs : dict
Additional arguments.
Returns
-------
result : relay.Expr
The result.
"""
name = kwargs.get("name")
kwargs.pop("name")
if not weight:
weight = relay.var(name + "_weight")
if not bias:
bias = relay.var(name + "_bias")
data = relay.nn.dense(data, weight, units, **kwargs)
data = relay.nn.bias_add(data, bias, axis=-1)
return data
def conv_kernel_layout(data_layout, is_depthwise=False):
"""Map the data layout to corresponding kernel layout.
Arbitrary layout is not fully supported in TOPI yet.
Parameters
----------
data_layout : str
The data_layout, can be 'NCHW', 'NHWC'.
is_depthwise : bool, optional
Whether the conv is a depthwise convolution.
Returns
-------
result : str
The corresponding kernel layout.
"""
conv_layout_map = {
"NCHW": "OIHW",
"NHWC": "HWIO",
}
depthwise_conv_layout_map = {
"NCHW": "OIHW",
"NHWC": "HWOI",
}
mapping = depthwise_conv_layout_map if is_depthwise else conv_layout_map
assert data_layout in mapping, "Unknown data layout %s" % data_layout
return mapping[data_layout]
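# Example (sketch, not part of the original file):
#   conv_kernel_layout("NCHW")                     # -> "OIHW"
#   conv_kernel_layout("NHWC", is_depthwise=True)  # -> "HWOI"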
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/lstm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Implementation of a Long Short-Term Memory (LSTM) cell.
Adapted from:
https://gist.github.com/merrymercy/5eb24e3b019f84200645bd001e9caae9
"""
from tvm import relay
from . import layers
from .init import create_workload
def lstm_cell(num_hidden, batch_size=1, dtype="float32", name=""):
"""Long-Short Term Memory (LSTM) network cell.
Parameters
----------
num_hidden : int
Number of units in output symbol.
batch_size : int
Batch size (length of states).
Returns
-------
result : tvm.relay.Function
A Relay function that evaluates an LSTM cell.
The function takes in a tensor of input data, a tuple of two
states, and weights and biases for dense operations on the
inputs and on the state. It returns a tuple with two members,
an output tensor and a tuple of two new states.
"""
builder = relay.ScopeBuilder()
input_type = relay.TensorType((batch_size, num_hidden), dtype)
weight_type = relay.TensorType((4 * num_hidden, num_hidden), dtype)
bias_type = relay.TensorType((4 * num_hidden,), dtype)
dense_type = relay.TensorType((batch_size, 4 * num_hidden), dtype)
slice_type = relay.TupleType([input_type, input_type, input_type, input_type])
ret_type = relay.TupleType([input_type, relay.TupleType([input_type, input_type])])
inputs = relay.Var("inputs", input_type)
states = relay.Var("states", relay.TupleType([input_type, input_type]))
i2h_weight = relay.Var("i2h_weight", weight_type)
i2h_bias = relay.Var("i2h_bias", bias_type)
h2h_weight = relay.Var("h2h_weight", weight_type)
h2h_bias = relay.Var("h2h_bias", bias_type)
i2h = builder.let(
("i2h", dense_type),
layers.dense_add_bias(
data=inputs, units=num_hidden * 4, weight=i2h_weight, bias=i2h_bias, name="%si2h" % name
),
)
h2h = builder.let(
("h2h", dense_type),
layers.dense_add_bias(
data=relay.TupleGetItem(states, 0),
units=num_hidden * 4,
weight=h2h_weight,
bias=h2h_bias,
name="%sh2h" % name,
),
)
gates = builder.let(("gates", dense_type), relay.add(i2h, h2h))
slice_gates = builder.let(
("slice_gates", slice_type), relay.split(gates, indices_or_sections=4, axis=1).astuple()
)
in_gate = builder.let(
("in_gate", input_type), relay.sigmoid(relay.TupleGetItem(slice_gates, 0))
)
forget_gate = builder.let(
("forget_gate", input_type), relay.sigmoid(relay.TupleGetItem(slice_gates, 1))
)
in_transform = builder.let(
("in_transform", input_type), relay.tanh(relay.TupleGetItem(slice_gates, 2))
)
out_gate = builder.let(
("out_gate", input_type), relay.sigmoid(relay.TupleGetItem(slice_gates, 3))
)
next_c = builder.let(
("next_c", input_type),
relay.add(
relay.multiply(forget_gate, relay.TupleGetItem(states, 1)),
relay.multiply(in_gate, in_transform),
),
)
next_h = builder.let(("next_h", input_type), relay.multiply(out_gate, relay.tanh(next_c)))
ret = builder.let(("ret", ret_type), relay.Tuple([next_h, relay.Tuple([next_h, next_c])]))
builder.ret(ret)
body = builder.get()
return relay.Function(
[inputs, states, i2h_weight, i2h_bias, h2h_weight, h2h_bias], body, ret_type
)
def get_net(iterations, num_hidden, batch_size=1, dtype="float32"):
"""Constructs an unrolled RNN with LSTM cells"""
input_type = relay.TensorType((batch_size, num_hidden), dtype)
weight_type = relay.TensorType((4 * num_hidden, num_hidden), dtype)
bias_type = relay.TensorType((4 * num_hidden,), dtype)
state_type = relay.TupleType([input_type, input_type])
cell_type = relay.TupleType([input_type, state_type])
builder = relay.ScopeBuilder()
zeros = builder.let(("zeros", input_type), relay.zeros((batch_size, num_hidden), dtype))
init_states = builder.let(("init_states", state_type), relay.Tuple([zeros, zeros]))
states = init_states
out = None
for i in range(iterations):
inputs = relay.Var("data", input_type)
i2h_weight = relay.Var("i2h_%s_weight" % i, weight_type)
i2h_bias = relay.Var("i2h_%s_bias" % i, bias_type)
h2h_weight = relay.Var("h2h_%s_weight" % i, weight_type)
h2h_bias = relay.Var("h2h_%s_bias" % i, bias_type)
cell_fn = lstm_cell(num_hidden, batch_size, dtype, "lstm_%s" % i)
call = builder.let(
("call_%s" % i, cell_type),
relay.Call(cell_fn, [inputs, states, i2h_weight, i2h_bias, h2h_weight, h2h_bias]),
)
new_out = builder.let(("out_%s" % i, input_type), relay.TupleGetItem(call, 0))
new_states = builder.let(("states_%s" % i, state_type), relay.TupleGetItem(call, 1))
states = new_states
out = new_out
builder.ret(out)
body = builder.get()
args = relay.analysis.free_vars(body)
return relay.Function(args, body, input_type)
def get_workload(iterations, num_hidden, batch_size=1, dtype="float32"):
"""Get benchmark workload for an LSTM RNN.
Parameters
----------
iterations : int
The number of iterations in the desired LSTM RNN.
num_hidden : int
The size of the hidden state
batch_size : int, optional (default 1)
The batch size used in the model
dtype : str, optional (default "float32")
The data type
Returns
-------
mod : tvm.IRModule
The relay module that contains a LSTM network.
params : dict of str to NDArray
The parameters.
"""
net = get_net(iterations, num_hidden, batch_size, dtype)
return create_workload(net)
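# Example usage (sketch, not part of the original file):
#   mod, params = get_workload(iterations=2, num_hidden=128)
#   # unrolls two LSTM cells over (1, 128) inputs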
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/mlp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
a simple multilayer perceptron
"""
from __future__ import absolute_import
from tvm import relay
from .init import create_workload
def get_net(batch_size, num_classes=10, image_shape=(1, 28, 28), dtype="float32"):
"""Get network a simple multilayer perceptron.
batch_size : int
The batch size used in the model
num_classes : int, optional
Number of classes
image_shape : tuple, optional
The input image shape
dtype : str, optional
The data type
Returns
-------
net : relay.Function
The dataflow.
"""
data_shape = (batch_size,) + image_shape
data = relay.var("data", shape=data_shape, dtype=dtype)
data = relay.nn.batch_flatten(data)
fc1 = relay.nn.dense(data, relay.var("fc1_weight"), units=128)
fc1 = relay.nn.bias_add(fc1, relay.var("fc1_bias"), axis=-1)
act1 = relay.nn.relu(fc1)
fc2 = relay.nn.dense(act1, relay.var("fc2_weight"), units=64)
fc2 = relay.nn.bias_add(fc2, relay.var("fc2_bias"), axis=-1)
act2 = relay.nn.relu(fc2)
fc3 = relay.nn.dense(act2, relay.var("fc3_weight"), units=num_classes)
fc3 = relay.nn.bias_add(fc3, relay.var("fc3_bias"), axis=-1)
mlp = relay.nn.softmax(data=fc3)
args = relay.analysis.free_vars(mlp)
return relay.Function(args, mlp)
def get_workload(batch_size, num_classes=10, image_shape=(1, 28, 28), dtype="float32"):
"""Get benchmark workload for a simple multilayer perceptron.
Parameters
----------
batch_size : int
The batch size used in the model
num_classes : int, optional
Number of classes
image_shape : tuple, optional
The input image shape
dtype : str, optional
The data type
Returns
-------
mod : tvm.IRModule
The relay module that contains a mlp network.
params : dict of str to NDArray
The parameters.
"""
net = get_net(batch_size, num_classes, image_shape, dtype)
return create_workload(net)
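# Example usage (sketch, not part of the original file):
#   mod, params = get_workload(batch_size=1)
#   # flattens (1, 1, 28, 28) inputs and produces 10 softmax scores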
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/mobilenet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Port of NNVM version of MobileNet to Relay.
"""
# pylint: disable=invalid-name
from tvm import relay
from . import layers
from .init import create_workload
def conv_block(
data,
name,
channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=(1, 1),
epsilon=1e-5,
layout="NCHW",
):
"""Helper function to construct conv_bn-relu"""
# convolution + bn + relu
conv = layers.conv2d(
data=data,
channels=channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_layout=layout,
kernel_layout=layers.conv_kernel_layout(layout),
name=name + "_conv",
)
bn = layers.batch_norm_infer(data=conv, epsilon=epsilon, name=name + "_bn")
act = relay.nn.relu(data=bn)
return act
def separable_conv_block(
data,
name,
depthwise_channels,
pointwise_channels,
kernel_size=(3, 3),
downsample=False,
padding=(1, 1),
epsilon=1e-5,
layout="NCHW",
dtype="float32",
):
"""Helper function to get a separable conv block"""
if downsample:
strides = (2, 2)
else:
strides = (1, 1)
# depthwise convolution + bn + relu
if layout == "NCHW":
wshape = (depthwise_channels, 1) + kernel_size
elif layout == "NHWC":
wshape = kernel_size + (depthwise_channels, 1)
else:
raise ValueError("Invalid layout: " + layout)
bn_axis = layout.index("C")
weight = relay.var(name + "_weight", shape=wshape, dtype=dtype)
conv1 = layers.conv2d(
data=data,
weight=weight,
channels=depthwise_channels,
groups=depthwise_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_layout=layout,
kernel_layout=layers.conv_kernel_layout(layout, True),
name=name + "_depthwise_conv1",
)
bn1 = layers.batch_norm_infer(data=conv1, epsilon=epsilon, axis=bn_axis, name=name + "_bn1")
act1 = relay.nn.relu(data=bn1)
# pointwise convolution + bn + relu
conv2 = layers.conv2d(
data=act1,
channels=pointwise_channels,
kernel_size=(1, 1),
strides=(1, 1),
padding=(0, 0),
data_layout=layout,
kernel_layout=layers.conv_kernel_layout(layout),
name=name + "_conv2",
)
bn2 = layers.batch_norm_infer(data=conv2, epsilon=epsilon, axis=bn_axis, name=name + "_bn2")
act2 = relay.nn.relu(data=bn2)
return act2
def mobile_net(
num_classes=1000,
data_shape=(1, 3, 224, 224),
dtype="float32",
alpha=1.0,
is_shallow=False,
layout="NCHW",
):
"""Function to construct a MobileNet"""
data = relay.var("data", shape=data_shape, dtype=dtype)
body = conv_block(data, "conv_block_1", int(32 * alpha), strides=(2, 2), layout=layout)
body = separable_conv_block(
body, "separable_conv_block_1", int(32 * alpha), int(64 * alpha), layout=layout, dtype=dtype
)
body = separable_conv_block(
body,
"separable_conv_block_2",
int(64 * alpha),
int(128 * alpha),
downsample=True,
layout=layout,
dtype=dtype,
)
body = separable_conv_block(
body,
"separable_conv_block_3",
int(128 * alpha),
int(128 * alpha),
layout=layout,
dtype=dtype,
)
body = separable_conv_block(
body,
"separable_conv_block_4",
int(128 * alpha),
int(256 * alpha),
downsample=True,
layout=layout,
dtype=dtype,
)
body = separable_conv_block(
body,
"separable_conv_block_5",
int(256 * alpha),
int(256 * alpha),
layout=layout,
dtype=dtype,
)
body = separable_conv_block(
body,
"separable_conv_block_6",
int(256 * alpha),
int(512 * alpha),
downsample=True,
layout=layout,
dtype=dtype,
)
if is_shallow:
body = separable_conv_block(
body,
"separable_conv_block_7",
int(512 * alpha),
int(1024 * alpha),
downsample=True,
layout=layout,
dtype=dtype,
)
body = separable_conv_block(
body,
"separable_conv_block_8",
int(1024 * alpha),
int(1024 * alpha),
downsample=True,
layout=layout,
dtype=dtype,
)
else:
for i in range(7, 12):
body = separable_conv_block(
body,
"separable_conv_block_%d" % i,
int(512 * alpha),
int(512 * alpha),
layout=layout,
dtype=dtype,
)
body = separable_conv_block(
body,
"separable_conv_block_12",
int(512 * alpha),
int(1024 * alpha),
downsample=True,
layout=layout,
dtype=dtype,
)
body = separable_conv_block(
body,
"separable_conv_block_13",
int(1024 * alpha),
int(1024 * alpha),
layout=layout,
dtype=dtype,
)
pool = relay.nn.global_avg_pool2d(data=body, layout=layout)
flatten = relay.nn.batch_flatten(data=pool)
weight = relay.var("fc_weight")
bias = relay.var("fc_bias")
fc = relay.nn.dense(data=flatten, weight=weight, units=num_classes)
fc = relay.nn.bias_add(fc, bias)
softmax = relay.nn.softmax(data=fc)
return relay.Function(relay.analysis.free_vars(softmax), softmax)
def get_workload(
batch_size=1, num_classes=1000, image_shape=(3, 224, 224), dtype="float32", layout="NCHW"
):
"""Get benchmark workload for mobilenet
Parameters
----------
batch_size : int, optional
The batch size used in the model
num_classes : int, optional
Number of classes
image_shape : tuple, optional
The input image shape, cooperate with layout
dtype : str, optional
The data type
layout : str, optional
The data layout of image_shape and the operators
cooperate with image_shape
Returns
-------
mod : tvm.IRModule
The relay module that contains a MobileNet network.
params : dict of str to NDArray
The parameters.
"""
data_shape = tuple([batch_size] + list(image_shape))
net = mobile_net(
num_classes=num_classes,
data_shape=data_shape,
dtype=dtype,
alpha=1.0,
is_shallow=False,
layout=layout,
)
return create_workload(net)
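# Example usage (sketch, not part of the original file):
#   mod, params = get_workload(batch_size=1, layout="NHWC", image_shape=(224, 224, 3))
#   # image_shape must agree with the chosen layout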
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/nat.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Defines a unary natural number (Peano natural number) abstract
data type for Relay and provides some utility functions for it.
Nats are useful for testing purposes, as they make it easy to write
test cases for recursion and pattern matching."""
from tvm.relay.backend.interpreter import ConstructorValue
def get_type(prelude, name):
ty_var = prelude.mod.get_global_type_var(name)
ty_data = prelude.mod.type_definitions[ty_var]
return tuple([ty_var] + list(ty_data.constructors))
def count(prelude, n):
"""Takes a ConstructorValue corresponding to a nat ADT
and converts it into a Python integer. This is an example of
using an ADT value in Python.
"""
assert isinstance(n, ConstructorValue)
_, z, s = prelude.mod.get_type("nat")
if n.tag == z.tag:
return 0
assert n.tag == s.tag
return 1 + count(prelude, n.fields[0])
def make_nat_value(prelude, n):
"""The inverse of count(): Given a non-negative Python integer,
constructs a ConstructorValue representing that value as a nat.
"""
_, z, s = prelude.mod.get_type("nat")
if n == 0:
return ConstructorValue(z.tag, [], z)
return ConstructorValue(s.tag, [make_nat_value(prelude, n - 1)], s)
def make_nat_expr(prelude, n):
"""Given a non-negative Python integer, constructs a Python
expression representing that integer's value as a nat.
"""
assert n >= 0
_, z, s = prelude.mod.get_type("nat")
ret = z()
while n > 0:
ret = s(ret)
n = n - 1
return ret
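# Example round trip (sketch; assumes a Prelude instance `p` with the "nat"
# ADT registered, which is not constructed in this file):
#   three = make_nat_value(p, 3)
#   assert count(p, three) == 3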
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/py_converter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return
"""Utility for converting Relay code into a Python script with equivalent semantics"""
import sys
import ast
from ast import alias, Assign, Load, Name, NameConstant, Num, Return, Store, Str
import re
import tvm
from tvm import relay
from tvm.relay.adt import Pattern
from tvm.relay.backend import te_compiler
from tvm.relay.expr import Expr, GlobalVar, Var
from tvm.relay.function import Function
from tvm.relay.expr_functor import ExprFunctor
__MAJOR__, __MINOR__, _, _, _ = sys.version_info
OUTPUT_VAR_NAME = "_py_out"
# corresponds to:
# import numpy
# import tvm
# from tvm import relay
# from tvm import nd
# from tvm.runtime import container as _container
# from tvm.relay.backend.interpreter import RefValue, ConstructorValue
PROLOGUE = [
ast.Import([alias("numpy", None)]),
ast.Import([alias("tvm", None)]),
ast.ImportFrom("tvm", [alias("relay", None)], 0),
ast.ImportFrom("tvm", [alias("nd", None)], 0),
ast.ImportFrom("tvm.runtime", [alias("container", "_container")], 0),
ast.ImportFrom(
"tvm.relay.backend.interpreter",
[alias("RefValue", None), alias("ConstructorValue", None)],
0,
),
]
class PythonConverter(ExprFunctor):
"""Functor for translating Relay programs into Python ASTs."""
def __init__(self, mod, target) -> None:
super().__init__()
self.mod = mod
self.tgt = target
self.tec = te_compiler.get()
self.fun_no = 0
self.var_no = 0
self.var_map = {}
def convert(self, prog: Expr):
"""This method converts the passed Relay expression into a Python
AST object with equivalent semantics.
The Python AST can be executed using exec(); it can be turned
into text and inspected using astor.
"""
optimized = self.optimize(prog)
# start with conversion prelude (imports) and convert global defs
body = []
body += PROLOGUE
body += self.convert_module()
prog_body, extra_defs = self.visit(optimized)
body += extra_defs
# we finally must assign the final expression to the output var
# so it can be read after running exec()
body.append(Assign([Name(OUTPUT_VAR_NAME, Store())], prog_body))
global __MAJOR__, __MINOR__
if __MAJOR__ == 3 and __MINOR__ == 8:
return ast.fix_missing_locations(ast.Module(body=body, type_ignores=[]))
else:
return ast.fix_missing_locations(ast.Module(body=body))
def optimize(self, prog: Expr):
"""Performs optimizations necessary to be able to generate code for prog."""
# unwrap tuple wrappers (some op calls produce them)
unwrapped = prog.astuple() if isinstance(prog, relay.TupleWrapper) else prog
assert relay.analysis.well_formed(unwrapped)
mod = self.mod.from_expr(unwrapped, self.mod.functions, self.mod.type_definitions)
# necessary pass: SimplifyInference (otherwise we can't generate code for some operators)
# and fusion (to get primitive functions)
opts = tvm.transform.Sequential(
[relay.transform.SimplifyInference(), relay.transform.FuseOps(fuse_opt_level=0)]
)
mod = opts(mod)
optimized = mod["main"]
return optimized if isinstance(unwrapped, Function) else optimized.body
def sanitize(self, name: str) -> str:
"""Removes any invalid characters (only underscores, numbers, and letters permitted)
from the given name. Since we append a number and underscore to var names anyway,
it doesn't matter if the name is the empty string."""
return re.sub(r"\W", "", name)
def generate_var_name(self, name_hint: str) -> str:
"""Generates a unique variable name starting from the hint."""
name = "{}_var_{}".format(self.sanitize(name_hint), self.var_no)
self.var_no += 1
return name
def generate_function_name(self, name_hint: str) -> str:
"""Generates a unique function name starting from the hint."""
name = "{}_fun_{}".format(self.sanitize(name_hint), self.fun_no)
self.fun_no += 1
return name
def get_var_name(self, var: Expr) -> str:
"""Returns the var name for the given Realy variable."""
if var in self.var_map:
return self.var_map[var]
name = self.generate_var_name(var.name_hint)
self.var_map[var] = name
return name
def include_var(self, var: Expr, assign=False):
"""Returns a variable AST node for the given Relay var depending on
whether it must appear in an assignment or not."""
name = self.get_var_name(var)
return Name(name, Store() if assign else Load())
def parse_name(self, name: str):
"""Given the name of a Python method with dots (e.g., 'relay.var'),
returns an appropriate AST object corresponding to that name."""
attributes = name.split(".")
ret = Name(attributes[0], Load())
for i in range(len(attributes) - 1):
ret = ast.Attribute(ret, attributes[i + 1], Load())
return ret
def parse_numpy_array(self, arr):
"""Given a Numpy array, produces an appropriate Python array
or numerical literal representing its contents."""
def parse_single(i):
return NameConstant(i) if isinstance(i, bool) else Num(i)
if arr.ndim == 0:
return parse_single(arr.item())
if arr.ndim == 1:
return ast.List([parse_single(i.item()) for i in arr], Load())
elts = []
for row in arr:
elts.append(self.parse_numpy_array(row))
return ast.List(elts, Load())
def convert_fields(self, fields: [Expr]):
"""Given a list of call args or tuple fields, converts
each and returns their ASTs and their defs lists (in order)."""
bodies = []
defs = []
for field in fields:
member_body, member_defs = self.visit(field)
bodies.append(member_body)
defs += member_defs
return (bodies, defs)
def convert_to_thunk(self, name_hint: str, expr: Expr):
"""Wraps the passed expression in a thunk."""
body, defs = self.visit(expr)
thunk_name = self.generate_function_name(name_hint)
thunk = self.create_def(thunk_name, [], defs + [Return(body)])
return (thunk, thunk_name)
def convert_func_node(self, func: Function, name_var=None):
"""Converts the given Relay function into a Python function, with
special handling for named functions (defined locally or globally)"""
if name_var is None:
func_name = self.generate_function_name("_anon_func")
if isinstance(name_var, GlobalVar):
func_name = str(name_var.name_hint)
if isinstance(name_var, Var):
func_name = self.get_var_name(name_var)
var_names = [self.get_var_name(var) for var in func.params]
body, defs = self.visit(func.body)
ret = self.create_def(func_name, var_names, defs + [Return(body)])
return (ret, func_name)
def convert_module(self):
"""Converts all the global functions defined in the module and returns
them as a list of definitions"""
defs = []
for var, func in self.mod.functions.items():
# optimize the definition so any operators used are lowered
opt_func = self.optimize(func)
try:
converted_func, _ = self.convert_func_node(opt_func, var)
defs.append(converted_func)
except TypeError:
# TODO(wweic): fix conversion for Any
pass
return defs
def create_call(self, func_name: str, arguments):
"""Creates a simple function call."""
return ast.Call(self.parse_name(func_name), arguments, [])
def create_def(self, func_name: str, arguments: [str], body):
"""Wrapper over function definition AST node, whose constructor is inconvenient."""
inner_args = [ast.arg(argument, None) for argument in arguments]
global __MAJOR__, __MINOR__
if __MAJOR__ == 3 and __MINOR__ == 8:
arguments = ast.arguments([], inner_args, None, [], [], None, [])
else:
arguments = ast.arguments(inner_args, None, [], [], None, [])
return ast.FunctionDef(
func_name,
arguments,
body,
[],
None,
)
def create_op_call(self, op: Function, relay_args, py_args):
"""Lowers the passed primitive function, registers it in TVM's
global compiler, and produces a call to the lowered function in
the generated Python code."""
# compile the function and register globally
cc_key = te_compiler.CCacheKey(op, self.tgt)
func_hash = tvm.ir.structural_hash(op)
op_name = "_lowered_op_{}".format(func_hash)
if not tvm.get_global_func(op_name, allow_missing=True):
jitted = self.tec.jit(cc_key, self.tgt)
tvm.register_func(op_name, jitted)
def convert_input(py_input, arg_type):
"""Use the types of the function arguments to determine whether we expect
a tensor or tuple (returns list of inputs to the lowered op call)"""
# equivalent: input.data
if isinstance(arg_type, relay.TensorType):
return [py_input]
assert isinstance(arg_type, relay.TupleType)
# convert each input.fields[i]
ret = []
for i in range(len(arg_type.fields)):
ret += convert_input(
ast.Subscript(py_input, ast.Index(Num(i)), Load()), arg_type.fields[i]
)
return ret
def convert_output(ret_type):
"""Use the function return type to produce auxiliary variables to store outputs.
Returns ([assignments of output vars], [extra arguments to pass to op call],
expression collecting output)"""
if isinstance(ret_type, relay.TensorType):
output_var_name = self.generate_var_name("_out")
output_var = Name(output_var_name, Load())
shape = ast.Tuple([Num(dim) for dim in ret_type.concrete_shape], Load())
# create a new NDArray of the right shape and dtype
assign_output = Assign(
[Name(output_var_name, Store())],
self.create_call(
"nd.array", [self.create_call("numpy.empty", [shape, Str(ret_type.dtype)])]
),
)
return ([assign_output], [output_var], output_var)
assert isinstance(ret_type, relay.TupleType)
assignments = []
extra_args = []
fields = []
for t in ret_type.fields:
inner_assignments, inner_args, inner_output = convert_output(t)
assignments += inner_assignments
extra_args += inner_args
fields.append(inner_output)
fields = [ast.List(fields, Load())]
return (assignments, extra_args, self.create_call("_container.tuple_object", fields))
# create a function to wrap the call of the lowered op and return
# a call to that function
wrap_name = self.generate_function_name("_{}_wrapper".format(op_name))
wrap_args = [self.generate_var_name("_arg_{}".format(i)) for i in range(len(py_args))]
inner_call_args = []
for i in range(len(py_args)):
inner_call_args += convert_input(Name(wrap_args[i], Load()), relay_args[i].checked_type)
output_assignments, aux_args, output = convert_output(op.checked_type.ret_type)
# equiv: _op = tvm.get_global_func(op_name)
op_var = self.generate_var_name("_op")
op_call = self.create_call("tvm.get_global_func", [Str(op_name)])
op_assign = Assign([Name(op_var, Store())], op_call)
# equiv: _op(args)
inner_call = self.create_call(op_var, inner_call_args + aux_args)
body = output_assignments + [op_assign, ast.Expr(inner_call), Return(output)]
wrap_def = self.create_def(wrap_name, wrap_args, body)
return wrap_def, self.create_call(wrap_name, py_args)
def create_match_check(self, pattern: Pattern, data):
"""Given an ADT match pattern and a (Python) expression pointing to
an ADT value, this generates a Python expression that checks if the
ADT value matches the given pattern (returning True or False)."""
# wildcard or var match everything
if isinstance(pattern, (relay.PatternWildcard, relay.PatternVar)):
return NameConstant(True)
conds = []
if isinstance(pattern, relay.PatternConstructor):
# constructor patterns check whether the constructors match
# and also the matches of any nested patterns
# equiv: (arg.tag == pattern_constructor.tag)
conds.append(
ast.Compare(
ast.Attribute(data, "tag", Load()),
[ast.Eq()],
[ast.Num(pattern.constructor.tag)],
)
)
assert isinstance(pattern, (relay.PatternConstructor, relay.PatternTuple))
# now check for any nested patterns
for i in range(len(pattern.patterns)):
nested_pat = pattern.patterns[i]
# can safely skip var or wildcard patterns: they will
# never cause a check to fail
if not isinstance(nested_pat, relay.PatternConstructor):
continue
# index into the value corresponding to the subpattern
field_index = ast.Subscript(
ast.Attribute(data, "fields", Load()), ast.Index(Num(i)), Load()
)
conds.append(self.create_match_check(nested_pat, field_index))
# if we do not need to check nested pattern, just return the single check
if len(conds) == 1:
return conds[0]
# otherwise AND together any nested checks
return ast.BoolOp(ast.And(), conds)
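# Illustration (sketch, not in the original source): for a pattern like
# PatternConstructor(cons, [PatternVar(v), PatternWildcard()]) applied to `data`,
# the generated check is roughly `data.tag == cons.tag`; nested constructor
# sub-patterns add further tag comparisons on `data.fields[i]`, ANDed together.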
def create_match_clause_body(self, pattern: Pattern, body: Expr):
"""Given a match clause pattern and a clause body,
generates a Python function that when called with an ADT
that matches the pattern, returns the result of evaluating
the clause body. This function returns a function definition
and the name of the generated function."""
def collect_var_assignments(pat, val):
"""This helper function ensures that the pattern is used to
properly assign all subfields of the given AST for use
in the clause body
E.g., for PatternConstructor(A, PatternVar(v), PatternWildcard(),
PatternConstructor(B, PatternVar(w)))
we would want to have
v = a.fields[0]
w = a.fields[2].fields[0]
"""
if isinstance(pat, relay.PatternWildcard):
return []
if isinstance(pat, relay.PatternVar):
return [Assign([self.include_var(pat.var, assign=True)], val)]
# constructor pattern: assign each field of the value
# based on subpatterns
assignments = []
for i in range(len(pat.patterns)):
# we want the assignments for val.fields[i]
field = ast.Subscript(
ast.Attribute(val, "fields", Load()), ast.Index(Num(i)), Load()
)
assignments += collect_var_assignments(pat.patterns[i], field)
return assignments
func_name = self.generate_function_name("_match_clause_body")
arg_name = self.generate_var_name("_match_clause_body")
clause_body, defs = self.visit(body)
assignments = collect_var_assignments(pattern, Name(arg_name, Load()))
func_def = self.create_def(
func_name, [arg_name], defs + assignments + [Return(clause_body)]
)
return (func_def, func_name)
# Convention for the expr visitor: Each visit function returns a tuple of two members.
#
# The first is a Python AST comprised of a single *expression* that evaluates to an equivalent
# result to the desired Relay expression (and executes all effects in the right order).
#
# The second is a list of function definition *statements* defining thunks and other
# auxiliary functions needed in the translated AST object. The defs in the second object
# will always have unique names and will never perform any effects, so as long as they
# appear in the Python program before the first statement is executed, there should not
# be any problems.
def visit_var(self, var: Expr):
return (self.include_var(var, assign=False), [])
def visit_global_var(self, gvar: Expr):
# we don't need to add numbers to global var names because
# the *names* are checked for uniqueness in the mod
return (Name(str(gvar.name_hint), Load()), [])
def visit_let(self, letexp: Expr):
# To properly account for scoping and ensure that the entire node produces an expression,
# we translate the let binding as a function that we call with the value we intend to bind.
# Yes, this is somewhat ugly.
"""
let var = value in body
=======================
def let_thunk(var):
return body
let_thunk(value)
"""
bind_body, bind_defs = self.visit(letexp.body)
func_name = self.generate_function_name("_let_func")
binding_func = self.create_def(
func_name, [self.get_var_name(letexp.var)], bind_defs + [Return(bind_body)]
)
# we call the binding func with the intended value for the bound variable
# special case: if the value is a function literal, we must ensure it can be
# recursive by naming it after the var
if isinstance(letexp.value, Function):
value_def, value_name = self.convert_func_node(letexp.value, letexp.var)
return (
self.create_call(func_name, [Name(value_name, Load())]),
[value_def, binding_func],
)
value_body, value_defs = self.visit(letexp.value)
value_defs.append(binding_func)
binding_call = self.create_call(func_name, [value_body])
return (binding_call, value_defs)
def visit_tuple(self, tup: Expr):
fields, ret_defs = self.convert_fields(tup.fields)
fields = [ast.List(fields, Load())]
return (self.create_call("_container.tuple_object", fields), ret_defs)
def visit_tuple_getitem(self, tgi: Expr):
tup, tup_defs = self.visit(tgi.tuple_value)
ret = ast.Subscript(tup, ast.Index(Num(tgi.index)), Load())
return (ret, tup_defs)
def visit_if(self, if_block: Expr):
cond_body, cond_defs = self.visit(if_block.cond)
true_body, true_defs = self.visit(if_block.true_branch)
false_body, false_defs = self.visit(if_block.false_branch)
        # need to get the value out of an NDArray to check the condition
        # equivalent to: val.numpy()
        cond_check = ast.Call(ast.Attribute(cond_body, "numpy", Load()), [], [])
ret = ast.IfExp(cond_check, true_body, false_body)
return (ret, cond_defs + true_defs + false_defs)
def visit_constant(self, constant: Expr):
"""Proceeds by converting constant value to a numpy array
and converting it to the appropriate value in the generated
code (whether it be a Python scalar or a Numpy array)"""
value = constant.data.numpy()
const_expr = ast.Call(
ast.Attribute(Name("numpy", Load()), "array", Load()),
[self.parse_numpy_array(value)],
[ast.keyword("dtype", Str(constant.checked_type.dtype))],
)
return (self.create_call("nd.array", [const_expr]), [])
def visit_function(self, func: Expr):
        # Python's lambdas are very restrictive, so we give inline functions names instead
converted_func, func_name = self.convert_func_node(func)
return (Name(func_name, Load()), [converted_func])
def visit_call(self, call: Expr):
"""For calls, we must distinguish between ordinary functions,
operators, and constructor calls."""
func = call.op
fields, field_defs = self.convert_fields(call.args)
if isinstance(func, tvm.ir.Op):
raise Exception("Operators should have been lowered and eliminated")
if isinstance(func, relay.Constructor):
# produce a constructor value
return (
self.create_call(
"ConstructorValue",
[ast.Num(func.tag), ast.List(fields, Load()), NameConstant(None)],
),
field_defs,
)
# lowered operator: generate a call to a function that gets the PackedFunc
# from TVM's registry
if isinstance(func, Function) and func.attrs and func.attrs.Primitive.value == 1:
op_call_def, op_call = self.create_op_call(func, call.args, fields)
return (op_call, field_defs + [op_call_def])
# ordinary function
converted_func, defs = self.visit(func)
defs += field_defs
return (ast.Call(converted_func, fields, []), defs)
def visit_ref_create(self, ref: Expr):
val, defs = self.visit(ref.value)
return (self.create_call("RefValue", [val]), defs)
def visit_ref_read(self, read: Expr):
ref, defs = self.visit(read.ref)
return (ast.Attribute(ref, "value", Load()), defs)
def visit_ref_write(self, write: Expr):
"""For writing refs, we wrap the update in a thunk
(returning an empty tuple to match Relay's semantics)
that we execute at the right time. This ensures such assignments
can be properly nested, since assignments are statements
in Python but expressions in Relay"""
ref, ref_defs = self.visit(write.ref)
val, val_defs = self.visit(write.value)
thunk_name = self.generate_function_name("_ref_write_thunk")
thunk = self.create_def(
thunk_name,
[],
ref_defs
+ val_defs
+ [
Assign([ast.Attribute(ref, "value", Store())], val),
Return(self.create_call("_container.tuple_object", [])),
],
)
return (self.create_call(thunk_name, []), [thunk])
def visit_match(self, match: Expr):
"""For matches, we wrap the entire expression in a thunk
because it is easiest to implement them using if statements.
For each clause, we generate a function that checks if the
pattern matches. If yes, we call a function that assigns
the variables appropriately and invokes the clause body."""
data, defs = self.visit(match.data)
data_var = self.generate_var_name("_match_data")
# must ensure the data clause is executed exactly once
thunk_body = [Assign([Name(data_var, Store())], data)]
for clause in match.clauses:
check_expr = self.create_match_check(clause.lhs, Name(data_var, Load()))
body_def, body_name = self.create_match_clause_body(clause.lhs, clause.rhs)
defs.append(body_def)
# equiv: if check(data): return body(data)
thunk_body.append(
ast.If(
check_expr, [Return(self.create_call(body_name, [Name(data_var, Load())]))], []
)
)
# finally if nothing matches we have a failed assert (should never happen)
thunk_body.append(ast.Assert(NameConstant(False), Str("Match was not exhaustive")))
thunk_name = self.generate_function_name("_match_thunk")
thunk_def = self.create_def(thunk_name, [], defs + thunk_body)
return (self.create_call(thunk_name, []), [thunk_def])
# these are both handled in the "call" case
def visit_constructor(self, _):
pass
def visit_op(self, _):
pass
def to_python(expr: Expr, mod=None, target=tvm.target.Target("llvm")):
"""Converts the given Relay expression into a Python script (as a Python AST object).
For easiest debugging, import the astor package and use to_source()."""
mod = mod if mod is not None else tvm.IRModule()
mod = relay.transform.InferType()(mod)
converter = PythonConverter(mod, target)
python = converter.convert(expr)
assert python
return python
def run_as_python(expr: Expr, mod=None, target=tvm.target.Target("llvm")):
"""Converts the given Relay expression into a Python script and
executes it."""
mod = mod if mod is not None else tvm.IRModule()
py_ast = to_python(expr, mod, target)
code = compile(py_ast, "<string>", "exec")
var_map = {OUTPUT_VAR_NAME: None}
# pylint: disable=exec-used
exec(code, var_map, var_map)
return var_map[OUTPUT_VAR_NAME]
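# A minimal usage sketch (assumes a TVM build with the LLVM backend enabled);
# the Python backend should evaluate a constant expression to the NDArray
# wrapping its value.
if __name__ == "__main__":
    example_const = relay.const(1.5, dtype="float32")  # hypothetical demo input
    example_result = run_as_python(example_const)
    print(example_result.numpy())  # expect: 1.5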
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/resnet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
Original author Wei Wu
Implemented the following paper:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. "Identity Mappings in Deep Residual Networks"
"""
# pylint: disable=unused-argument
from tvm import relay
from .init import create_workload
from . import layers
def residual_unit(
data,
num_filter,
stride,
dim_match,
name,
bottle_neck=True,
data_layout="NCHW",
kernel_layout="IOHW",
):
"""Return ResNet Unit symbol for building ResNet
Parameters
----------
data : str
Input data
num_filter : int
Number of output channels
    bottle_neck : bool
        Whether to use a bottleneck (1x1-3x3-1x1) unit rather than a plain 3x3-3x3 unit
stride : tuple
Stride used in convolution
dim_match : bool
True means channel number between input and output is the same,
otherwise means differ
name : str
Base name of the operators
"""
bn_axis = data_layout.index("C")
if bottle_neck:
bn1 = layers.batch_norm_infer(data=data, epsilon=2e-5, axis=bn_axis, name=name + "_bn1")
act1 = relay.nn.relu(data=bn1)
conv1 = layers.conv2d(
data=act1,
channels=int(num_filter * 0.25),
kernel_size=(1, 1),
strides=stride,
padding=(0, 0),
name=name + "_conv1",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
bn2 = layers.batch_norm_infer(data=conv1, epsilon=2e-5, axis=bn_axis, name=name + "_bn2")
act2 = relay.nn.relu(data=bn2)
conv2 = layers.conv2d(
data=act2,
channels=int(num_filter * 0.25),
kernel_size=(3, 3),
strides=(1, 1),
padding=(1, 1),
name=name + "_conv2",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
bn3 = layers.batch_norm_infer(data=conv2, epsilon=2e-5, axis=bn_axis, name=name + "_bn3")
act3 = relay.nn.relu(data=bn3)
conv3 = layers.conv2d(
data=act3,
channels=num_filter,
kernel_size=(1, 1),
strides=(1, 1),
padding=(0, 0),
name=name + "_conv3",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
if dim_match:
shortcut = data
else:
shortcut = layers.conv2d(
data=act1,
channels=num_filter,
kernel_size=(1, 1),
strides=stride,
name=name + "_sc",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
return relay.add(conv3, shortcut)
bn1 = layers.batch_norm_infer(data=data, epsilon=2e-5, axis=bn_axis, name=name + "_bn1")
act1 = relay.nn.relu(data=bn1)
conv1 = layers.conv2d(
data=act1,
channels=num_filter,
kernel_size=(3, 3),
strides=stride,
padding=(1, 1),
name=name + "_conv1",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
bn2 = layers.batch_norm_infer(data=conv1, epsilon=2e-5, axis=bn_axis, name=name + "_bn2")
act2 = relay.nn.relu(data=bn2)
conv2 = layers.conv2d(
data=act2,
channels=num_filter,
kernel_size=(3, 3),
strides=(1, 1),
padding=(1, 1),
name=name + "_conv2",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
if dim_match:
shortcut = data
else:
shortcut = layers.conv2d(
data=act1,
channels=num_filter,
kernel_size=(1, 1),
strides=stride,
name=name + "_sc",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
return relay.add(conv2, shortcut)
def resnet(
units,
num_stages,
filter_list,
num_classes,
data_shape,
bottle_neck=True,
layout="NCHW",
dtype="float32",
):
"""Return ResNet Program.
Parameters
----------
units : list
Number of units in each stage
num_stages : int
Number of stages
filter_list : list
Channel size of each stage
num_classes : int
Output size of symbol
data_shape : tuple of int.
The shape of input data.
bottle_neck : bool
Whether apply bottleneck transformation.
layout: str
The data layout for conv2d
dtype : str
The global data type.
"""
data_layout = layout
kernel_layout = "OIHW" if layout == "NCHW" else "HWIO"
bn_axis = data_layout.index("C")
num_unit = len(units)
assert num_unit == num_stages
data = relay.var("data", shape=data_shape, dtype=dtype)
data = layers.batch_norm_infer(
data=data, epsilon=2e-5, axis=bn_axis, scale=False, name="bn_data"
)
(_, _, height, _) = data_shape
if layout == "NHWC":
(_, height, _, _) = data_shape
if height <= 32: # such as cifar10
body = layers.conv2d(
data=data,
channels=filter_list[0],
kernel_size=(3, 3),
strides=(1, 1),
padding=(1, 1),
name="conv0",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
else: # often expected to be 224 such as imagenet
body = layers.conv2d(
data=data,
channels=filter_list[0],
kernel_size=(7, 7),
strides=(2, 2),
padding=(3, 3),
name="conv0",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
body = layers.batch_norm_infer(data=body, epsilon=2e-5, axis=bn_axis, name="bn0")
body = relay.nn.relu(data=body)
body = relay.nn.max_pool2d(
data=body, pool_size=(3, 3), strides=(2, 2), padding=(1, 1), layout=data_layout
)
for i in range(num_stages):
body = residual_unit(
body,
filter_list[i + 1],
(1 if i == 0 else 2, 1 if i == 0 else 2),
False,
name="stage%d_unit%d" % (i + 1, 1),
bottle_neck=bottle_neck,
data_layout=data_layout,
kernel_layout=kernel_layout,
)
for j in range(units[i] - 1):
body = residual_unit(
body,
filter_list[i + 1],
(1, 1),
True,
name="stage%d_unit%d" % (i + 1, j + 2),
bottle_neck=bottle_neck,
data_layout=data_layout,
kernel_layout=kernel_layout,
)
bn1 = layers.batch_norm_infer(data=body, epsilon=2e-5, axis=bn_axis, name="bn1")
relu1 = relay.nn.relu(data=bn1)
    # global_avg_pool2d needs no kernel size; it pools over the entire spatial extent
pool1 = relay.nn.global_avg_pool2d(data=relu1, layout=data_layout)
flat = relay.nn.batch_flatten(data=pool1)
fc1 = layers.dense_add_bias(data=flat, units=num_classes, name="fc1")
net = relay.nn.softmax(data=fc1)
return relay.Function(relay.analysis.free_vars(net), net)
def get_net(
batch_size,
num_classes,
num_layers=50,
image_shape=(3, 224, 224),
layout="NCHW",
dtype="float32",
**kwargs,
):
"""
Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
Original author Wei Wu
"""
(_, height, _) = image_shape
if layout == "NHWC":
(height, _, _) = image_shape
data_shape = (batch_size,) + image_shape
if height <= 28:
num_stages = 3
if (num_layers - 2) % 9 == 0 and num_layers >= 164:
per_unit = [(num_layers - 2) // 9]
filter_list = [16, 64, 128, 256]
bottle_neck = True
elif (num_layers - 2) % 6 == 0 and num_layers < 164:
per_unit = [(num_layers - 2) // 6]
filter_list = [16, 16, 32, 64]
bottle_neck = False
else:
raise ValueError("no experiments done on num_layers {}".format(num_layers))
units = per_unit * num_stages
else:
if num_layers >= 50:
filter_list = [64, 256, 512, 1024, 2048]
bottle_neck = True
else:
filter_list = [64, 64, 128, 256, 512]
bottle_neck = False
num_stages = 4
if num_layers == 18:
units = [2, 2, 2, 2]
elif num_layers == 34:
units = [3, 4, 6, 3]
elif num_layers == 50:
units = [3, 4, 6, 3]
elif num_layers == 101:
units = [3, 4, 23, 3]
elif num_layers == 152:
units = [3, 8, 36, 3]
elif num_layers == 200:
units = [3, 24, 36, 3]
elif num_layers == 269:
units = [3, 30, 48, 8]
else:
raise ValueError("no experiments done on num_layers {}".format(num_layers))
return resnet(
units=units,
num_stages=num_stages,
filter_list=filter_list,
num_classes=num_classes,
data_shape=data_shape,
bottle_neck=bottle_neck,
layout=layout,
dtype=dtype,
)
def get_workload(
batch_size=1,
num_classes=1000,
num_layers=18,
image_shape=(3, 224, 224),
layout="NCHW",
dtype="float32",
**kwargs,
):
"""Get benchmark workload for resnet
Parameters
----------
batch_size : int
The batch size used in the model
num_classes : int, optional
Number of classes
num_layers : int, optional
Number of layers
image_shape : tuple, optional
The input image shape
layout: str
The data layout for conv2d
dtype : str, optional
The data type
kwargs : dict
Extra arguments
Returns
-------
mod : tvm.IRModule
The relay module that contains a ResNet network.
params : dict of str to NDArray
The parameters.
"""
net = get_net(
batch_size=batch_size,
num_classes=num_classes,
num_layers=num_layers,
image_shape=image_shape,
dtype=dtype,
layout=layout,
**kwargs,
)
return create_workload(net)
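# A minimal usage sketch (assumes a local TVM build with LLVM enabled): fetch
# a randomly initialized ResNet-18 workload and compile it for CPU.
if __name__ == "__main__":
    import tvm
    mod, params = get_workload(batch_size=1, num_layers=18)
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target="llvm", params=params)
    print(lib)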
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/resnet_3d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Network definition of 3D ResNet for Action Recognition (CVPR 2018)
Reference : https://github.com/kenshohara/3D-ResNets-PyTorch
"""
# pylint: disable=unused-argument
from tvm import relay
from .init import create_workload
from . import layers
def residual_unit(
data,
num_filter,
stride,
dim_match,
name,
bottle_neck=True,
data_layout="NCDHW",
kernel_layout="OIDHW",
):
"""Return ResNet Unit symbol for building ResNet
Parameters
----------
data : str
Input data
num_filter : int
Number of output channels
    bottle_neck : bool
        Whether to use a bottleneck (1x1x1-3x3x3-1x1x1) unit rather than a plain 3x3x3-3x3x3 unit
stride : tuple
Stride used in convolution
dim_match : bool
True means channel number between input and output is the same,
otherwise means differ
name : str
Base name of the operators
"""
if bottle_neck:
bn1 = layers.batch_norm_infer(data=data, epsilon=2e-5, name=name + "_bn1")
act1 = relay.nn.relu(data=bn1)
conv1 = layers.conv3d(
data=act1,
channels=int(num_filter * 0.25),
kernel_size=(1, 1, 1),
strides=stride,
padding=(0, 0, 0),
name=name + "_conv1",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
bn2 = layers.batch_norm_infer(data=conv1, epsilon=2e-5, name=name + "_bn2")
act2 = relay.nn.relu(data=bn2)
conv2 = layers.conv3d(
data=act2,
channels=int(num_filter * 0.25),
kernel_size=(3, 3, 3),
strides=(1, 1, 1),
padding=(1, 1, 1),
name=name + "_conv2",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
bn3 = layers.batch_norm_infer(data=conv2, epsilon=2e-5, name=name + "_bn3")
act3 = relay.nn.relu(data=bn3)
conv3 = layers.conv3d(
data=act3,
channels=num_filter,
kernel_size=(1, 1, 1),
strides=(1, 1, 1),
padding=(0, 0, 0),
name=name + "_conv3",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
if dim_match:
shortcut = data
else:
shortcut = layers.conv3d(
data=act1,
channels=num_filter,
kernel_size=(1, 1, 1),
strides=stride,
name=name + "_sc",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
return relay.add(conv3, shortcut)
bn1 = layers.batch_norm_infer(data=data, epsilon=2e-5, name=name + "_bn1")
act1 = relay.nn.relu(data=bn1)
conv1 = layers.conv3d(
data=act1,
channels=num_filter,
kernel_size=(3, 3, 3),
strides=stride,
padding=(1, 1, 1),
name=name + "_conv1",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
bn2 = layers.batch_norm_infer(data=conv1, epsilon=2e-5, name=name + "_bn2")
act2 = relay.nn.relu(data=bn2)
conv2 = layers.conv3d(
data=act2,
channels=num_filter,
kernel_size=(3, 3, 3),
strides=(1, 1, 1),
padding=(1, 1, 1),
name=name + "_conv2",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
if dim_match:
shortcut = data
else:
shortcut = layers.conv3d(
data=act1,
channels=num_filter,
kernel_size=(1, 1, 1),
strides=stride,
name=name + "_sc",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
return relay.add(conv2, shortcut)
def resnet(
units,
num_stages,
filter_list,
num_classes,
data_shape,
bottle_neck=True,
layout="NCDHW",
dtype="float32",
):
"""Return ResNet Program.
Parameters
----------
units : list
Number of units in each stage
num_stages : int
Number of stages
filter_list : list
Channel size of each stage
num_classes : int
Output size of symbol
data_shape : tuple of int.
The shape of input data.
bottle_neck : bool
Whether apply bottleneck transformation.
layout: str
The data layout for conv3d
dtype : str
The global data type.
"""
data_layout = layout
kernel_layout = "OIDHW" if layout == "NCDHW" else "DHWIO"
num_unit = len(units)
assert num_unit == num_stages
data = relay.var("data", shape=data_shape, dtype=dtype)
data = layers.batch_norm_infer(data=data, epsilon=2e-5, scale=False, name="bn_data")
if layout == "NCDHW":
(_, _, _, height, _) = data_shape
else:
(_, _, height, _, _) = data_shape
if height <= 32: # such as cifar10
body = layers.conv3d(
data=data,
channels=filter_list[0],
kernel_size=(3, 3, 3),
strides=(1, 1, 1),
padding=(1, 1, 1),
name="conv0",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
else: # often expected to be 224 such as imagenet
body = layers.conv3d(
data=data,
channels=filter_list[0],
kernel_size=(3, 7, 7),
strides=(1, 2, 2),
padding=(1, 3, 3),
name="conv0",
data_layout=data_layout,
kernel_layout=kernel_layout,
)
body = layers.batch_norm_infer(data=body, epsilon=2e-5, name="bn0")
body = relay.nn.relu(data=body)
# body = relay.nn.max_pool3d(data=body, pool_size=(3, 3), strides=(2, 2), padding=(1, 1),
# layout=data_layout)
for i in range(num_stages):
body = residual_unit(
body,
filter_list[i + 1],
(1 if i == 0 else 2, 1 if i == 0 else 2, 1 if i == 0 else 2),
False,
name="stage%d_unit%d" % (i + 1, 1),
bottle_neck=bottle_neck,
data_layout=data_layout,
kernel_layout=kernel_layout,
)
for j in range(units[i] - 1):
body = residual_unit(
body,
filter_list[i + 1],
(1, 1, 1),
True,
name="stage%d_unit%d" % (i + 1, j + 2),
bottle_neck=bottle_neck,
data_layout=data_layout,
kernel_layout=kernel_layout,
)
bn1 = layers.batch_norm_infer(data=body, epsilon=2e-5, name="bn1")
relu1 = relay.nn.relu(data=bn1)
    # global_avg_pool3d needs no kernel size; it pools over the entire spatial extent
pool1 = relay.nn.global_avg_pool3d(data=relu1, layout=data_layout)
flat = relay.nn.batch_flatten(data=pool1)
fc1 = layers.dense_add_bias(data=flat, units=num_classes, name="fc1")
net = relay.nn.softmax(data=fc1)
return relay.Function(relay.analysis.free_vars(net), net)
def get_net(
batch_size,
num_classes,
num_layers=50,
image_shape=(3, 16, 112, 112),
layout="NCDHW",
dtype="float32",
**kwargs,
):
"""
Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
Original author Wei Wu
"""
if layout == "NCDHW":
(_, _, height, _) = image_shape
else:
(_, height, _, _) = image_shape
data_shape = (batch_size,) + image_shape
if height <= 28:
num_stages = 3
if (num_layers - 2) % 9 == 0 and num_layers >= 164:
per_unit = [(num_layers - 2) // 9]
filter_list = [16, 64, 128, 256]
bottle_neck = True
elif (num_layers - 2) % 6 == 0 and num_layers < 164:
per_unit = [(num_layers - 2) // 6]
filter_list = [16, 16, 32, 64]
bottle_neck = False
else:
raise ValueError("no experiments done on num_layers {}".format(num_layers))
units = per_unit * num_stages
else:
if num_layers >= 50:
filter_list = [64, 256, 512, 1024, 2048]
bottle_neck = True
else:
filter_list = [64, 64, 128, 256, 512]
bottle_neck = False
num_stages = 4
if num_layers == 18:
units = [2, 2, 2, 2]
elif num_layers == 34:
units = [3, 4, 6, 3]
elif num_layers == 50:
units = [3, 4, 6, 3]
elif num_layers == 101:
units = [3, 4, 23, 3]
elif num_layers == 152:
units = [3, 8, 36, 3]
elif num_layers == 200:
units = [3, 24, 36, 3]
elif num_layers == 269:
units = [3, 30, 48, 8]
else:
raise ValueError("no experiments done on num_layers {}".format(num_layers))
return resnet(
units=units,
num_stages=num_stages,
filter_list=filter_list,
num_classes=num_classes,
data_shape=data_shape,
bottle_neck=bottle_neck,
layout=layout,
dtype=dtype,
)
def get_workload(
batch_size=1,
num_classes=1000,
num_layers=18,
image_shape=(3, 16, 112, 112),
layout="NCDHW",
dtype="float32",
**kwargs,
):
"""Get benchmark workload for resnet
Parameters
----------
batch_size : int
The batch size used in the model
num_classes : int, optional
Number of classes
num_layers : int, optional
Number of layers
image_shape : tuple, optional
The input image shape
layout: str
The data layout for conv3d
dtype : str, optional
The data type
kwargs : dict
Extra arguments
Returns
-------
mod : tvm.IRModule
The relay module that contains a ResNet network.
params : dict of str to NDArray
The parameters.
"""
net = get_net(
batch_size=batch_size,
num_classes=num_classes,
num_layers=num_layers,
image_shape=image_shape,
dtype=dtype,
layout=layout,
**kwargs,
)
return create_workload(net)
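# Usage sketch: the 3-D variant expects 5-D input, (batch, C, D, H, W) for the
# default NCDHW layout, so image_shape carries (channels, depth, height, width).
if __name__ == "__main__":
    mod, params = get_workload(batch_size=1, num_layers=18, image_shape=(3, 16, 112, 112))
    print(mod["main"])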
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/squeezenet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=unused-argument
"""
Symbol of SqueezeNet
Reference:
Iandola, Forrest N., et al.
"Squeezenet: Alexnet-level accuracy with 50x fewer parameters and< 0.5 mb model size." (2016).
"""
from tvm import relay
from .init import create_workload
from . import layers
# Helpers
def _make_fire(net, squeeze_channels, expand1x1_channels, expand3x3_channels, prefix):
net = _make_fire_conv(net, squeeze_channels, 1, 0, "%s_input" % prefix)
left = _make_fire_conv(net, expand1x1_channels, 1, 0, "%s_left" % prefix)
right = _make_fire_conv(net, expand3x3_channels, 3, 1, "%s_right" % prefix)
# NOTE : Assume NCHW layout here
net = relay.concatenate((left, right), axis=1)
return net
def _make_fire_conv(net, channels, kernel_size, padding=0, prefix=""):
net = layers.conv2d(
net,
channels=channels,
kernel_size=(kernel_size, kernel_size),
padding=(padding, padding),
name="%s_conv" % prefix,
)
net = relay.nn.bias_add(net, relay.var("%s_conv_bias" % prefix))
net = relay.nn.relu(net)
return net
# Net
def get_net(batch_size, image_shape, num_classes, version, dtype):
"""Get symbol of SqueezeNet
Parameters
----------
batch_size : int
The batch size used in the model
image_shape : tuple, optional
The input image shape
num_classes: int
The number of classification results
version : str, optional
"1.0" or "1.1" of SqueezeNet
"""
assert version in [
"1.0",
"1.1",
], "Unsupported SqueezeNet version {version}:" "1.0 or 1.1 expected".format(version=version)
data_shape = (batch_size,) + image_shape
net = relay.var("data", shape=data_shape, dtype=dtype)
if version == "1.0":
net = layers.conv2d(
net, channels=96, kernel_size=(7, 7), strides=(2, 2), padding=(3, 3), name="conv1"
)
net = relay.nn.bias_add(net, relay.var("conv1_bias"))
net = relay.nn.relu(net)
net = relay.nn.max_pool2d(net, pool_size=(3, 3), strides=(2, 2))
net = _make_fire(net, 16, 64, 64, "fire1")
net = _make_fire(net, 16, 64, 64, "fire2")
net = _make_fire(net, 32, 128, 128, "fire3")
net = relay.nn.max_pool2d(net, pool_size=(3, 3), strides=(2, 2))
net = _make_fire(net, 32, 128, 128, "fire4")
net = _make_fire(net, 48, 192, 192, "fire5")
net = _make_fire(net, 48, 192, 192, "fire6")
net = _make_fire(net, 64, 256, 256, "fire7")
net = relay.nn.max_pool2d(net, pool_size=(3, 3), strides=(2, 2))
net = _make_fire(net, 64, 256, 256, "fire8")
else:
net = layers.conv2d(
net, channels=64, kernel_size=(3, 3), strides=(2, 2), padding=(1, 1), name="conv1"
)
net = relay.nn.bias_add(net, relay.var("conv1_bias"))
net = relay.nn.relu(net)
net = relay.nn.max_pool2d(net, pool_size=(3, 3), strides=(2, 2))
net = _make_fire(net, 16, 64, 64, "fire1")
net = _make_fire(net, 16, 64, 64, "fire2")
net = relay.nn.max_pool2d(net, pool_size=(3, 3), strides=(2, 2))
net = _make_fire(net, 32, 128, 128, "fire3")
net = _make_fire(net, 32, 128, 128, "fire4")
net = relay.nn.max_pool2d(net, pool_size=(3, 3), strides=(2, 2))
net = _make_fire(net, 48, 192, 192, "fire5")
net = _make_fire(net, 48, 192, 192, "fire6")
net = _make_fire(net, 64, 256, 256, "fire7")
net = _make_fire(net, 64, 256, 256, "fire8")
net = relay.nn.dropout(net, rate=0.5)
net = layers.conv2d(net, channels=num_classes, kernel_size=(1, 1), name="conv_final")
net = relay.nn.bias_add(net, relay.var("conv_final_bias"))
net = relay.nn.relu(net)
net = relay.nn.global_avg_pool2d(net)
net = relay.nn.batch_flatten(net)
net = relay.nn.softmax(net)
args = relay.analysis.free_vars(net)
return relay.Function(args, net)
def get_workload(
batch_size=1, num_classes=1000, version="1.0", image_shape=(3, 224, 224), dtype="float32"
):
"""Get benchmark workload for SqueezeNet
Parameters
----------
batch_size : int
The batch size used in the model
num_classes : int, optional
Number of classes
version : str, optional
"1.0" or "1.1" of SqueezeNet
image_shape : tuple, optional
The input image shape
dtype : str, optional
The data type
Returns
-------
mod : tvm.IRModule
The relay module that contains a SqueezeNet network.
params : dict of str to NDArray
The parameters.
"""
net = get_net(batch_size, image_shape, num_classes, version, dtype)
return create_workload(net)
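# Usage sketch: `version` selects the 1.0 or 1.1 topology; both return an
# IRModule plus a randomly initialized parameter dict.
if __name__ == "__main__":
    mod, params = get_workload(batch_size=1, version="1.1")
    print(mod["main"])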
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/synthetic.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Synthetic networks for testing purposes. Ideally, these networks are similar in
structure to real world networks, but are much smaller in order to make testing
faster.
"""
from __future__ import absolute_import
from tvm import relay
from .init import create_workload, Constant
from . import layers
def get_net(input_shape=(1, 3, 24, 12), dtype="float32", wtype=None):
"""Get synthetic testing network.
Parameters
----------
    input_shape : tuple, optional
The input shape as (batch_size, channels, height, width).
dtype : str, optional
The data type for the input.
wtype : str, optional
The data type for weights. Defaults to `dtype`.
Returns
-------
net : relay.Function
        The synthetic dataflow graph.
"""
if wtype is None:
wtype = dtype
data = relay.var("data", shape=input_shape, dtype=dtype)
dense_shape = [-1, input_shape[3]]
dense = relay.nn.relu(
relay.nn.dense(
relay.reshape(data, dense_shape),
relay.var("dense_weight", shape=[input_shape[3], dense_shape[1]], dtype=wtype),
)
)
dense = relay.reshape_like(dense, data)
conv_shape = [input_shape[1], input_shape[1], 3, 3]
conv = relay.nn.softmax(
relay.nn.conv2d(
data,
relay.var("conv_weight", shape=conv_shape, dtype=wtype),
padding=1,
kernel_size=3,
)
)
added = relay.add(dense, conv)
biased = layers.batch_norm_infer(
relay.nn.bias_add(added, relay.var("bias", dtype=wtype)), name="batch_norm"
)
dense = relay.nn.relu(
relay.nn.dense(
relay.reshape(biased, dense_shape),
relay.var("dense2_weight", shape=[input_shape[3], dense_shape[1]], dtype=wtype),
)
)
dense = relay.reshape_like(dense, data)
conv = relay.nn.softmax(
relay.nn.conv2d(
biased,
relay.var("conv2_weight", shape=conv_shape, dtype=wtype),
padding=1,
kernel_size=3,
)
)
added = relay.add(dense, conv)
args = relay.analysis.free_vars(added)
return relay.Function(args, added)
def get_workload(input_shape=(1, 3, 24, 12), dtype="float32", wtype=None):
"""Get benchmark workload for the synthetic net.
Parameters
----------
    input_shape : tuple, optional
The input shape as (batch_size, channels, height, width).
dtype : str, optional
The data type for the input.
wtype : str, optional
The data type for weights. Defaults to `dtype`.
Returns
-------
mod : tvm.IRModule
The relay module that contains a synthetic network.
params : dict of str to NDArray
The parameters.
"""
return create_workload(
get_net(input_shape=input_shape, dtype=dtype, wtype=wtype),
initializer=Constant(),
)
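# Usage sketch: the synthetic net is deliberately tiny, which makes it handy
# for exercising build and test pipelines quickly; its parameters come back
# Constant-initialized.
if __name__ == "__main__":
    mod, params = get_workload()
    for name, arr in params.items():
        print(name, arr.shape)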
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/temp_op_attr.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Defines a TempOpAttr class that allows temporarily changing an attr of the
operator to allow unit testing. This is useful for AlterOpLayout and Legalize
tests."""
from tvm import relay
class TempOpAttr(object):
"""Temporarily changes the attr of an op."""
def __init__(self, op_name, attr_key, attr_value):
"""Saves the required info for RAII pattern usage.
Parameters
----------
op_name : str
The op name.
attr_key : str
The attribute name.
attr_value : object
The attribute value.
Examples
--------
.. code-block:: python
# Temporarily update FTVMAlterOpLayout to a user-defined packed function.
# After the test is finished, the attr value will be set back to the original value.
with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
my_mod = relay.transform.AlterOpLayout()(my_mod)
"""
self.op = relay.op.get(op_name)
self.attr_key = attr_key
self.attr_value = attr_value
def __enter__(self):
self.older_attr = self.op.get_attr(self.attr_key)
self.op.reset_attr(self.attr_key)
self.op.set_attr(self.attr_key, self.attr_value)
return self
def __exit__(self, ptype, value, trace):
self.op.reset_attr(self.attr_key)
if self.older_attr:
self.op.set_attr(self.attr_key, self.older_attr)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/tf.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument, no-init, import-outside-toplevel
"""
Tensorflow Model Helpers
========================
Some helper definitions for tensorflow models.
"""
import re
import os.path
import collections
import numpy as np
# Tensorflow imports
import tensorflow as tf
from tensorflow.core.framework import graph_pb2
import tvm
from tvm.contrib.download import download_testdata
try:
tf_compat_v1 = tf.compat.v1
except (ImportError, AttributeError):
tf_compat_v1 = tf
######################################################################
# Some helper functions
# ---------------------
def ProcessGraphDefParam(graph_def):
"""Type-checks and possibly canonicalizes `graph_def`.
Parameters
----------
graph_def : Obj
tensorflow graph definition.
Returns
-------
graph_def : Obj
tensorflow graph definition
"""
if not isinstance(graph_def, graph_pb2.GraphDef):
# `graph_def` could be a dynamically-created message, so try a duck-typed
# approach
try:
old_graph_def = graph_def
graph_def = graph_pb2.GraphDef()
graph_def.MergeFrom(old_graph_def)
except TypeError:
raise TypeError("graph_def must be a GraphDef proto.")
return graph_def
def convert_to_list(x):
if not isinstance(x, list):
x = [x]
return x
def vmobj_to_list(o):
"""Converts TVM objects returned by VM execution to Python List.
Parameters
----------
o : Obj
VM Object as output from VM runtime executor.
Returns
-------
result : list
Numpy objects as list with equivalent values to the input object.
"""
if isinstance(o, tvm.nd.NDArray):
result = [o.numpy()]
elif isinstance(o, tvm.runtime.container.ADT):
result = []
for f in o:
result.extend(vmobj_to_list(f))
elif isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
if o.constructor.name_hint == "Cons":
tl = vmobj_to_list(o.fields[1])
hd = vmobj_to_list(o.fields[0])
hd.extend(tl)
result = hd
elif o.constructor.name_hint == "Nil":
result = []
elif "tensor_nil" in o.constructor.name_hint:
result = [0]
elif "tensor" in o.constructor.name_hint:
result = [o.fields[0].numpy()]
else:
raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint)
else:
raise RuntimeError("Unknown object type: %s" % type(o))
return result
def AddShapesToGraphDef(session, out_node):
"""Add shapes attribute to nodes of the graph.
Input graph here is the default graph in context.
Parameters
----------
session : tf.Session
Tensorflow session
out_node : String or List
Final output node of the graph.
Returns
-------
graph_def : Obj
tensorflow graph definition with shapes attribute added to nodes.
"""
graph_def = tf_compat_v1.graph_util.convert_variables_to_constants(
session,
session.graph.as_graph_def(add_shapes=True),
convert_to_list(out_node),
)
return graph_def
class NodeLookup(object):
"""Converts integer node ID's to human readable labels."""
def __init__(self, label_lookup_path=None, uid_lookup_path=None):
self.node_lookup = self.load(label_lookup_path, uid_lookup_path)
def load(self, label_lookup_path, uid_lookup_path):
"""Loads a human readable English name for each softmax node.
Parameters
----------
label_lookup_path: String
            File containing string UID to integer node ID mapping.
uid_lookup_path: String
File containing String UID to human-readable string mapping.
Returns
-------
node_id_to_name: dict
dict from integer node ID to human-readable string.
"""
        if not tf_compat_v1.gfile.Exists(uid_lookup_path):
            tf_compat_v1.logging.fatal("File does not exist %s", uid_lookup_path)
        if not tf_compat_v1.gfile.Exists(label_lookup_path):
            tf_compat_v1.logging.fatal("File does not exist %s", label_lookup_path)
# Loads mapping from string UID to human-readable string
proto_as_ascii_lines = tf_compat_v1.gfile.GFile(uid_lookup_path).readlines()
uid_to_human = {}
p = re.compile(r"[n\d]*[ \S,]*")
for line in proto_as_ascii_lines:
parsed_items = p.findall(line)
uid = parsed_items[0]
human_string = parsed_items[2]
uid_to_human[uid] = human_string
# Loads mapping from string UID to integer node ID.
node_id_to_uid = {}
proto_as_ascii = tf_compat_v1.gfile.GFile(label_lookup_path).readlines()
for line in proto_as_ascii:
if line.startswith(" target_class:"):
target_class = int(line.split(": ")[1])
if line.startswith(" target_class_string:"):
target_class_string = line.split(": ")[1]
node_id_to_uid[target_class] = target_class_string[1:-2]
# Loads the final mapping of integer node ID to human-readable string
node_id_to_name = {}
for key, val in node_id_to_uid.items():
if val not in uid_to_human:
                tf_compat_v1.logging.fatal("Failed to locate: %s", val)
name = uid_to_human[val]
node_id_to_name[key] = name
return node_id_to_name
def id_to_string(self, node_id):
if node_id not in self.node_lookup:
return ""
return self.node_lookup[node_id]
def get_workload_official(model_url, model_sub_path, retries=5):
"""Import workload from tensorflow official
Parameters
----------
model_url: str
URL from where it will be downloaded.
    model_sub_path: str
        Sub path in the extracted tar for the frozen protobuf file.
retries: int
The number of retries to attempt downloading and uncompressing
the model in the CI, due to possible network and CI node issues.
Returns
-------
model_path: str
Full path to saved model file
"""
attempts = retries + 1
error = None
for current_attempt_idx in range(attempts):
try:
model_tar_name = os.path.basename(model_url)
model_path = download_testdata(model_url, model_tar_name, module=["tf", "official"])
dir_path = os.path.dirname(model_path)
if model_path.endswith("tgz") or model_path.endswith("gz"):
import tarfile
tar = tarfile.open(model_path)
tar.extractall(path=dir_path)
tar.close()
elif model_path.endswith("zip"):
import zipfile
zip_object = zipfile.ZipFile(model_path)
zip_object.extractall(path=dir_path)
zip_object.close()
else:
raise RuntimeError("Could not decompress the file: " + model_path)
return os.path.join(dir_path, model_sub_path)
except (EOFError, RuntimeError) as err:
error = err
print(f"Raised : {str(error)}, current attempt : {current_attempt_idx} ...")
raise error
def get_workload(model_path, model_sub_path=None, inputs_dict=None, output=None):
"""Import workload from frozen protobuf
Parameters
----------
model_path: str
model_path on remote repository to download from.
    model_sub_path: str
        Model path in the compressed archive.
    inputs_dict: dict, optional
        Mapping from input tensor name to a placeholder that replaces it.
    output: str or list, optional
        Final output node(s); required when inputs_dict is given.
Returns
-------
graph_def: graphdef
graph_def is the tensorflow workload.
"""
if model_sub_path:
path_model = get_workload_official(model_path, model_sub_path)
else:
repo_base = "https://github.com/dmlc/web-data/raw/main/tensorflow/models/"
model_url = os.path.join(repo_base, model_path)
path_model = download_testdata(model_url, model_path, module="tf")
# Creates graph from saved graph_def.pb.
with tf_compat_v1.gfile.FastGFile(path_model, "rb") as f:
graph_def = tf_compat_v1.GraphDef()
graph_def.ParseFromString(f.read())
graph = tf_compat_v1.import_graph_def(graph_def, name="", input_map=inputs_dict)
if inputs_dict is not None:
# graph is changed so generate graph_def again
with tf_compat_v1.Session(graph=graph) as sess:
graph_def = AddShapesToGraphDef(sess, output)
return graph_def
#######################################################################
# PTB LSTMBlockCell Model
# -----------------------
class PTBSmallConfig(object):
"""Small config.
    This configuration is used when training the model.
"""
num_layers = 2
num_steps = 1
hidden_size = 200
batch_size = 1
vocab_size = 10000
init_scale = 0.1
def get_config():
"""Configuration used for training the model"""
return PTBSmallConfig()
def pick_from_weight(weight, pows=1.0):
"""Identify token from Softmax output.
This token will be mapped to word in the vocabulary.
"""
weight = weight**pows
t = np.cumsum(weight)
s = np.sum(weight)
return int(np.searchsorted(t, 0.5 * s))
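# Illustrative example (deterministic, pure numpy): the function returns the
# index of the weighted median after raising the weights to `pows`, e.g.
#   pick_from_weight(np.array([0.1, 0.7, 0.2]))  ->  1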
def do_tf_sample(session, data, in_states, num_samples):
"""Sampled from the model"""
samples = []
sample = None
# Cell inputs c and h should be passed for each layer explicitly.
state_input_name = [
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros:0",
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros_1:0",
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros:0",
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros_1:0",
]
state = in_states
    # Graph nodes to be fetched as run output. The Tensorflow LSTMBlockCell creates
    # internal nodes for intermediate operations (gates) in the cell during the run.
    # Cell state (c) is ':1' and cell output (h) is ':6' for each layer.
fetches = [
[
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:1",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:6",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:1",
"Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:6",
],
"Model/Softmax:0",
]
def _get_feed_dict(input_name, input_data):
"""Create feed dict"""
feed_dict = {}
if isinstance(input_data, list):
for i, e in enumerate(input_name):
feed_dict[e] = input_data[i]
else:
feed_dict[input_name] = input_data
return feed_dict
for x in data:
feed_dict = _get_feed_dict(state_input_name, state)
feed_dict["Model/Placeholder:0"] = [[x]]
state, probs = session.run(fetches, feed_dict)
sample = pick_from_weight(probs[0])
if sample is not None:
samples.append(sample)
else:
samples.append(0)
k = 1
while k < num_samples:
feed_dict = _get_feed_dict(state_input_name, state)
feed_dict["Model/Placeholder:0"] = [[samples[-1]]]
state, probs = session.run(fetches, feed_dict)
sample = pick_from_weight(probs[0])
samples.append(sample)
k += 1
return samples, state
def _create_ptb_vocabulary(data_dir):
"""Read the PTB sample data input to create vocabulary"""
data_path = os.path.join(data_dir, "simple-examples/data/")
file_name = "ptb.train.txt"
def _read_words(filename):
"""Read the data for creating vocabulary"""
with tf_compat_v1.gfile.GFile(filename, "r") as f:
return f.read().encode("utf-8").decode("utf-8").replace("\n", "<eos>").split()
def _build_vocab(filename):
"""Create vocabulary"""
data = _read_words(filename)
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(len(words))))
# for python 3.x
id_to_word = dict((v, k) for k, v in word_to_id.items())
return word_to_id, id_to_word
def ptb_raw_data(data_path, file_name):
"""Read the sample data and create vocabulary"""
train_path = os.path.join(data_path, file_name)
word_to_id, id_2_word = _build_vocab(train_path)
return word_to_id, id_2_word
return ptb_raw_data(data_path, file_name)
def get_workload_ptb():
"""Import ptb workload from frozen protobuf
Parameters
----------
Nothing.
Returns
-------
graph_def: graphdef
graph_def is the tensorflow workload for ptb.
word_to_id : dict
English word to integer id mapping
id_to_word : dict
Integer id to English word mapping
"""
sample_repo = "http://www.fit.vutbr.cz/~imikolov/rnnlm/"
sample_data_file = "simple-examples.tgz"
sample_url = sample_repo + sample_data_file
ptb_model_file = "RNN/ptb/ptb_model_with_lstmblockcell.pb"
# pylint: disable=import-outside-toplevel
import tarfile
file_path = download_testdata(sample_url, sample_data_file, module=["data", "ptb_data"])
dir_path = os.path.dirname(file_path)
t = tarfile.open(file_path, "r")
t.extractall(dir_path)
word_to_id, id_to_word = _create_ptb_vocabulary(dir_path)
dtype = "float32"
shape = (1, 200)
# Convert states of LSTMBlockCell to placeholder, so TVM can feed data
state_name = [
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros:0",
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros_1:0",
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros:0",
"Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros_1:0",
]
inputs_dict = {
state_name[0]: tf_compat_v1.placeholder(dtype, shape, state_name[0].split(":")[0]),
state_name[1]: tf_compat_v1.placeholder(dtype, shape, state_name[1].split(":")[0]),
state_name[2]: tf_compat_v1.placeholder(dtype, shape, state_name[2].split(":")[0]),
state_name[3]: tf_compat_v1.placeholder(dtype, shape, state_name[3].split(":")[0]),
}
return (
word_to_id,
id_to_word,
get_workload(ptb_model_file, inputs_dict=inputs_dict, output="Model/Softmax"),
)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/tflite.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common utilities for creating TFLite models"""
from distutils.version import LooseVersion
import numpy as np
import pytest
import tvm
pytest.importorskip("tflite")
pytest.importorskip("tensorflow")
import tflite.Model # pylint: disable=wrong-import-position
import tensorflow as tf # pylint: disable=wrong-import-position
class TFLiteModel:
"""Creates TFLite Model and facilitates reference data generation"""
def __init__(self, dtype):
self.serial_model = None # This is what TFLite convert() provides
self.dtype = dtype # This is the dtype of graph inputs
self.shape_dict = {}
self.dtype_dict = {}
def create_conv2d_single(self, kernel_shape, strides, padding, dilation, activation):
"""Returns tf.function that creates TFLite Conv2d layer"""
@tf.function
def conv2d_single_function(ifm_tensor):
"""Returns TFLite Conv2d layer"""
op = tf.nn.conv2d(
ifm_tensor,
filters=tf.constant(
np.random.uniform(size=[kernel_shape[0], kernel_shape[1], 3, 3]),
dtype=tf.float32,
),
strides=[1, strides[0], strides[1], 1],
padding=padding,
dilations=dilation,
)
if activation == "RELU":
op = tf.nn.relu(op)
elif activation == "NONE":
pass
else:
assert False, "Unsupported activation {}".format(activation)
return op
return conv2d_single_function
def create_tflite_model(self, tfl_function, shapes, ranges=None):
"""Creates TFLite serial graph"""
tensor_specs = []
for i, shape in enumerate(shapes):
input_name = "input_" + str(i)
self.shape_dict.update({input_name: shape})
self.dtype_dict.update({input_name: self.dtype})
tensor_specs.append(tf.TensorSpec(shape, dtype=tf.float32, name=input_name))
concrete_func = tfl_function.get_concrete_function(*tensor_specs)
if not ranges:
ranges = [(0, 1) for _ in shapes]
def representative_dataset():
for _ in range(100):
inputs = []
for i, shape in enumerate(shapes):
data = np.random.uniform(
low=ranges[i][0], high=ranges[i][1], size=tuple(shape)
).astype("float32")
inputs.append(data)
yield inputs
converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
self.serial_model = converter.convert()
def convert_to_relay(self):
"""Converts TFLite serialized graph into Relay"""
assert self.serial_model is not None, "TFLite model is empty!"
tflite_model = tflite.Model.Model.GetRootAsModel(self.serial_model, 0)
relay_module, relay_params = tvm.relay.frontend.from_tflite(
tflite_model, self.shape_dict, self.dtype_dict
)
return relay_module, relay_params
def generate_randomized_input_data(self, seed, shape, dtype):
"""Generates randomized input numpy arrays based on shape and dtype."""
random_state = np.random.RandomState(seed)
random_data = None
if dtype == np.float32:
            random_data = random_state.uniform(-1, 1, shape).astype(dtype)
else:
low = np.iinfo(dtype).min
high = np.iinfo(dtype).max + 1
random_data = random_state.randint(low, high, shape, dtype)
return random_data
# pylint: disable=import-outside-toplevel
def generate_reference_data(self):
"""
This method uses TFLite reference kernels to generate reference output.
It returns randomized inputs and reference outputs.
"""
assert self.serial_model is not None, "TFLite model was not created."
output_tolerance = None
        if LooseVersion(tf.__version__) < LooseVersion("2.5.0"):
output_tolerance = 1
interpreter = tf.lite.Interpreter(model_content=self.serial_model)
else:
output_tolerance = 0
interpreter = tf.lite.Interpreter(
model_content=self.serial_model,
experimental_op_resolver_type=tf.lite.experimental.OpResolverType.BUILTIN_REF,
experimental_preserve_all_tensors=False,
)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Generate predictable randomized input
seed = 0
input_data = {}
for input_detail in input_details:
input_values = self.generate_randomized_input_data(
seed, input_detail["shape"], input_detail["dtype"]
)
interpreter.set_tensor(input_detail["index"], input_values)
input_data.update({input_detail["name"]: input_values})
interpreter.invoke()
# Obtain the expected output from interpreter
expected_output_data = {}
for output_detail in output_details:
expected_output_data.update(
{output_detail["name"]: interpreter.get_tensor(output_detail["index"])}
)
return input_data, expected_output_data, output_tolerance
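# A minimal end-to-end sketch (assumes TensorFlow and the tflite package are
# installed; the layer configuration below is illustrative): build one int8
# conv2d model, convert it to Relay, and generate reference data with the
# TFLite reference kernels.
if __name__ == "__main__":
    model = TFLiteModel(dtype="int8")
    conv2d_fn = model.create_conv2d_single(
        kernel_shape=(3, 3), strides=(1, 1), padding="SAME", dilation=(1, 1), activation="RELU"
    )
    model.create_tflite_model(conv2d_fn, shapes=[(1, 32, 32, 3)])
    relay_mod, relay_params = model.convert_to_relay()
    inputs, ref_outputs, tolerance = model.generate_reference_data()
    print(list(inputs), list(ref_outputs), tolerance)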
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/vgg.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""References:
Simonyan, Karen, and Andrew Zisserman. "Very deep convolutional networks for
large-scale image recognition." arXiv preprint arXiv:1409.1556 (2014).
"""
from tvm import relay
from .init import create_workload
from . import layers as wrapper
def get_feature(internal_layer, layers, filters, batch_norm=False):
"""Get VGG feature body as stacks of convolutions."""
for i, num in enumerate(layers):
for j in range(num):
internal_layer = wrapper.conv2d(
data=internal_layer,
kernel_size=(3, 3),
padding=(1, 1),
channels=filters[i],
name="conv%s_%s" % (i + 1, j + 1),
)
internal_layer = relay.nn.bias_add(
internal_layer, relay.var("conv%s_%s_bias" % (i + 1, j + 1))
)
if batch_norm:
internal_layer = wrapper.batch_norm_infer(
data=internal_layer, name="bn%s_%s" % (i + 1, j + 1)
)
internal_layer = relay.nn.relu(data=internal_layer)
internal_layer = relay.nn.max_pool2d(data=internal_layer, pool_size=(2, 2), strides=(2, 2))
return internal_layer
def get_classifier(input_data, num_classes):
"""Get VGG classifier layers as fc layers."""
flatten = relay.nn.batch_flatten(data=input_data)
fc6 = wrapper.dense_add_bias(data=flatten, units=4096, name="fc6")
relu6 = relay.nn.relu(data=fc6)
drop6 = relay.nn.dropout(data=relu6, rate=0.5)
fc7 = wrapper.dense_add_bias(data=drop6, units=4096, name="fc7")
relu7 = relay.nn.relu(data=fc7)
drop7 = relay.nn.dropout(data=relu7, rate=0.5)
fc8 = wrapper.dense_add_bias(data=drop7, units=num_classes, name="fc8")
return fc8
def get_net(batch_size, image_shape, num_classes, dtype, num_layers=11, batch_norm=False):
"""
Parameters
----------
batch_size : int
The batch size used in the model
image_shape : tuple, optional
The input image shape
num_classes : int, optional
        Number of classes
dtype : str, optional
The data type
num_layers : int
Number of layers for the variant of vgg. Options are 11, 13, 16, 19.
batch_norm : bool, default False
Use batch normalization.
"""
vgg_spec = {
11: ([1, 1, 2, 2, 2], [64, 128, 256, 512, 512]),
13: ([2, 2, 2, 2, 2], [64, 128, 256, 512, 512]),
16: ([2, 2, 3, 3, 3], [64, 128, 256, 512, 512]),
19: ([2, 2, 4, 4, 4], [64, 128, 256, 512, 512]),
}
if num_layers not in vgg_spec:
raise ValueError("Invalide num_layers {}. Choices are 11,13,16,19.".format(num_layers))
layers, filters = vgg_spec[num_layers]
data_shape = (batch_size,) + image_shape
data = relay.var("data", shape=data_shape, dtype=dtype)
feature = get_feature(data, layers, filters, batch_norm)
classifier = get_classifier(feature, num_classes)
symbol = relay.nn.softmax(data=classifier)
args = relay.analysis.free_vars(symbol)
return relay.Function(args, symbol)
def get_workload(
batch_size,
num_classes=1000,
image_shape=(3, 224, 224),
dtype="float32",
num_layers=11,
batch_norm=False,
):
"""Get benchmark workload for VGG nets.
Parameters
----------
batch_size : int
The batch size used in the model
num_classes : int, optional
        Number of classes
image_shape : tuple, optional
The input image shape
dtype : str, optional
The data type
num_layers : int
Number of layers for the variant of vgg. Options are 11, 13, 16, 19.
batch_norm : bool
Use batch normalization.
Returns
-------
mod : tvm.IRModule
The relay module that contains a VGG network.
params : dict of str to NDArray
The parameters.
"""
net = get_net(batch_size, image_shape, num_classes, dtype, num_layers, batch_norm)
return create_workload(net)
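# A minimal usage sketch (illustrative, not part of the upstream module): build the
# VGG-11 workload defined above and inspect its inferred output type. Guarded so
# importing this module stays side-effect free.
if __name__ == "__main__":
    # Batch-1, 1000-class VGG-11 over 3x224x224 inputs (the defaults above).
    mod, params = get_workload(batch_size=1)
    # create_workload runs type inference, so the return type is fully known.
    print(mod["main"].ret_type)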
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/testing/yolo_detection.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument, no-init,
"""
Yolo detection boxes helper functions
=====================================
DarkNet helper functions for yolo and image loading.
These functions will not be loaded by default.
They are utility functions used for testing and tutorials.
"""
from __future__ import division
import math
from collections import namedtuple
from functools import cmp_to_key
import numpy as np
Box = namedtuple("Box", ["x", "y", "w", "h"])
def nms_comparator(a, b):
    """Compare two detections by class probability (or objectness) for sorting."""
if "sort_class" in b and b["sort_class"] >= 0:
diff = a["prob"][b["sort_class"]] - b["prob"][b["sort_class"]]
else:
diff = a["objectness"] - b["objectness"]
return diff
def _correct_boxes(dets, w, h, netw, neth, relative):
new_w, new_h = (netw, (h * netw) // w) if (netw / w < neth / h) else ((w * neth // h), neth)
for det in dets:
b = det["bbox"]
b = b._replace(x=(b.x - (netw - new_w) / 2 / netw) / (new_w / netw))
b = b._replace(y=(b.y - (neth - new_h) / 2 / neth) / (new_h / neth))
b = b._replace(w=b.w * netw / new_w)
b = b._replace(h=b.h * neth / new_h)
if not relative:
b = b._replace(x=b.x * w)
b = b._replace(w=b.w * w)
b = b._replace(y=b.y * h)
b = b._replace(h=b.h * h)
det["bbox"] = b
return dets
def _overlap(x1, w1, x2, w2):
l1 = x1 - w1 / 2
l2 = x2 - w2 / 2
left = l1 if l1 > l2 else l2
r1 = x1 + w1 / 2
r2 = x2 + w2 / 2
right = r1 if r1 < r2 else r2
return right - left
def _box_intersection(a, b):
w = _overlap(a.x, a.w, b.x, b.w)
h = _overlap(a.y, a.h, b.y, b.h)
if w < 0 or h < 0:
return 0
return w * h
def _box_union(a, b):
i = _box_intersection(a, b)
u = a.w * a.h + b.w * b.h - i
return u
def _box_iou(a, b):
return _box_intersection(a, b) / _box_union(a, b)
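# Worked example (illustrative): two unit squares whose centers are half a width
# apart intersect in a 0.5 x 1.0 strip, so IoU = 0.5 / (1 + 1 - 0.5) = 1/3:
#     _box_iou(Box(0.0, 0.0, 1.0, 1.0), Box(0.5, 0.0, 1.0, 1.0))  # ~0.3333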
def _get_box(data, biases, n, location, lw, lh, w, h):
bx = (location[2] + data[location[0]][0][location[1]][location[2]]) / lw
by = (location[1] + data[location[0]][1][location[1]][location[2]]) / lh
bw = np.exp(data[location[0]][2][location[1]][location[2]]) * biases[2 * n] / w
bh = np.exp(data[location[0]][3][location[1]][location[2]]) * biases[2 * n + 1] / h
return Box(bx, by, bw, bh)
def _get_yolo_detections(l, im_shape, net_shape, thresh, relative, dets):
data = l["output"]
active_data_loc = np.asarray(np.where(data[:, 4, :, :] > thresh))
before_correct_dets = []
for i in range(active_data_loc.shape[1]):
location = [active_data_loc[0][i], active_data_loc[1][i], active_data_loc[2][i]]
box_b = _get_box(
data,
l["biases"],
np.asarray(l["mask"])[location[0]],
location,
data.shape[3],
data.shape[2],
net_shape[0],
net_shape[1],
)
objectness = data[location[0]][4][location[1]][location[2]]
classes = l["classes"]
prob = objectness * data[location[0], 5 : 5 + 1 + classes, location[1], location[2]]
prob[prob < thresh] = 0
detection = {}
detection["bbox"] = box_b
detection["classes"] = classes
detection["prob"] = prob
detection["objectness"] = objectness
before_correct_dets.append(detection)
dets.extend(
_correct_boxes(
before_correct_dets, im_shape[0], im_shape[1], net_shape[0], net_shape[1], relative
)
)
def _get_region_detections(l, im_shape, net_shape, thresh, relative, dets):
data = l["output"]
before_correct_dets = []
for row in range(data.shape[2]):
for col in range(data.shape[3]):
for n in range(data.shape[0]):
prob = [0] * l["classes"]
scale = data[n, l["coords"], row, col] if not l["background"] else 1
location = [n, row, col]
box_b = _get_box(
data,
l["biases"],
n,
location,
data.shape[3],
data.shape[2],
data.shape[3],
data.shape[2],
)
objectness = scale if scale > thresh else 0
if objectness:
prob = (
scale * data[n, l["coords"] + 1 : l["coords"] + 1 + l["classes"], row, col]
)
prob[prob < thresh] = 0
detection = {}
detection["bbox"] = box_b
detection["prob"] = prob
detection["objectness"] = objectness
before_correct_dets.append(detection)
_correct_boxes(
before_correct_dets, im_shape[0], im_shape[1], net_shape[0], net_shape[1], relative
)
dets.extend(before_correct_dets)
def fill_network_boxes(net_shape, im_shape, thresh, relative, tvm_out):
dets = []
for layer in tvm_out:
if layer["type"] == "Yolo":
_get_yolo_detections(layer, im_shape, net_shape, thresh, relative, dets)
elif layer["type"] == "Region":
_get_region_detections(layer, im_shape, net_shape, thresh, relative, dets)
return dets
def do_nms_sort(dets, classes, thresh):
"Does the sorting based on the threshold values"
k = len(dets) - 1
cnt = 0
while cnt < k:
if dets[cnt]["objectness"] == 0:
dets[k], dets[cnt] = dets[cnt], dets[k]
k = k - 1
else:
cnt = cnt + 1
total = k + 1
for k in range(classes):
for i in range(total):
dets[i]["sort_class"] = k
dets[0:total] = sorted(dets[0:total], key=cmp_to_key(nms_comparator), reverse=True)
for i in range(total):
if dets[i]["prob"][k] == 0:
continue
a = dets[i]["bbox"]
for j in range(i + 1, total):
b = dets[j]["bbox"]
if _box_iou(a, b) > thresh:
dets[j]["prob"][k] = 0
def get_detections(im, det, thresh, names, classes):
"Draw the markings around the detected region"
labelstr = []
category = -1
detection = None
valid = False
for j in range(classes):
if det["prob"][j] > thresh:
if category == -1:
category = j
labelstr.append(names[j] + " " + str(round(det["prob"][j], 4)))
if category > -1:
valid = True
imc, imh, imw = im.shape
width = int(imh * 0.006)
offset = category * 123457 % classes
red = _get_color(2, offset, classes)
green = _get_color(1, offset, classes)
blue = _get_color(0, offset, classes)
rgb = [red, green, blue]
b = det["bbox"]
left = int((b.x - b.w / 2.0) * imw)
right = int((b.x + b.w / 2.0) * imw)
top = int((b.y - b.h / 2.0) * imh)
bot = int((b.y + b.h / 2.0) * imh)
if left < 0:
left = 0
if right > imw - 1:
right = imw - 1
if top < 0:
top = 0
if bot > imh - 1:
bot = imh - 1
detection = {
"category": category,
"labelstr": labelstr,
"left": left,
"top": top,
"right": right,
"bot": bot,
"width": width,
"rgb": rgb,
}
return valid, detection
def draw_detections(font_path, im, dets, thresh, names, classes):
"Draw the markings around the detected region"
for det in dets:
valid, detection = get_detections(im, det, thresh, names, classes)
if valid:
rgb = detection["rgb"]
label = _get_label(font_path, "".join(detection["labelstr"]), rgb)
_draw_box_width(
im,
detection["left"],
detection["top"],
detection["right"],
detection["bot"],
detection["width"],
rgb[0],
rgb[1],
rgb[2],
)
_draw_label(im, detection["top"] + detection["width"], detection["left"], label, rgb)
def show_detections(im, dets, thresh, names, classes):
"Print the markings and the detected region"
for det in dets:
valid, detection = get_detections(im, det, thresh, names, classes)
if valid:
print(
"class:{} left:{} top:{} right:{} bottom:{}".format(
detection["labelstr"],
detection["left"],
detection["top"],
detection["right"],
detection["bot"],
)
)
def _get_pixel(im, x, y, c):
return im[c][y][x]
def _set_pixel(im, x, y, c, val):
if x < 0 or y < 0 or c < 0 or x >= im.shape[2] or y >= im.shape[1] or c >= im.shape[0]:
return
im[c][y][x] = val
def _draw_label(im, r, c, label, rgb):
w = label.shape[2]
h = label.shape[1]
if (r - h) >= 0:
r = r - h
for j in range(h):
if j < h and (j + r) < im.shape[1]:
for i in range(w):
if i < w and (i + c) < im.shape[2]:
for k in range(label.shape[0]):
val = _get_pixel(label, i, j, k)
                        _set_pixel(im, i + c, j + r, k, val)  # alternatively: rgb[k] * val
def _get_label(font_path, labelstr, rgb):
# pylint: disable=import-outside-toplevel
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
text = labelstr
colorText = "black"
testDraw = ImageDraw.Draw(Image.new("RGB", (1, 1)))
font = ImageFont.truetype(font_path, 25)
width, height = testDraw.textsize(labelstr, font=font)
img = Image.new(
"RGB", (width, height), color=(int(rgb[0] * 255), int(rgb[1] * 255), int(rgb[2] * 255))
)
d = ImageDraw.Draw(img)
d.text((0, 0), text, fill=colorText, font=font)
opencvImage = np.divide(np.asarray(img), 255)
return opencvImage.transpose(2, 0, 1)
def _get_color(c, x, max_value):
c = int(c)
colors = [[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]]
ratio = (float(x) / float(max_value)) * 5
i = int(math.floor(ratio))
j = int(math.ceil(ratio))
ratio -= i
r = (1 - ratio) * colors[i][c] + ratio * colors[j][c]
return r
def _draw_box(im, x1, y1, x2, y2, r, g, b):
y1 = int(y1)
y2 = int(y2)
x1 = int(x1)
x2 = int(x2)
ac, ah, aw = im.shape
    # Clamp all four box corners to the image bounds.
    if x1 < 0:
        x1 = 0
    if x1 >= aw:
        x1 = aw - 1
    if x2 < 0:
        x2 = 0
    if x2 >= aw:
        x2 = aw - 1
    if y1 < 0:
        y1 = 0
    if y1 >= ah:
        y1 = ah - 1
    if y2 < 0:
        y2 = 0
    if y2 >= ah:
        y2 = ah - 1
for i in range(x1, x2):
im[0][y1][i] = r
im[0][y2][i] = r
im[1][y1][i] = g
im[1][y2][i] = g
im[2][y1][i] = b
im[2][y2][i] = b
for i in range(y1, y2):
im[0][i][x1] = r
im[0][i][x2] = r
im[1][i][x1] = g
im[1][i][x2] = g
im[2][i][x1] = b
im[2][i][x2] = b
def _draw_box_width(im, x1, y1, x2, y2, w, r, g, b):
for i in range(int(w)):
_draw_box(im, x1 + i, y1 + i, x2 - i, y2 - i, r, g, b)
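# A hedged end-to-end sketch (illustrative only): collect raw boxes from every
# Yolo/Region layer, suppress overlaps, then draw. `tvm_out` is assumed to hold
# the per-layer output dicts produced by the darknet test utilities.
if __name__ == "__main__":
    net_shape, im_shape = (416, 416), (768, 576)  # network and image (w, h)
    thresh, nms_thresh, classes = 0.5, 0.45, 80
    tvm_out = []  # placeholder: fill with {"type": ..., "output": ...} layer dicts
    dets = fill_network_boxes(net_shape, im_shape, thresh, True, tvm_out)
    do_nms_sort(dets, classes, nms_thresh)
    # draw_detections(font_path, img, dets, thresh, names, classes) would then
    # annotate a CHW image in place.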
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/transform/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, redefined-builtin, invalid-name
"""The Relay IR namespace containing transformations."""
# transformation passes
from .transform import *
from .recast import recast
from . import fake_quantization_to_integer, mixed_precision
from .flexible_shape import FlexibleShapeDispatch
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/transform/_ffi_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""FFI APIs for Relay transformation passes."""
import tvm._ffi
tvm._ffi._init_api("relay._transform", __name__)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/transform/fake_quantization_to_integer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Relay functions for rewriting fake quantized ops."""
import numpy as np
import tvm
from tvm import relay
from tvm.ir import TensorAffineType, TupleAffineType
# import to register canonicalization funcs for fq2i
# pylint: disable=unused-import
from tvm.relay.qnn.op import canonicalizations
from tvm.tir import bijective_layout
from ..op import register_fake_quantization_to_integer
def fold_constant(expr):
return relay.transform.FoldConstantExpr(expr, tvm.IRModule())
def get_zeros(scale):
return fold_constant(relay.op.cast(relay.op.zeros_like(scale), "int32"))
def infer_shape(expr):
return relay.transform.InferType()(tvm.IRModule.from_expr(expr))["main"].body.checked_type.shape
def approx_equal(x, y):
x = fold_constant(x)
y = fold_constant(y)
if isinstance(x, relay.Constant) and isinstance(y, relay.Constant):
equal = np.allclose(x.data.asnumpy(), y.data.asnumpy())
else:
equal = tvm.ir.structural_equal(x, y)
return equal
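# Note: both sides are constant-folded first, so e.g. relay.const(1.0) and
# relay.const(1.0 + 1e-9) compare equal via np.allclose, while non-constant
# expressions fall back to structural equality.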
@register_fake_quantization_to_integer("qnn.dequantize")
def dequantize(expr, type_map):
"""Remove dequantize op"""
out = expr.args[0]
t = type_map[expr]
return [out, t]
@register_fake_quantization_to_integer("qnn.quantize")
def quantize(expr, type_map):
"""Turn a quantize op into requantize or remove it"""
out = expr.args[0]
t = type_map[out]
in_scale = fold_constant(t.scale)
in_zero_point = fold_constant(t.zero_point)
if not (
approx_equal(in_scale, expr.args[1])
and approx_equal(in_zero_point, expr.args[2])
and tvm.ir.structural_equal(t.dtype, expr.attrs.out_dtype)
):
out = relay.qnn.op.requantize(
out,
in_scale,
in_zero_point,
expr.args[1],
expr.args[2],
out_dtype=expr.attrs.out_dtype,
axis=t.axis,
)
return [
out,
TensorAffineType(expr.args[1], expr.args[2], expr.attrs.out_dtype, expr.attrs.axis),
]
def register_unary_identity(op_name):
def identity(expr, type_map):
assert len(expr.args) == 1
arg = expr.args[0]
t = type_map[arg]
return [expr, t]
return register_fake_quantization_to_integer(op_name, identity)
register_unary_identity("reshape")
register_unary_identity("squeeze")
register_unary_identity("strided_slice")
register_unary_identity("transpose")
register_unary_identity("expand_dims")
register_unary_identity("nn.max_pool2d")
register_unary_identity("nn.batch_flatten")
register_unary_identity("nn.depth_to_space")
register_unary_identity("max")
register_unary_identity("min")
register_unary_identity("image.resize2d")
@register_fake_quantization_to_integer("nn.adaptive_avg_pool1d")
def adaptive_avgpool1d(expr, type_map):
"""Rewrite an adaptive avgpool op"""
arg = expr.args[0]
t = type_map[arg]
out_t = type_map[expr]
if not (
approx_equal(t.scale, out_t.scale)
and approx_equal(t.zero_point, out_t.zero_point)
and tvm.ir.structural_equal(t.dtype, out_t.dtype)
):
arg = relay.qnn.op.requantize(
arg,
t.scale,
t.zero_point,
out_t.scale,
out_t.zero_point,
out_dtype="int32",
axis=t.axis,
)
else:
arg = relay.op.cast(arg, "int32")
output_size = expr.attrs.output_size
out = relay.op.nn.adaptive_avg_pool1d(arg, output_size)
return [out, TensorAffineType(out_t.scale, out_t.zero_point, "int32", out_t.axis)]
@register_fake_quantization_to_integer("nn.avg_pool2d")
def avgpool2d(expr, type_map):
"""Rewrite a avgpool op"""
arg = expr.args[0]
t = type_map[arg]
out_t = type_map[expr]
# Cast (or requantize) to int32.
if not (
approx_equal(t.scale, out_t.scale)
and approx_equal(t.zero_point, out_t.zero_point)
and tvm.ir.structural_equal(t.dtype, out_t.dtype)
):
arg = relay.qnn.op.requantize(
arg,
t.scale,
t.zero_point,
out_t.scale,
out_t.zero_point,
out_dtype="int32",
axis=t.axis,
)
else:
arg = relay.op.cast(arg, "int32")
out = relay.op.nn.avg_pool2d(arg, **expr.attrs)
if out_t.dtype != "int32":
# Cast back to output dtype to preserve input dtype == output dtype for AvgPool2d.
out = relay.op.clip(out, a_min=np.iinfo(out_t.dtype).min, a_max=np.iinfo(out_t.dtype).max)
out = relay.op.cast(out, out_t.dtype)
return [out, TensorAffineType(out_t.scale, out_t.zero_point, out_t.dtype, out_t.axis)]
@register_fake_quantization_to_integer("nn.global_avg_pool2d")
def global_avgpool2d(expr, type_map):
"""Rewrite a global_avgpool op"""
arg = expr.args[0]
t = type_map[arg]
    out_t = type_map[expr]
if not (
approx_equal(t.scale, out_t.scale)
and approx_equal(t.zero_point, out_t.zero_point)
and tvm.ir.structural_equal(t.dtype, out_t.dtype)
):
arg = relay.qnn.op.requantize(
arg,
t.scale,
t.zero_point,
out_t.scale,
out_t.zero_point,
out_dtype="int32",
axis=t.axis,
)
else:
arg = relay.op.cast(arg, "int32")
out = relay.op.nn.global_avg_pool2d(arg)
return [out, TensorAffineType(out_t.scale, out_t.zero_point, "int32", out_t.axis)]
@register_fake_quantization_to_integer("broadcast_to")
def broadcast_to(expr, type_map):
"""Rewrite a broadcast_to op"""
arg = expr.args[0]
t = type_map[arg]
shape = expr.attrs.shape
out = relay.op.broadcast_to(arg, shape)
return [out, t]
@register_fake_quantization_to_integer("nn.bias_add")
def bias_add(expr, type_map):
"""Rewrite a bias_add op"""
x, b = expr.args
x_t = type_map[x]
if b in type_map:
# Ensure bias matches the previous op
b_t = type_map[b]
in_scale = fold_constant(x_t.scale)
in_zero_point = fold_constant(x_t.zero_point)
if not (
approx_equal(x_t.scale, b_t.scale)
and approx_equal(x_t.zero_point, b_t.zero_point)
and tvm.ir.structural_equal(x_t.dtype, b_t.dtype)
):
b = relay.qnn.op.requantize(
b,
b_t.scale,
b_t.zero_point,
in_scale,
in_zero_point,
out_dtype=x_t.dtype,
axis=0,
)
else:
# If the bias is a constant, we need to quantize it
assert isinstance(b, relay.expr.Constant)
assert b.checked_type.dtype in ["float32", "float64", "float16", "bfloat16"]
b = relay.qnn.op.quantize(b, x_t.scale, x_t.zero_point, axis=0, out_dtype=x_t.dtype)
out = relay.op.nn.bias_add(x, b, **expr.attrs)
return [out, x_t]
@register_fake_quantization_to_integer("nn.conv2d")
def conv2d(expr, type_map):
"""Rewrite a conv2d op"""
attrs = {**expr.attrs}
attrs.pop("out_dtype")
x, weight = expr.args
x_t = type_map[x]
w_t = type_map[weight]
conv_scale = fold_constant(x_t.scale * w_t.scale)
conv_zp = get_zeros(conv_scale)
out = relay.qnn.op.conv2d(
x, weight, x_t.zero_point, w_t.zero_point, x_t.scale, w_t.scale, **attrs
)
out_layout = attrs["out_layout"] if attrs["out_layout"] != "" else attrs["data_layout"]
out_axis = bijective_layout(out_layout, "NCHW").backward_index(list(range(4)))[1]
return [out, TensorAffineType(conv_scale, conv_zp, out.attrs.out_dtype, out_axis.value)]
@register_fake_quantization_to_integer("nn.conv2d_transpose")
def conv2d_transpose(expr, type_map):
"""Rewrite a conv2d_transpose op"""
attrs = {**expr.attrs}
attrs.pop("out_dtype")
x, weight = expr.args
x_t = type_map[x]
w_t = type_map[weight]
conv_scale = fold_constant(x_t.scale * w_t.scale)
conv_zp = get_zeros(conv_scale)
out = relay.qnn.op.conv2d_transpose(
x, weight, x_t.zero_point, w_t.zero_point, x_t.scale, w_t.scale, **attrs
)
out_layout = attrs["out_layout"] if attrs["out_layout"] != "" else attrs["data_layout"]
out_axis = bijective_layout(out_layout, "NCHW").backward_index(list(range(4)))[1]
return [out, TensorAffineType(conv_scale, conv_zp, out.attrs.out_dtype, out_axis.value)]
@register_fake_quantization_to_integer("nn.dense")
def dense(expr, type_map):
"""Rewrite a dense op"""
attrs = {**expr.attrs}
attrs.pop("out_dtype")
x, weight = expr.args
x_t = type_map[x]
w_t = type_map[weight]
dense_scale = fold_constant(x_t.scale * w_t.scale)
dense_zp = get_zeros(dense_scale)
out = relay.qnn.op.dense(
x, weight, x_t.zero_point, w_t.zero_point, x_t.scale, w_t.scale, **attrs
)
return [out, TensorAffineType(dense_scale, dense_zp, out.attrs.out_dtype, 1)]
@register_fake_quantization_to_integer("nn.batch_matmul")
def batch_matmul(expr, type_map):
"""Rewrite a batch_matmul op"""
x, y = expr.args
x_t = type_map[x]
y_t = type_map[y]
matmul_scale = fold_constant(x_t.scale * y_t.scale)
matmul_zp = relay.const(0)
out = relay.qnn.op.batch_matmul(x, y, x_t.zero_point, y_t.zero_point, x_t.scale, y_t.scale)
return [out, TensorAffineType(matmul_scale, matmul_zp, out.attrs.out_dtype, x_t.axis)]
@register_fake_quantization_to_integer("concatenate")
def concat(expr, type_map):
"""Rewrite a concat op"""
scales = []
zps = []
tuple_type = type_map[expr.args[0]]
for t in tuple_type.types:
scales.append(t.scale)
zps.append(t.zero_point)
out_type = type_map[expr]
out = relay.qnn.op.concatenate(
expr.args[0],
relay.Tuple(scales),
relay.Tuple(zps),
out_type.scale,
out_type.zero_point,
**expr.attrs,
)
return [out, out_type]
@register_fake_quantization_to_integer("topk")
def topk(expr, type_map):
"""Rewrite a topk op"""
arg = expr.args[0]
t = type_map[arg]
attrs = {**expr.attrs}
assert "ret_type" in attrs and attrs["ret_type"] == "values"
return [expr, t]
@register_fake_quantization_to_integer("split")
def split(expr, type_map):
"""Rewrite a split op"""
arg = expr.args[0]
t = type_map[arg]
attrs = {**expr.attrs}
if isinstance(attrs["indices_or_sections"], tvm.tir.IntImm):
num_split = attrs["indices_or_sections"].value
attrs["indices_or_sections"] = num_split
else:
num_split = len(attrs["indices_or_sections"]) + 1
return [expr, TupleAffineType([t] * num_split)]
@register_fake_quantization_to_integer("clip")
def clip(expr, type_map):
"""Rewrite a clip op"""
arg = expr.args[0]
t = type_map[arg]
amin = expr.attrs.a_min
amax = expr.attrs.a_max
scale = fold_constant(t.scale)
z_p = fold_constant(t.zero_point)
if (
isinstance(scale, relay.expr.Constant)
and scale.data.numpy().size == 1
and isinstance(z_p, relay.expr.Constant)
and z_p.data.numpy().size == 1
):
scale = scale.data.numpy().item()
z_p = z_p.data.numpy().item()
new_min = int(amin / scale + z_p)
new_max = int(amax / scale + z_p)
out = relay.op.clip(arg, new_min, new_max)
else:
if not isinstance(amin, relay.expr.Constant):
amin = relay.op.const(amin)
if not isinstance(amax, relay.expr.Constant):
amax = relay.op.const(amax)
scale_shape = infer_shape(scale)
if len(scale_shape) > 0 and scale_shape[0] > 1:
b_shape = [1] * len(infer_shape(arg))
b_shape[t.axis] = -1
amin = relay.op.reshape(relay.op.broadcast_to(amin, scale_shape), b_shape)
amax = relay.op.reshape(relay.op.broadcast_to(amax, scale_shape), b_shape)
amin = relay.qnn.op.quantize(amin, scale, z_p, t.axis, t.dtype)
amax = relay.qnn.op.quantize(amax, scale, z_p, t.axis, t.dtype)
out = relay.op.minimum(relay.op.maximum(arg, fold_constant(amin)), fold_constant(amax))
return [out, t]
@register_fake_quantization_to_integer("nn.relu")
def relu(expr, type_map):
"""Rewrite a relu op"""
arg = expr.args[0]
t = type_map[arg]
scale_shape = infer_shape(t.scale)
z_p = t.zero_point
assert len(scale_shape) <= 1
if len(scale_shape) == 1 and scale_shape[0] > 1:
b_shape = [1] * len(infer_shape(arg))
b_shape[t.axis] = -1
z_p = relay.op.reshape(relay.op.broadcast_to(z_p, scale_shape), b_shape)
zero = relay.op.cast(z_p, t.dtype)
return [relay.op.maximum(arg, fold_constant(zero)), t]
@register_fake_quantization_to_integer("nn.leaky_relu")
def leaky_relu(expr, type_map):
"""Rewrite a leaky relu op"""
arg = expr.args[0]
x_t = type_map[arg]
out_t = type_map[expr]
alpha = expr.attrs.alpha
output = relay.qnn.op.leaky_relu(
expr, alpha, x_t.scale, x_t.zero_point, out_t.scale, out_t.zero_point
)
return [output, x_t]
@register_fake_quantization_to_integer("nn.pad")
def pad(expr, type_map):
"""Rewite an nn.pad op"""
arg = expr.args[0]
t = type_map[arg]
pad_value = expr.args[1]
# TF2ONNX will sometimes implement the pad_value as a constant without a quantize
# To support that, the pass lets branches that terminate in a constant through
if pad_value in type_map:
        # If the pad value is calculated from a dequantize op, it should be in the
        # type map, and we need to make sure its affine type matches the arg.
pad_t = type_map[pad_value]
if not tvm.ir.structural_equal(t, pad_t):
pad_value = relay.qnn.op.requantize(
pad_value,
pad_t.scale,
pad_t.zero_point,
t.scale,
t.zero_point,
out_dtype=t.dtype,
axis=pad_t.axis,
)
else:
# If the pad-value is a constant, we need to quantize it
assert isinstance(pad_value, relay.expr.Constant)
assert pad_value.checked_type.dtype in ["float32", "float64", "float16", "bfloat16"]
pad_value = relay.qnn.op.quantize(pad_value, t.scale, t.zero_point)
out = relay.op.nn.pad(arg, pad_value=pad_value, **expr.attrs)
return [out, t]
@register_fake_quantization_to_integer("mean")
def mean(expr, type_map):
"""Rewrite a mean op"""
arg = expr.args[0]
t = type_map[arg]
arg = relay.op.cast(arg, "int32")
out = relay.op.mean(arg, **expr.attrs)
out = relay.op.cast(out, t.dtype)
return [out, t]
def get_binary_types(expr, type_map):
"""Get Affine types of a binary op's inputs and unify them"""
# Support the case where one input is quantized and the other is a constant float
left = expr.args[0]
right = expr.args[1]
left_t = None
right_t = None
if left in type_map:
left_t = type_map[left]
if right in type_map:
right_t = type_map[right]
out_t = type_map[expr]
if left_t is None and right_t is None:
raise TypeError("neither input is quantized!")
if left_t is None:
assert isinstance(left, relay.expr.Constant)
left = relay.qnn.op.quantize(
left, right_t.scale, right_t.zero_point, out_dtype=right_t.dtype
)
left_t = right_t
if right_t is None:
assert isinstance(right, relay.expr.Constant)
right = relay.qnn.op.quantize(
right, left_t.scale, left_t.zero_point, out_dtype=left_t.dtype
)
right_t = left_t
# Handle the case of mismatched inputs
if not left_t.dtype == out_t.dtype:
out_t = left_t
return left, right, left_t, right_t, out_t
def register_binary_qnn(op_name, op):
"""Register a Binary Op that converts to QNN"""
def binary(expr, type_map):
left, right, left_t, right_t, out_t = get_binary_types(expr, type_map)
out = op(
left,
right,
left_t.scale,
left_t.zero_point,
right_t.scale,
right_t.zero_point,
out_t.scale,
out_t.zero_point,
left_t.axis,
right_t.axis,
)
return [out, out_t]
return register_fake_quantization_to_integer(op_name, binary)
# Use lambdas here to avoid a circular import problem
# pylint: disable=unnecessary-lambda
register_binary_qnn("add", lambda *args: relay.qnn.op.add(*args))
register_binary_qnn("multiply", lambda *args: relay.qnn.op.mul(*args))
register_binary_qnn("subtract", lambda *args: relay.qnn.op.subtract(*args))
def register_binary_identity(op_name, op):
"""Register a binary op that works directly on int8"""
def binary(expr, type_map):
left, right, left_t, right_t, out_t = get_binary_types(expr, type_map)
if left_t != out_t:
left = relay.qnn.op.requantize(
left,
left_t.scale,
left_t.zero_point,
out_t.scale,
out_t.zero_point,
out_dtype=out_t.dtype,
axis=left_t.axis,
)
if right_t != out_t:
right = relay.qnn.op.requantize(
right,
right_t.scale,
right_t.zero_point,
out_t.scale,
out_t.zero_point,
out_dtype=out_t.dtype,
axis=right_t.axis,
)
out = op(left, right)
return [out, out_t]
return register_fake_quantization_to_integer(op_name, binary)
register_binary_identity("minimum", relay.op.minimum)
register_binary_identity("maximum", relay.op.maximum)
def register_unary_qnn(op_name, op):
"""Rewrite a unary op"""
def unary(expr, type_map):
arg = expr.args[0]
x_t = type_map[arg]
out_t = type_map[expr]
out = op(
arg,
x_t.scale,
x_t.zero_point,
out_t.scale,
out_t.zero_point,
)
return [out, out_t]
return register_fake_quantization_to_integer(op_name, unary)
register_unary_qnn("sqrt", relay.qnn.op.sqrt)
register_unary_qnn("rsqrt", relay.qnn.op.rsqrt)
register_unary_qnn("exp", relay.qnn.op.exp)
register_unary_qnn("erf", relay.qnn.op.erf)
register_unary_qnn("sigmoid", relay.qnn.op.sigmoid)
register_unary_qnn("hardswish", relay.qnn.op.hardswish)
register_unary_qnn("tanh", relay.qnn.op.tanh)
register_unary_qnn("abs", relay.qnn.op.abs)
register_unary_qnn("log", relay.qnn.op.log)
| https://github.com/zk-ml/tachikoma |
python/tvm/relay/transform/flexible_shape.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Relay functions for wrapping a module with flexible shape dispatch."""
import tvm
from tvm import relay
def override_shape(tensor_type, axis, dim):
"""Change a dimension in a tensor shape."""
# Handle multiple tensors by overriding the shape of each.
if isinstance(tensor_type, relay.TupleType):
tensor_type = tensor_type.fields
else:
tensor_type = [tensor_type]
# Create new tensortypes for each input.
new_types = []
for t_type in tensor_type:
new_dims = list(t_type.shape)
new_dims[axis] = dim
new_types.append(relay.TensorType(new_dims, t_type.dtype))
    # Don't return a tuple if there is a single tensor.
if len(new_types) == 1:
return new_types[0]
return relay.TupleType(tvm.runtime.convert(new_types))
def specialize_body(mod, function, axis, dim, input_indices, affects_output=True):
"""
Create a subgraph to handle specific input shapes
    This function takes in a module and one of its functions and creates a
similar function with a specific input shape. It then attaches the new function
to the module. Calling this function multiple times results in a module that
contains several similar functions each specialized to a specific input shape.
This allows a dispatch handler to be built on top of the module to deal with
flexible shapes.
There are a few modes to this function. When the specialized function has multiple
flexible inputs, the index of those inputs must be provided to the input_indices argument.
In this case, the axis of the flexible dimension for each of those inputs must be the same.
By default, this function assumes that the output shape is dependent on the input
shape (as is the case in dynamic batching) and will also specialize the output type
accordingly. If this is not true, the affects_output argument must be set to False.
Parameters
----------
mod: IRModule
The module that contains specialized functions and the dispatcher.
function: Function
The original non-specialized function that will be transformed.
axis: int
Which axis the flexible shape is on.
dim: int
The shape to specialize the new subgraph for along the axis dim.
input_indices: List[int]
Which inputs should be dispatched dynamically, provided by index. All inputs
must share the same dynamic axis.
affects_output: Optional[bool]
Whether the change in input shape has a corresponding effect on the output shape.
        Batching, for example, affects both the input and output, whereas changing sequence
length in an NLP model typically does not.
Returns
-------
gvar : GlobalVar
The new variable for the specialized subgraph.
spec_types : List[TensorType]
A list of the new specialized types for each input in the graph.
"""
# Iterate through specified inputs and construct specialized shapes for each.
new_params = list(function.params)
data_binding = {}
dyn_data_array = []
for inp in input_indices:
data = function.params[inp]
flex_ty = override_shape(data.type_annotation, axis, dim)
dyn_data = relay.Var(data.name_hint, type_annotation=flex_ty)
new_params[inp] = dyn_data
data_binding[data] = dyn_data
dyn_data_array.append(dyn_data)
# Create a new function body for the modified shapes.
new_body = relay.expr.bind(function.body, data_binding)
# Only change the output shape if the input shape affects it.
if affects_output:
new_ret_ty = override_shape(function.ret_type, axis, dim)
else:
new_ret_ty = function.ret_type
gvar = relay.GlobalVar("main_" + str(dim))
# Add the new function to the main IRModule.
mod[gvar] = relay.Function(
new_params, new_body, new_ret_ty, function.type_params, function.attrs
)
return gvar, [d.type_annotation for d in dyn_data_array]
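# Hedged illustration (assumes a single-input "main" with a static batch axis):
#     gvar, spec_types = specialize_body(mod, mod["main"], axis=0, dim=4, input_indices=[0])
# attaches a new "main_4" function to `mod`, specialized to batch size 4 on axis 0.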
def flexible_dispatch(
mod, buckets, axis=0, auto_pad=False, pad_value=0, input_indices=None, affects_output=True
):
"""
Enable inference of multiple shaped inputs in one module.
This transformation adds a handler around a module that
checks input shapes and dispatches to a subgraph specialized
to handle the specific shapes of that input. If no exactly matching
subgraph is available, the input will be run using full dynamism.
For best performance, specify all the sizes the module will
be likely to see using the buckets argument.
By default, this function will dispatch shapes that exactly match one
of the buckets to a corresponding subgraph. All non-matching shapes
use the same fully dynamic fallback. This can be detrimental to performance
for those non-matching shapes. Setting auto_pad to True causes this
function to round-up the shape of non-matching inputs to the closest
bucket. This allows them to use the tuned kernels of bucket shapes
which can improve performance.
Functions that have multiple inputs sharing a dynamic axis, which
is common for batch size or sequence length dynamism, are supported
through the input_indices argument.
Many types of dynamism such as batching affect both the input and output
    shape; however, this is not always the case. If the output shape
is independent of the input, the affects_output argument of this
function must be set to False.
Parameters
----------
buckets: list[int]
The sizes of the input dimension that should be explicitly handled.
Each value in buckets will have a corresponding subgraph constructed to
handle it.
axis: int
The dimension of the input that should be made flexible. This will
most often be used for the batch dimension.
auto_pad: Optional[bool]
If True, then padding will be inserted to values that don't match one of
the provided buckets.
pad_value: Optional[float]
When auto_pad is true, padding will be done with this value.
input_indices: Optional[List[int]]
Which inputs should be dispatched dynamically, provided by index. All inputs
must share the same dynamic axis.
affects_output: Optional[bool]
Whether the change in input shape has a corresponding effect on the output shape.
        Batching, for example, affects both the input and output, whereas changing sequence
length in an NLP model typically does not.
Returns
-------
mod : IRModule
The new module wrapped with a flexible shape dispatch handler.
"""
main_fn = mod["main"]
# Default to single input if not specified.
if input_indices is None:
input_indices = [0]
# Extract all input data and create a new dynamic variable for each.
data = []
dyn_data = []
    for i in input_indices:
        param = main_fn.params[i]
        data.append(param)
        dyn_shape = override_shape(param.type_annotation, axis, relay.Any())
        dyn_data.append(relay.Var(param.name_hint, type_annotation=dyn_shape))
# Extract the dynamic shape value from one of the inputs.
rt_sh = relay.op.shape_of(dyn_data[0])
flex_value = relay.op.take(rt_sh, relay.const(axis))
if_exprs = []
for i, bucket in enumerate(buckets):
input_data = dyn_data
check_dim = flex_value
# Apply automatic padding if specified.
if auto_pad:
input_data = []
# Construct padding expression for inputs.
for j, inp in enumerate(dyn_data):
pad_width = relay.const(bucket) - flex_value
rank = len(data[j].type_annotation.shape)
pads = relay.zeros([rank, 2], "int32")
pads = relay.scatter_nd(pads, relay.const([axis, 1]), pad_width)
padded_value = relay.nn.pad(inp, pads, pad_value)
# Determine if this is the proper bucket to pad to. Do this by checking if the
# input shape is between this bucket and the previous.
if i == 0:
padded_value = relay.If(
relay.op.less_equal(flex_value, relay.const(bucket)), padded_value, inp
)
else:
padded_value = relay.If(
relay.op.logical_and(
relay.op.less_equal(flex_value, relay.const(bucket)),
relay.op.greater(flex_value, relay.const(buckets[i - 1])),
),
padded_value,
inp,
)
# Update input value and test dimension to reflect possible padding.
input_data.append(padded_value)
# Grab the new possibly padded shape for checking bucket size.
check_dim = relay.op.take(relay.op.shape_of(input_data[0]), relay.const(axis))
# Create a specialized subgraph for the current bucket.
spec_call, spec_ty = specialize_body(
mod, main_fn, axis, bucket, input_indices=input_indices, affects_output=affects_output
)
# Apply hard casting to shape to create statically typed graphs.
spec_data = []
for j, inp in enumerate(input_data):
spec_data.append(relay.op.reshape(inp, spec_ty[j].shape))
# Create a dispatch statement for the current specialized graph.
call_args = list(main_fn.params)
for j, inp in enumerate(input_indices):
call_args[inp] = spec_data[j]
new_call = spec_call(*call_args)
# Remove meaningless padded outputs if applicable.
if auto_pad and affects_output:
new_call = relay.take(
new_call,
relay.arange(start=relay.const(0), stop=flex_value, dtype="int32"),
axis=axis,
)
# Add this new case to the dispatch handler.
if_exprs.append((relay.op.equal(check_dim, relay.const(bucket)), new_call))
# Create a subgraph to handle all other shapes.
default_dyn_call, _ = specialize_body(
mod, main_fn, axis, relay.Any(), input_indices=input_indices, affects_output=affects_output
)
call_args = list(main_fn.params)
for j, inp in enumerate(input_indices):
call_args[inp] = dyn_data[j]
new_body = default_dyn_call(*call_args)
# Create an If chain to dispatch shapes to the appropriate specialized subgraph.
for cond, true_branch in if_exprs:
new_body = relay.If(cond, true_branch, new_body)
# Assign new parameters to the function.
new_params = list(main_fn.params)
for j, inp in enumerate(input_indices):
new_params[inp] = dyn_data[j]
# Update the output shape to be dynamic if needed.
if affects_output:
dyn_ret_type = override_shape(main_fn.ret_type, axis, relay.Any())
else:
dyn_ret_type = main_fn.ret_type
# Assign the handler as the new entrypoint in the module.
new_main = relay.Function(
new_params, new_body, dyn_ret_type, main_fn.type_params, main_fn.attrs
)
mod["main"] = new_main
# Do type inference to make sure everything worked.
mod = relay.transform.InferType()(mod)
return mod
class FlexibleShapeDispatch(object):
"""Enable inference of multiple shaped inputs in one module.
This transformation adds a handler around a module that
checks input shapes and dispatches to a subgraph specialized
to handle the specific shapes of that input. If no exactly matching
subgraph is available, the input will be run using full dynamism.
For best performance, specify all the sizes the module will
be likely to see using the buckets argument.
By default, this pass will dispatch shapes that exactly match one
of the buckets to a corresponding subgraph. All non-matching shapes
use the same fully dynamic fallback. This can be detrimental to performance
for those non-matching shapes. Setting auto_pad to True causes this
pass to round-up the shape of non-matching inputs to the closest
bucket. This allows them to use the tuned kernels of bucket shapes
which can improve performance.
Models that have multiple inputs sharing a dynamic axis, which
is common for batch size or sequence length dynamism, are supported
through the input_indices argument.
Many types of dynamism such as batching affect both the input and output
    shape; however, this is not always the case. If the output shape
is independent of the input, the affects_output argument of this
pass must be set to False.
Parameters
----------
buckets: list[int]
The sizes of the input dimension that should be explicitly handled.
Each value in buckets will have a corresponding subgraph constructed to
handle it.
axis: int
The dimension of the input that should be made flexible. This will
most often be used for the batch dimension.
auto_pad: Optional[bool]
If True, then padding will be inserted to values that don't match one of
the provided buckets.
pad_value: Optional[float]
When auto_pad is true, padding will be done with this value.
input_indices: Optional[List[int]]
Which inputs should be dispatched dynamically, provided by index. All inputs
must share the same dynamic axis.
affects_output: Optional[bool]
Whether the change in input shape has a corresponding effect on the output shape.
        Batching, for example, affects both the input and output, whereas changing sequence
length in an NLP model typically does not.
Returns
-------
ret : FlexibleShapeDispatch
A pass that can be applied to a module to add flexible shape handling.
"""
def __init__(
self,
buckets,
axis=0,
auto_pad=False,
pad_value=0,
input_indices=None,
affects_output=True,
):
self.axis = axis
self.buckets = buckets
self.auto_pad = auto_pad
self.pad_value = pad_value
self.input_indices = input_indices
self.affects_output = affects_output
super(FlexibleShapeDispatch, self).__init__()
def __call__(self, mod):
# Shape information is required for this pass.
mod = relay.transform.InferType()(mod)
return flexible_dispatch(
mod,
self.buckets,
self.axis,
self.auto_pad,
self.pad_value,
self.input_indices,
self.affects_output,
)
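# A minimal usage sketch (not part of the upstream module): dispatch batch sizes 1, 4,
# and 8 to specialized subgraphs and pad other sizes up to the nearest bucket. The
# ResNet workload is just an assumed stand-in for any static-shape module.
if __name__ == "__main__":
    from tvm.relay.testing import resnet

    mod, params = resnet.get_workload(num_layers=18, batch_size=1)
    dispatcher = FlexibleShapeDispatch(buckets=[1, 4, 8], axis=0, auto_pad=True)
    flex_mod = dispatcher(mod)  # "main" now dispatches on the batch dimension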
| https://github.com/zk-ml/tachikoma |