file_path | content | repo
---|---|---|
src/relay/op/dyn/tensor/transform.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
 * \file src/relay/op/dyn/tensor/transform.h
* \brief Transform op attributes that can be shared among Relay and its dialects.
*/
#ifndef TVM_RELAY_OP_DYN_TENSOR_TRANSFORM_H_
#define TVM_RELAY_OP_DYN_TENSOR_TRANSFORM_H_
namespace tvm {
namespace relay {
namespace dyn {} // namespace dyn
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_OP_DYN_TENSOR_TRANSFORM_H_
| https://github.com/zk-ml/tachikoma |
src/relay/op/make_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
*
* \file tvm/relay/op/make_op.h
* \brief Header of internal operator functions
* to assist in creating ops in C++
*/
#ifndef TVM_RELAY_OP_MAKE_OP_H_
#define TVM_RELAY_OP_MAKE_OP_H_
#include <tvm/relay/expr.h>
#include <tvm/relay/op.h>
#include <tvm/tir/index_map.h>
// Include Templated Make Functions
#include "nn/convolution_make.h"
#include "nn/pooling.h"
namespace tvm {
namespace relay {
Expr MakeBroadCastTo(Expr data, Array<Integer> shape);
Expr MakeCast(Expr data, DataType dtype);
Expr MakeClip(Expr a, double a_min, double a_max);
Expr MakeConcatenate(Expr data, int axis);
Expr MakeMatmul(Expr tensor_a, Expr tensor_b, IndexExpr units, DataType out_dtype, bool transpose_a,
bool transpose_b);
Expr MakeDense(Expr data, Expr weight, IndexExpr units, DataType out_dtype);
Expr MakeBatchMatmul(Expr lhs, Expr rhs, DataType out_dtype, bool transpose_a, bool transpose_b);
Expr MakeExpandDims(Expr data, int axis, int num_newaxis);
Expr MakeFixedPointMultiplyPerAxis(Expr x, Expr m, Expr lshift, Expr rshift,
bool is_lshift_required, bool is_rshift_required,
Array<Integer> axis);
Expr MakeFull(Expr fill_value, Array<Integer> shape, DataType dtype);
Expr MakeLayoutTransform(Expr data, String src_layout, String dst_layout);
Expr MakeMetaScheduleLayoutTransform(Expr data, tir::IndexMap index_map);
Expr MakeAutoSchedulerLayoutTransform(Expr data, String src_layout, String dst_layout);
Expr MakeOnes(Array<Integer> shape, DataType dtype);
Expr MakePad(Expr data, Array<Array<Integer>> pad_width, Expr pad_value, String pad_mode);
Expr MakeReduce(Expr data, Array<Integer> axis, bool keepdims, bool exclude, String op_name);
Expr MakeRepeat(Expr data, int repeats, int axis);
Expr MakeReshape(Expr data, Array<Integer> newshape, bool allowzero = false);
Expr MakeReshapeLike(Expr lhs, Expr rhs, int lhs_begin, Integer lhs_end, int rhs_begin,
Integer rhs_end);
Expr MakeSplit(Expr data, ObjectRef indices_or_sections, int axis);
Expr MakeSqueeze(Expr data, Array<Integer> axis);
Expr MakeStack(Expr data, int axis);
Expr MakeTranspose(Expr data, Array<Integer> axes);
Expr MakeStridedSlice(Expr data, Array<Integer> begin, Array<Integer> end, Array<Integer> strides,
String slice_mode,
Optional<Array<Integer>> axes = NullValue<Array<Integer>>());
Expr MakeTile(Expr data, Array<Integer> reps);
Expr MakeTopK(Expr data, int k, int axis, String ret_type, bool is_ascend, DataType dtype);
Expr MakeUpSampling(Expr data, double scale_h, double scale_w, String layout, String method,
bool align_corners);
Expr MakeUpSampling3D(Expr data, double scale_d, double scale_h, double scale_w, String layout,
String method, String coordinate_transformation_mode);
Expr MakeVariance(Expr data, Expr mean, Array<Integer> axis, bool keepdims, bool exclude,
bool unbiased);
Expr MakeZeros(Array<Integer> shape, DataType dtype);
Expr MakeOneHot(Expr indices, Expr on_value, Expr off_value, int depth, int axis, DataType dtype);
Expr MakeResize2D(Expr data, Array<IndexExpr> size, Array<FloatImm> roi, String layout,
String method, String coordinate_transformation_mode, String rounding_method,
double cubic_alpha, int cubic_exclude, double extrapolation_value,
DataType out_dtype);
Expr MakeSparseToDense(Expr indices, Array<Integer> output_shape, Expr values, Expr default_value);
Expr MakeArange(Expr start, Expr stop, Expr step, DataType dtype);
Expr MakeShapeOf(Expr data, DataType dtype);
Expr MakeTake(Expr data, Expr indices, Integer batch_dims, Integer axis, String mode);
Expr MakeBiasAdd(Expr data, Expr bias, int axis);
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_OP_MAKE_OP_H_
| https://github.com/zk-ml/tachikoma |
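For orientation, here is a minimal sketch (not part of the repository; `BuildCastedReshape` is a hypothetical helper) of how a couple of the Make* declarations above can be composed from C++:

#include <tvm/relay/expr.h>
#include "make_op.h"

namespace tvm {
namespace relay {
// Hypothetical example: reshape `data` to (1, -1) and cast the result to float16.
Expr BuildCastedReshape(Expr data) {
  Expr reshaped = MakeReshape(data, {Integer(1), Integer(-1)});
  return MakeCast(reshaped, DataType::Float(16));
}
}  // namespace relay
}  // namespace tvm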
src/relay/op/memory/device_copy.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file relay/op/memory/device_copy.h
* \brief Helpers for working with "device_copy" attributes.
*/
#ifndef TVM_RELAY_OP_MEMORY_DEVICE_COPY_H_
#define TVM_RELAY_OP_MEMORY_DEVICE_COPY_H_
#include <tvm/relay/attrs/device_copy.h>
#include <tvm/relay/expr.h>
#include <utility>
#include "../call/call.h"
namespace tvm {
namespace relay {
/*! \brief Returns the "device_copy" operator. */
const Op& DeviceCopyOp();
/*!
* \brief Wraps \p expr in a "device_copy" CallNode indicating it should be evaluated and
* stored at \p src_virtual_device but then copied to \p dst_virtual_device.
*/
Expr DeviceCopy(Expr expr, VirtualDevice src_virtual_device, VirtualDevice dst_virtual_device);
/*!
* \brief Wraps \p expr in a "device_copy" CallNode indicating it should be evaluated and
 * stored at \p src_virtual_device but then copied to \p dst_virtual_device. However, returns \p expr
* directly if \p src_virtual_device and \p dst_virtual_device are (structurally) the same.
*/
Expr MaybeDeviceCopy(Expr expr, VirtualDevice src_virtual_device, VirtualDevice dst_virtual_device);
/*! \brief Result of \p GetDeviceCopyProps. */
struct DeviceCopyProps {
Expr body; // = null
VirtualDevice src_virtual_device = VirtualDevice::FullyUnconstrained();
VirtualDevice dst_virtual_device = VirtualDevice::FullyUnconstrained();
DeviceCopyProps() = default;
DeviceCopyProps(Expr body, VirtualDevice src_virtual_device, VirtualDevice dst_virtual_device)
: body(std::move(body)),
src_virtual_device(std::move(src_virtual_device)),
dst_virtual_device(std::move(dst_virtual_device)) {}
};
/*!
* \brief Returns the body expression, source, and destination \p VirtualDevices for \p call_node
* if it is a "device_copy" CallNode. Otherwise returns the null expression and unconstrained
* virtual device.
*/
DeviceCopyProps GetDeviceCopyProps(const CallNode* call_node);
/*!
* \brief Returns the body expression, source, and destination \p VirtualDevices for \p expr if it
* is a "device_copy" Call. Otherwise returns the null expression and unconstrained virtual device.
*/
DeviceCopyProps GetDeviceCopyProps(const Expr& expr);
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_OP_MEMORY_DEVICE_COPY_H_
| https://github.com/zk-ml/tachikoma |
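A brief usage sketch, assuming a `const CallNode* call` obtained while walking the IR (the surrounding pass is hypothetical):

DeviceCopyProps props = GetDeviceCopyProps(call);
if (props.body.defined()) {
  // `call` is a "device_copy": props.body is evaluated on props.src_virtual_device
  // and its result is copied to props.dst_virtual_device.
} else {
  // `call` is not a "device_copy"; handle it as an ordinary call.
}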
src/relay/op/memory/memory.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/relay/op/memory/memory.h
 * \brief Operators for memory-related operations in Relay.
*/
#ifndef TVM_RELAY_OP_MEMORY_MEMORY_H_
#define TVM_RELAY_OP_MEMORY_MEMORY_H_
#include <tvm/target/virtual_device.h>
#include <vector>
#include "tvm/relay/expr.h"
namespace tvm {
namespace relay {
Expr AllocStorage(Expr size, Expr alignment, VirtualDevice virtual_device, DataType dtype_hint);
/*! \brief Returns the "memory.alloc_tensor" operator. */
const Op& MemoryAllocTensorOp();
Expr AllocTensor(Expr storage, Expr offset, tvm::relay::Expr shape, DataType dtype,
Array<IndexExpr> assert_shape);
Expr ToTupleType(const Type& ty, const std::vector<Expr>& exprs);
std::vector<Expr> FromTupleType(const Type& type, const Expr& expr);
std::vector<TensorType> FlattenTupleType(const Type& type);
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_OP_MEMORY_MEMORY_H_
| https://github.com/zk-ml/tachikoma |
src/relay/op/memory/on_device.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file relay/op/memory/on_device.h
* \brief Helpers for working with the "on_device" 'annotation' call.
*/
#ifndef TVM_RELAY_OP_MEMORY_ON_DEVICE_H_
#define TVM_RELAY_OP_MEMORY_ON_DEVICE_H_
#include <tvm/relay/attrs/on_device.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/function.h>
#include <tvm/runtime/ndarray.h>
#include <utility>
#include <vector>
namespace tvm {
namespace relay {
/*! \brief Returns the "on_device" operator. */
const Op& OnDeviceOp();
/*!
* \brief Wraps \p body in an "on_device" CallNode for \p virtual_device.
*
* See \p OnDeviceAttrs for an overview.
*/
Call OnDevice(Expr body, VirtualDevice virtual_device, bool constrain_result = false,
bool constrain_body = true);
/*! \brief Result of \p GetOnDeviceProps. */
struct OnDeviceProps {
Expr body; // = null
VirtualDevice virtual_device = VirtualDevice::FullyUnconstrained();
bool constrain_result = false;
bool constrain_body = false;
OnDeviceProps() = default;
OnDeviceProps(Expr body, VirtualDevice virtual_device, bool constrain_result, bool constrain_body)
: body(std::move(body)),
virtual_device(std::move(virtual_device)),
constrain_result(constrain_result),
constrain_body(constrain_body) {}
bool is_fixed() const { return constrain_result && constrain_body; }
bool is_normal() const { return !constrain_result && constrain_body; }
};
/*!
* \brief Wraps \p body in an "on_device" CallNode, taking all fields other than \p body from \p
* props.
*/
inline Call OnDeviceWithProps(Expr body, const OnDeviceProps& props) {
return OnDevice(std::move(body), props.virtual_device, props.constrain_result,
props.constrain_body);
}
/*!
 * \brief Wraps \p body in an "on_device" CallNode, but does not constrain the body or result to
* any particular virtual device. This allows a "device_copy" to be inserted by PlanDevices
* where required, while at the same time not introducing unnecessary freedom in the device
* choices.
*/
inline Call OnDeviceCopyOk(Expr body) {
return OnDevice(std::move(body), VirtualDevice::FullyUnconstrained(),
/*constrain_result=*/false, /*constrain_body=*/false);
}
/*!
* \brief Wraps \p expr in an "on_device" CallNode for \p virtual_device and \p constraint if the
* \p VirtualDevice for \p expr cannot otherwise be recovered by the lexical scoping convention.
* This means we will NOT wrap if:
 * - \p virtual_device is fully unconstrained, which signals there are no device annotations
* already in play.
* - \p expr is an operator or primitive function literal. These are device polymorphic.
* - \p expr is a non-primitive function literal. The device is captured by the
* "result_virtual_device" attribute on the function itself.
* - \p expr is a global var. The device is on the function attributes the global is bound to.
* - \p expr is a local var. The device is tracked by the device aware visitors for us.
* - \p expr is a constructor. These are device polymorphic.
 * Nested on_device calls will never be constructed; they are instead merged on-the-fly.
*/
Expr MaybeOnDevice(Expr body, VirtualDevice virtual_device, bool constrain_result = false,
bool constrain_body = true);
/*! \brief As for MaybeOnDevice, but with both body and result constrained. */
inline Expr MaybeOnDeviceFixed(Expr body, VirtualDevice virtual_device) {
return MaybeOnDevice(std::move(body), std::move(virtual_device), /*constrain_result=*/true,
/*constrain_body=*/true);
}
/*! \brief As for MaybeOnDevice, but with fields other than body taken from \p props. */
inline Expr MaybeOnDeviceWithProps(Expr body, const OnDeviceProps& props) {
return MaybeOnDevice(std::move(body), props.virtual_device, props.constrain_result,
props.constrain_body);
}
/*!
* \brief Returns the body expression, \p VirtualDevice, and constraint field for \p call_node if it
* is an "on_device" CallNode. Otherwise returns the null expression, the unconstrained
* \p VirtualDevice, and \p kBody.
*/
OnDeviceProps GetOnDeviceProps(const CallNode* call_node);
/*!
* \brief Returns the body expression, \p VirtualDevice, and constraint field for \p expr if it is
* an "on_device" CallNode. Otherwise returns the null expression, the unconstrained \p
* VirtualDevice, and \p kBody.
*/
OnDeviceProps GetOnDeviceProps(const Expr& expr);
/*!
* \brief Returns the body of \p expr if it is an "on_device" annotation, otherwise returns
* \p expr directly.
*/
inline Expr IgnoreOnDevice(const Expr& expr) {
OnDeviceProps props = GetOnDeviceProps(expr);
return props.body.defined() ? props.body : expr;
}
/*!
* \brief Returns \p expr as \p NodeType, or null if it is not of that type. Looks through
* any "on_device" annotations.
*/
template <typename NodeType>
const NodeType* AsIgnoringOnDevice(const Expr& expr) {
const auto* node = expr.as<NodeType>();
if (node != nullptr) {
return node;
}
OnDeviceProps props = GetOnDeviceProps(expr);
if (!props.body.defined()) {
return nullptr;
}
return props.body.as<NodeType>();
}
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_OP_MEMORY_ON_DEVICE_H_
| https://github.com/zk-ml/tachikoma |
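A short sketch of the lookup helpers above, assuming an `Expr expr` in scope; `Rewrite` is a placeholder for whatever transformation a pass applies:

// Look through a possible "on_device" wrapper to test for a constant.
if (const auto* constant = AsIgnoringOnDevice<ConstantNode>(expr)) {
  // expr is a (possibly annotated) constant.
  (void)constant;
}
// Rewrite the body of an annotation while preserving its device and constraints.
OnDeviceProps props = GetOnDeviceProps(expr);
if (props.body.defined()) {
  expr = MaybeOnDeviceWithProps(Rewrite(props.body), props);
}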
src/relay/op/nn/convolution.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/relay/op/nn/convolution.h
 * \brief Shared property definitions of convolution operators.
*/
#ifndef TVM_RELAY_OP_NN_CONVOLUTION_H_
#define TVM_RELAY_OP_NN_CONVOLUTION_H_
#include <tvm/auto_scheduler/compute_dag.h>
#include <tvm/runtime/logging.h>
#include <tvm/tir/analysis.h>
#include <string>
#include <utility>
#include <vector>
#include "../op_common.h"
namespace tvm {
namespace relay {
bool Conv2DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter);
bool Conv2DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter);
template <typename AttrType>
bool Conv2DWinogradRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter) {
ICHECK_EQ(types.size(), 3);
const auto* data = types[0].as<TensorTypeNode>();
if (data == nullptr) return false;
static const Layout kNCHW("NCHW");
static const Layout kOIHW("OIHW");
const AttrType* param = attrs.as<AttrType>();
ICHECK(param != nullptr);
const Layout in_layout(param->data_layout);
const Layout kernel_layout(param->kernel_layout);
const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCHW);
ICHECK(trans_in_layout.defined())
<< "Conv only support input layouts that are convertible from NCHW."
<< " But got " << in_layout;
const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIHW);
ICHECK(trans_kernel_layout.defined())
<< "Conv only support kernel layouts that are convertible from OIHW."
<< " But got " << kernel_layout;
Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout);
const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNCHW);
ICHECK(trans_out_layout.defined())
<< "Conv only support output layouts that are convertible from NCHW."
<< " But got " << out_layout;
Array<IndexExpr> dshape_nchw = trans_in_layout.ForwardShape(data->shape);
IndexExpr channels, dilated_ksize_y, dilated_ksize_x;
ICHECK(param->kernel_size.defined() && param->channels.defined())
<< "The kernel size and channels of a Conv must be set or inferred by previous pass";
ICHECK_EQ(param->kernel_size.size(), 2);
ICHECK_EQ(param->dilation.size(), 2);
channels = param->channels;
dilated_ksize_y = 1 + (param->kernel_size[0] - 1) * param->dilation[0];
dilated_ksize_x = 1 + (param->kernel_size[1] - 1) * param->dilation[1];
// NOTE: Do not check weight shape here!
// Different backend requires different layout to compute
// the batch gemm stage in winograd efficiently, but we want to
// make this op work for all backends.
// So we accept all weight shapes, and assume the TOPI developers
// can handle this correctly in alter_op_layout.
// dilation
Array<IndexExpr> oshape({dshape_nchw[0], channels, 0, 0});
IndexExpr pad_h, pad_w;
GetPaddingHeightWidth(param->padding, &pad_h, &pad_w);
if (!dshape_nchw[2].as<tir::AnyNode>()) {
oshape.Set(2, (dshape_nchw[2] + pad_h - dilated_ksize_y) / param->strides[0] + 1);
} else {
oshape.Set(2, dshape_nchw[2]);
}
if (!dshape_nchw[3].as<tir::AnyNode>()) {
oshape.Set(3, (dshape_nchw[3] + pad_w - dilated_ksize_x) / param->strides[1] + 1);
} else {
oshape.Set(3, dshape_nchw[3]);
}
DataType out_dtype = param->out_dtype;
if (out_dtype.bits() == 0) {
out_dtype = data->dtype;
}
oshape = trans_out_layout.BackwardShape(oshape);
// assign output type
reporter->Assign(types[2], TensorType(oshape, out_dtype));
return true;
}
template <typename T>
InferCorrectLayoutOutput ConvInferCorrectLayout(const Attrs& attrs,
const Array<Layout>& new_in_layouts,
const Array<Layout>& old_in_layouts,
const Array<tvm::relay::Type>& old_in_types) {
const T* params = attrs.as<T>();
  // We always make other operators fit the layouts of convolution layers,
  // so this inference ignores all inputs.
return InferCorrectLayoutOutput(
{params->data_layout, params->kernel_layout},
{params->out_layout == "" ? params->data_layout : params->out_layout}, attrs);
}
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_OP_NN_CONVOLUTION_H_
| https://github.com/zk-ml/tachikoma |
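As a concrete instance of the output-shape computation in Conv2DWinogradRel: with an input height of 224, padding of 1 on each side (pad_h = 2), a 3x3 kernel with dilation 1 (dilated_ksize_y = 1 + (3 - 1) * 1 = 3) and stride 1, the output height is (224 + 2 - 3) / 1 + 1 = 224, i.e. the spatial size is preserved.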
src/relay/op/nn/convolution_make.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/relay/op/nn/convolution_make.h
* \brief utilities for creating convolution ops
*/
#ifndef TVM_RELAY_OP_NN_CONVOLUTION_MAKE_H_
#define TVM_RELAY_OP_NN_CONVOLUTION_MAKE_H_
#include <tvm/relay/attrs/nn.h>
#include <tvm/relay/op.h>
#include <string>
#include <utility>
#include <vector>
namespace tvm {
namespace relay {
template <typename T>
inline Expr MakeConv(Expr data, Expr weight, Array<IndexExpr> strides, Array<IndexExpr> padding,
Array<IndexExpr> dilation, int groups, IndexExpr channels,
Array<IndexExpr> kernel_size, std::string data_layout,
std::string kernel_layout, std::string out_layout, DataType out_dtype,
std::string op_name) {
auto attrs = make_object<T>();
attrs->strides = std::move(strides);
attrs->padding = std::move(padding);
attrs->dilation = std::move(dilation);
attrs->groups = groups;
attrs->channels = std::move(channels);
attrs->kernel_size = std::move(kernel_size);
attrs->data_layout = std::move(data_layout);
attrs->kernel_layout = std::move(kernel_layout);
attrs->out_layout = std::move(out_layout);
attrs->out_dtype = std::move(out_dtype);
const Op& op = Op::Get(op_name);
return Call(op, {data, weight}, Attrs(attrs), {});
}
template <typename T>
inline Expr MakeConvWinograd(Expr data, Expr weight, int tile_size, Array<IndexExpr> strides,
Array<IndexExpr> padding, Array<IndexExpr> dilation, int groups,
IndexExpr channels, Array<IndexExpr> kernel_size,
std::string data_layout, std::string kernel_layout,
std::string out_layout, DataType out_dtype, std::string op_name) {
auto attrs = make_object<T>();
attrs->tile_size = tile_size;
attrs->strides = std::move(strides);
attrs->padding = std::move(padding);
attrs->dilation = std::move(dilation);
attrs->groups = groups;
attrs->channels = std::move(channels);
attrs->kernel_size = std::move(kernel_size);
attrs->data_layout = std::move(data_layout);
attrs->kernel_layout = std::move(kernel_layout);
attrs->out_layout = std::move(out_layout);
attrs->out_dtype = std::move(out_dtype);
const Op& op = Op::Get(op_name);
return Call(op, {data, weight}, Attrs(attrs), {});
}
template <typename T>
inline Expr MakeConvGemm(Expr data, Expr weight, Array<IndexExpr> strides, Array<IndexExpr> padding,
Array<IndexExpr> dilation, int groups, IndexExpr channels,
Array<IndexExpr> kernel_size, std::string data_layout,
std::string kernel_layout, std::string out_layout, DataType out_dtype,
std::string op_name) {
auto attrs = make_object<T>();
attrs->strides = std::move(strides);
attrs->padding = std::move(padding);
attrs->dilation = std::move(dilation);
attrs->groups = groups;
attrs->channels = std::move(channels);
attrs->kernel_size = std::move(kernel_size);
attrs->data_layout = std::move(data_layout);
attrs->kernel_layout = std::move(kernel_layout);
attrs->out_layout = std::move(out_layout);
attrs->out_dtype = std::move(out_dtype);
const Op& op = Op::Get(op_name);
return Call(op, {data, weight}, Attrs(attrs), {});
}
template <typename T>
inline Expr MakeConvTranspose(Expr data, Expr weight, Array<IndexExpr> strides,
Array<IndexExpr> padding, Array<IndexExpr> dilation, int groups,
IndexExpr channels, Array<IndexExpr> kernel_size,
std::string data_layout, std::string kernel_layout,
std::string out_layout, Array<IndexExpr> output_padding,
DataType out_dtype, std::string op_name) {
auto attrs = make_object<T>();
attrs->strides = std::move(strides);
attrs->padding = std::move(padding);
attrs->dilation = std::move(dilation);
attrs->groups = groups;
attrs->channels = std::move(channels);
attrs->kernel_size = std::move(kernel_size);
attrs->data_layout = std::move(data_layout);
attrs->kernel_layout = std::move(kernel_layout);
attrs->out_layout = std::move(out_layout);
attrs->output_padding = std::move(output_padding);
attrs->out_dtype = std::move(out_dtype);
const Op& op = Op::Get(op_name);
return Call(op, {data, weight}, Attrs(attrs), {});
}
template <typename T>
inline Expr MakeDeformableConv(Expr data, Expr offset, Expr weight, Array<IndexExpr> strides,
Array<IndexExpr> padding, Array<IndexExpr> dilation,
int deformable_groups, int groups, int channels,
Array<IndexExpr> kernel_size, std::string data_layout,
std::string kernel_layout, std::string out_layout,
DataType out_dtype, std::string op_name) {
auto attrs = make_object<T>();
attrs->strides = strides;
attrs->padding = padding;
attrs->dilation = dilation;
attrs->deformable_groups = deformable_groups;
attrs->groups = groups;
attrs->channels = channels;
attrs->kernel_size = kernel_size;
attrs->data_layout = data_layout;
attrs->kernel_layout = kernel_layout;
attrs->out_layout = out_layout;
attrs->out_dtype = out_dtype;
const Op& op = Op::Get(op_name);
return Call(op, {data, offset, weight}, Attrs{attrs}, {});
}
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_OP_NN_CONVOLUTION_MAKE_H_
| https://github.com/zk-ml/tachikoma |
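A sketch of how a registration file might instantiate MakeConv for a 2-D convolution; the `data` and `weight` variables are assumed to be Relay Exprs in scope, while Conv2DAttrs and the "nn.conv2d" op name are the standard Relay ones:

Expr conv = MakeConv<Conv2DAttrs>(
    data, weight,
    /*strides=*/{1, 1}, /*padding=*/{1, 1}, /*dilation=*/{1, 1},
    /*groups=*/1, /*channels=*/64, /*kernel_size=*/{3, 3},
    /*data_layout=*/"NCHW", /*kernel_layout=*/"OIHW", /*out_layout=*/"",
    /*out_dtype=*/DataType::Float(32), /*op_name=*/"nn.conv2d");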
src/relay/op/nn/nn.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/relay/op/nn/nn.h
 * \brief Shared property definitions of nn operators.
*/
#ifndef TVM_RELAY_OP_NN_NN_H_
#define TVM_RELAY_OP_NN_NN_H_
#include <tvm/auto_scheduler/compute_dag.h>
#include <tvm/ir/attrs.h>
#include <tvm/ir/expr.h>
#include <tvm/relay/type.h>
#include <algorithm>
#include <utility>
#include <vector>
#include "../op_common.h"
namespace tvm {
namespace relay {
template <typename AttrType>
bool MatmulRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter) {
ICHECK_EQ(types.size(), 3);
const auto* tensor_a = types[0].as<TensorTypeNode>();
const auto* tensor_b = types[1].as<TensorTypeNode>();
if (tensor_a == nullptr) return false;
ICHECK(static_cast<int>(tensor_a->shape.size()) != 0);
const AttrType* param = attrs.as<AttrType>();
ICHECK(param != nullptr);
TensorType meta_schedule_tensor_b{nullptr};
if (param->meta_schedule_original_shape.size() > 0) {
meta_schedule_tensor_b = TensorType(param->meta_schedule_original_shape,
tensor_b == nullptr ? tensor_a->dtype : tensor_b->dtype);
tensor_b = meta_schedule_tensor_b.get();
}
// Default set to dense layout
bool transpose_a = false;
bool transpose_b = true;
const auto& mattrs = attrs.as<MatmulAttrs>();
if (mattrs != nullptr) {
transpose_a = mattrs->transpose_a;
transpose_b = mattrs->transpose_b;
}
const Array<tvm::PrimExpr>& dshape = tensor_a->shape;
Array<tvm::PrimExpr> oshape = dshape;
tvm::PrimExpr reduce = dshape[dshape.size() - 1];
if (transpose_a) {
reduce = dshape[dshape.size() - 2];
oshape.Set((oshape.size() - 2), dshape[oshape.size() - 1]);
}
auto tensor_b_dtype = (tensor_b == nullptr ? tensor_a->dtype : tensor_b->dtype);
if (param->units.defined()) {
// validate the tensor_b shape is proper if defined
// Assign tensor_b type
const Array<IndexExpr>& wshape = transpose_b ? Array<IndexExpr>({param->units, reduce})
: Array<IndexExpr>({reduce, param->units});
// It is possible for tensor_b to be nullptr in which case we will use
// data dtype as the tensor_b dtype. However if tensor_b dtype is explicitly
// present we will use that.
if (param->auto_scheduler_rewritten_layout.size() != 0) {
// If the layout is rewritten by auto-scheduler or meta-schedule,
// we just forcefully apply the layout provided by auto-scheduler and
// skip the normal inference logic.
{} // do nothing
} else if (param->meta_schedule_original_shape.size() == 0) {
// Normal case: assign result to reporter
reporter->Assign(types[1], TensorType(wshape, tensor_b_dtype));
}
oshape.Set((oshape.size() - 1), param->units);
} else {
if (tensor_b == nullptr) return false;
const Array<tvm::PrimExpr>& wshape = tensor_b->shape;
// When tensor_b's layout has been rewritten, figure it out based on the
// total number of elements and input dimensions.
if (param->auto_scheduler_rewritten_layout.size() != 0) {
PrimExpr tensor_b_elements = 1;
for (size_t i = 0; i < wshape.size(); i++) {
tensor_b_elements = tensor_b_elements * wshape[i];
}
oshape.Set(oshape.size() - 1, tensor_b_elements / dshape[dshape.size() - 1]);
// Otherwise just pull it out of the tensor_b shape directly.
} else {
if (param->auto_scheduler_rewritten_layout.size() == 0 &&
param->meta_schedule_original_shape.size() == 0) {
// ensure inner dimension matches between data and weight. If one inner
// dimension is dynamic then it is inferred to match the other inner
// dimension.
std::vector<PrimExpr> A_shape(tensor_a->shape.begin(), tensor_a->shape.end());
std::vector<PrimExpr> B_shape(tensor_b->shape.begin(), tensor_b->shape.end());
auto sa = A_shape.size();
auto sb = B_shape.size();
if (transpose_a && transpose_b) {
auto tmp = A_shape[sa - 2];
A_shape[sa - 2] = B_shape[sb - 1];
B_shape[sb - 1] = tmp;
} else if (transpose_a) {
auto tmp = A_shape[sa - 2];
A_shape[sa - 2] = B_shape[sb - 2];
B_shape[sb - 2] = tmp;
} else if (transpose_b) {
auto tmp = A_shape[sa - 1];
A_shape[sa - 1] = B_shape[sb - 1];
B_shape[sb - 1] = tmp;
} else {
auto tmp = A_shape[sa - 1];
A_shape[sa - 1] = B_shape[sb - 2];
B_shape[sb - 2] = tmp;
}
reporter->Assign(types[0], TensorType(A_shape, tensor_a->dtype));
reporter->Assign(types[1], TensorType(B_shape, tensor_b_dtype));
}
oshape.Set(oshape.size() - 1, transpose_b ? wshape[0] : wshape[1]);
}
}
DataType out_dtype = param->out_dtype;
if (out_dtype.bits() == 0) {
out_dtype = tensor_a->dtype;
}
// assign output type
reporter->Assign(types[2], TensorType(oshape, out_dtype));
return true;
}
template <typename AttrType>
bool BatchMatmulRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter) {
ICHECK_EQ(types.size(), 3);
const auto* x = types[0].as<TensorTypeNode>();
const auto* y = types[1].as<TensorTypeNode>();
if (x == nullptr || y == nullptr) return false;
const AttrType* param = attrs.as<AttrType>();
DataType out_dtype = param->out_dtype;
if (out_dtype.bits() == 0) {
out_dtype = x->dtype;
if (x->dtype.bits() == 0) {
out_dtype = y->dtype;
}
}
TensorType meta_schedule_y{nullptr};
if (param->meta_schedule_original_shape.size() != 0) {
meta_schedule_y = TensorType(param->meta_schedule_original_shape, out_dtype);
y = meta_schedule_y.get();
}
ICHECK(param != nullptr);
bool transpose_a = param->transpose_a;
bool transpose_b = param->transpose_b;
Array<PrimExpr> y_shape{nullptr};
if (param->auto_scheduler_rewritten_layout.size() != 0) {
y_shape = auto_scheduler::GetShapeFromRewrittenLayout(
param->auto_scheduler_rewritten_layout,
transpose_b ? tvm::runtime::Array<tvm::runtime::String>({"b", "j", "k"})
: tvm::runtime::Array<tvm::runtime::String>({"b", "k", "j"}));
} else if (param->meta_schedule_original_shape.size() != 0) {
y_shape = param->meta_schedule_original_shape;
} else {
y_shape = y->shape;
}
ICHECK(x->shape.size() == 3 && y_shape.size() == 3);
const PrimExpr& xb = x->shape[0];
const PrimExpr& xi = x->shape[transpose_a ? 2 : 1];
const PrimExpr& xk = x->shape[transpose_a ? 1 : 2];
const PrimExpr& yb = y_shape[0];
const PrimExpr& yk = y_shape[transpose_b ? 2 : 1];
const PrimExpr& yj = y_shape[transpose_b ? 1 : 2];
bool is_dyn = false;
for (size_t i = 0; i < 3; ++i) {
if (x->shape[i].as<tir::AnyNode>() != nullptr || y_shape[i].as<tir::AnyNode>() != nullptr) {
is_dyn = true;
break;
}
}
if (!is_dyn) {
ICHECK(reporter->AssertEQ(xb, yb) || reporter->AssertEQ(xb, 1) || reporter->AssertEQ(yb, 1))
<< "BatchDot: batch dimensions don't match, "
<< " x shape=" << x->shape << ", y shape=" << y_shape;
    ICHECK(reporter->AssertEQ(xk, yk)) << "BatchDot: shapes of x and y are inconsistent, "
<< " x shape=" << x->shape << ", y shape=" << y_shape;
}
// assign output type
const auto& out_b =
xb->IsInstance<tir::AnyNode>() || yb->IsInstance<tir::AnyNode>() ? tir::Any() : max(xb, yb);
reporter->Assign(types[2], TensorType(Array<tvm::PrimExpr>({out_b, xi, yj}), out_dtype));
return true;
}
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_OP_NN_NN_H_
| https://github.com/zk-ml/tachikoma |
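To make the index juggling in BatchMatmulRel concrete: for x of shape (B, M, K) with transpose_a = false and y of shape (B, N, K) with transpose_b = true, the code reads xi = M, xk = K, yk = K and yj = N, checks that xk matches yk and that the batch dimensions are compatible, and assigns the output type shape (B, M, N).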
src/relay/op/nn/pooling.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
 * \file src/relay/op/nn/pooling.h
 * \brief Utilities for creating pooling ops.
 */
#ifndef TVM_RELAY_OP_NN_POOLING_H_
#define TVM_RELAY_OP_NN_POOLING_H_
#include <tvm/relay/attrs/nn.h>
#include <tvm/relay/op.h>
#include <utility>
namespace tvm {
namespace relay {
template <typename T>
inline Expr MakeMaxPool(Expr data, Array<IndexExpr> pool_size, Array<IndexExpr> strides,
Array<IndexExpr> dilation, Array<IndexExpr> padding, String layout,
String out_layout, bool ceil_mode, String op_name) {
auto attrs = make_object<T>();
attrs->pool_size = std::move(pool_size);
attrs->strides = std::move(strides);
attrs->dilation = std::move(dilation);
attrs->padding = std::move(padding);
attrs->layout = std::move(layout);
attrs->out_layout = std::move(out_layout);
attrs->ceil_mode = ceil_mode;
static const Op& op = Op::Get(op_name);
return Call(op, {data}, Attrs(attrs), {});
}
template <typename T>
inline Expr MakeAvgPool(Expr data, Array<IndexExpr> pool_size, Array<IndexExpr> strides,
Array<IndexExpr> dilation, Array<IndexExpr> padding, String layout,
String out_layout, bool ceil_mode, bool count_include_pad, String op_name) {
auto attrs = make_object<T>();
attrs->pool_size = std::move(pool_size);
attrs->strides = std::move(strides);
attrs->dilation = std::move(dilation);
attrs->padding = std::move(padding);
attrs->layout = std::move(layout);
attrs->out_layout = std::move(out_layout);
attrs->ceil_mode = ceil_mode;
attrs->count_include_pad = count_include_pad;
static const Op& op = Op::Get(op_name);
return Call(op, {data}, Attrs(attrs), {});
}
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_OP_NN_POOLING_H_
| https://github.com/zk-ml/tachikoma |
src/relay/op/nn/upsampling.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
*
* \file src/relay/op/nn/upsampling.h
 * \brief Implementation of the InferCorrectLayout pass for upsampling.
*/
#ifndef TVM_RELAY_OP_NN_UPSAMPLING_H_
#define TVM_RELAY_OP_NN_UPSAMPLING_H_
#include <tvm/relay/attrs/nn.h>
#include <tvm/tir/data_layout.h>
#include "../op_common.h"
namespace tvm {
namespace relay {
template <typename T>
InferCorrectLayoutOutput UpsamplingInferCorrectLayout(const Attrs& attrs,
const Array<Layout>& new_in_layouts,
const Array<Layout>& old_in_layouts,
const Array<tvm::relay::Type>& old_in_types) {
const auto* attrs_ptr = attrs.as<T>();
ICHECK(attrs_ptr);
ObjectPtr<T> params = make_object<T>(*attrs_ptr);
if (new_in_layouts.defined()) {
ICHECK_EQ(new_in_layouts.size(), 1);
Layout raw_layout(params->layout);
Layout input = new_in_layouts[0];
if (input.IndexOf(LayoutAxis::Get('W')) == raw_layout.IndexOf(LayoutAxis::Get('W')) &&
input.IndexOf(LayoutAxis::Get('H')) == raw_layout.IndexOf(LayoutAxis::Get('H')) &&
!input.Contains(LayoutAxis::Get('w')) && !input.Contains(LayoutAxis::Get('h')) &&
(input.IndexOf(LayoutAxis::Get('D')) == -1 ||
(input.IndexOf(LayoutAxis::Get('D')) == raw_layout.IndexOf(LayoutAxis::Get('D')) &&
!input.Contains(LayoutAxis::Get('d'))))) {
params->layout = input.name(); // modify self to follow the input layout
}
}
return InferCorrectLayoutOutput({params->layout}, {params->layout}, Attrs(params));
}
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_OP_NN_UPSAMPLING_H_
| https://github.com/zk-ml/tachikoma |
src/relay/op/op_common.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file op_common.h
 * \brief A set of utilities and common functionality
 * for Relay ops.
*/
#ifndef TVM_RELAY_OP_OP_COMMON_H_
#define TVM_RELAY_OP_OP_COMMON_H_
#include <tvm/relay/expr.h>
#include <tvm/relay/op.h>
#include <tvm/relay/op_attr_types.h>
#include <string>
#include <unordered_map>
#include <vector>
#include "../transforms/infer_layout_utils.h"
#include "type_relations.h"
namespace tvm {
namespace relay {
/*! Quick helper macro
* - Expose a positional make function to construct the node.
* - Register op to the registry.
*
 * We make the decision to only expose positional arguments.
 * We will do rewrapping in the frontend to support language
 * sugar such as keyword arguments and default values.
 * \param OpName the name of the registry.
*/
#define RELAY_REGISTER_UNARY_OP(OpName) \
TVM_REGISTER_GLOBAL("relay.op._make." OpName).set_body_typed([](Expr data) { \
static const Op& op = Op::Get(OpName); \
return Call(op, {data}, Attrs(), {}); \
}); \
RELAY_REGISTER_OP(OpName) \
.set_num_inputs(1) \
.add_argument("data", "Tensor", "The input tensor.") \
.add_type_rel("Identity", IdentityRel) \
.set_attr<TOpPattern>("TOpPattern", kElemWise) \
.set_attr<TOpIsStateful>("TOpIsStateful", false) \
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
/*! Quick helper macro
* - Expose a positional make function to construct the node.
* - Register op to the registry.
*
 * We make the decision to only expose positional arguments.
 * We will do rewrapping in the frontend to support language
 * sugar such as keyword arguments and default values.
 *
 * \param OpName the name of the registry.
*/
#define RELAY_REGISTER_BINARY_OP(OpName) \
TVM_REGISTER_GLOBAL("relay.op._make." OpName).set_body_typed([](Expr lhs, Expr rhs) { \
static const Op& op = Op::Get(OpName); \
return Call(op, {lhs, rhs}, Attrs(), {}); \
}); \
RELAY_REGISTER_OP(OpName) \
.set_num_inputs(2) \
.add_argument("lhs", "Tensor", "The left hand side tensor.") \
.add_argument("rhs", "Tensor", "The right hand side tensor.") \
.add_type_rel("Broadcast", BroadcastRel) \
.set_attr<TOpPattern>("TOpPattern", kBroadcast) \
.set_attr<TOpIsStateful>("TOpIsStateful", false) \
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", BinaryBroadcastLayout)
// Comparisons
#define RELAY_REGISTER_CMP_OP(OpName) \
TVM_REGISTER_GLOBAL("relay.op._make." OpName).set_body_typed([](Expr lhs, Expr rhs) { \
static const Op& op = Op::Get(OpName); \
return Call(op, {lhs, rhs}, Attrs(), {}); \
}); \
RELAY_REGISTER_OP(OpName) \
.set_num_inputs(2) \
.add_argument("lhs", "Tensor", "The left hand side tensor.") \
.add_argument("rhs", "Tensor", "The right hand side tensor.") \
.add_type_rel("BroadcastComp", BroadcastCompRel) \
.set_attr<TOpPattern>("TOpPattern", kBroadcast) \
.set_attr<TOpIsStateful>("TOpIsStateful", false) \
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", BinaryBroadcastLayout)
/*! \brief A helper class for matching and rewriting operators. */
template <typename R>
class OpMatch {
public:
using MatchFunc =
std::function<R(const Array<Expr>& args, const Attrs& attrs, const Array<Type>& type_args)>;
/*! \brief Match an operator with the given name.
* \param op_name The name of the operator to match.
* \param func The function to execute when it matches.
* \return A self-reference for builder style API.
*/
inline OpMatch& Match(const std::string& op_name, MatchFunc func) {
auto op = Op::Get(op_name);
match_map_.insert({op, func});
return *this;
}
/*! \brief Rewrite a call operation based on the operator and the registered
* match functions.
* \param call The call to rewrite.
* \return The result of rewriting.
*/
inline R operator()(const Call& call) {
auto it = match_map_.find(Downcast<Op>(call->op));
if (it != match_map_.end()) {
return it->second(call->args, call->attrs, call->type_args);
} else {
if (default_ != nullptr) {
return default_(call->args, call->attrs, call->type_args);
} else {
LOG(FATAL) << "unexpected operation " << call->op;
}
}
}
private:
/*! \brief The match function map. */
std::unordered_map<Op, MatchFunc, ObjectPtrHash, ObjectPtrEqual> match_map_;
/*! \brief An optional default case. */
MatchFunc default_;
};
/*! \brief A utility function to get the padding width from a tuple of 1 or 2 ints. */
inline void GetPaddingWidth(const Array<IndexExpr>& padding, IndexExpr* pad_w) {
if (padding.size() == 1) {
*pad_w = padding[0] * 2;
} else if (padding.size() == 2) {
*pad_w = padding[0] + padding[1];
} else {
ICHECK_EQ(padding.size(), 4) << " Expected padding size of 1 or 2, found " << padding.size();
}
}
/*! \brief A utility function to get the padding height and width from a tuple of 1, 2, or 4 ints. */
inline void GetPaddingHeightWidth(const Array<IndexExpr>& padding, IndexExpr* pad_h,
IndexExpr* pad_w) {
if (padding.size() == 1) {
*pad_h = padding[0] * 2;
*pad_w = padding[0] * 2;
} else if (padding.size() == 2) {
*pad_h = padding[0] * 2;
*pad_w = padding[1] * 2;
} else if (padding.size() == 4) {
*pad_h = padding[0] + padding[2];
*pad_w = padding[1] + padding[3];
} else {
ICHECK_EQ(padding.size(), 4) << " Padding size should be 1, 2 or 4, but got " << padding.size();
}
}
/*! \brief A utility function to get the padding depth, height, and width from a tuple of 1, 3, or 6 ints. */
inline void GetPaddingDepthHeightWidth(const Array<IndexExpr>& padding, IndexExpr* pad_d,
IndexExpr* pad_h, IndexExpr* pad_w) {
if (padding.size() == 1) {
*pad_d = padding[0] * 2;
*pad_h = padding[0] * 2;
*pad_w = padding[0] * 2;
} else if (padding.size() == 3) {
*pad_d = padding[0] * 2;
*pad_h = padding[1] * 2;
*pad_w = padding[2] * 2;
} else if (padding.size() == 6) {
*pad_d = padding[0] + padding[3];
*pad_h = padding[1] + padding[4];
*pad_w = padding[2] + padding[5];
} else {
ICHECK_EQ(padding.size(), 6) << " Padding size should be 1, 3 or 6, but got " << padding.size();
}
}
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_OP_OP_COMMON_H_
| https://github.com/zk-ml/tachikoma |
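A sketch of how RELAY_REGISTER_UNARY_OP is used from an op registration translation unit; the description text here is illustrative, but the chaining pattern mirrors how Relay's own unary ops (e.g. "log") are registered:

RELAY_REGISTER_UNARY_OP("log")
    .describe("Elementwise natural logarithm of the input tensor.")
    .set_support_level(1);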
src/relay/op/tensor/transform.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/relay/op/tensor/transform.h
* \brief Transform op attributes that can be shared among Relay and its dialects.
*/
#ifndef TVM_RELAY_OP_TENSOR_TRANSFORM_H_
#define TVM_RELAY_OP_TENSOR_TRANSFORM_H_
#include <tvm/ir/error.h>
#include <tvm/relay/attrs/transform.h>
#include <tvm/relay/op_attr_types.h>
#include <algorithm>
#include <limits>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "../../transforms/infer_layout_utils.h"
#include "../make_op.h"
namespace tvm {
namespace relay {
template <typename AttrType>
bool ConcatenateRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter) {
// types: [data, result]
ICHECK_EQ(types.size(), 2) << "the arity of concatenate is 2, not " << types.size();
  /* If we receive a tuple we can continue; if we receive
   * anything other than a tuple or an incomplete type we
   * should signal an error.
   */
const auto* tensor_tuple = types[0].as<TupleTypeNode>();
if (tensor_tuple == nullptr) {
reporter->GetDiagCtx().EmitFatal(
Diagnostic::Error(reporter->GetSpan())
<< "concatenate requires a tuple of tensors as the first argument, found "
<< PrettyPrint(types[0]));
return false;
} else if (types[0].as<IncompleteTypeNode>() != nullptr) {
return false;
}
const auto* param = attrs.as<AttrType>();
if (param == nullptr) {
reporter->GetDiagCtx().EmitFatal(Diagnostic::Error(reporter->GetSpan())
<< "the call attributes are not defined");
return false;
}
if (tensor_tuple->fields[0].as<IncompleteTypeNode>()) {
return false;
}
const auto& first = Downcast<TensorType>(tensor_tuple->fields[0]);
// Sanity check: ndim and dtype.
const int ndim = static_cast<int>(first->shape.size());
const DataType dtype = first->dtype;
// Sanity check: axis
int axis = param->axis;
if (!(-ndim <= axis && axis < ndim)) {
throw CompileError(ErrorBuilder() << "concatenate only accepts `axis` in [-ndim, ndim)"
<< ", but got axis = " << axis << ", and ndim = " << ndim);
}
axis = axis < 0 ? ndim + axis : axis;
for (const Type& ele : tensor_tuple->fields) {
if (ele.as<IncompleteTypeNode>()) {
return false;
}
const auto& e = Downcast<TensorType>(ele);
int e_ndim = static_cast<int>(e->shape.size());
const DataType& e_dtype = e->dtype;
if (e_ndim != ndim) {
throw Error("relay.concatenate requires all tensors have the same ndim");
}
if (e_dtype != dtype) {
throw Error("relay.concatenate requires all tensors have the same dtype");
}
}
// Calculate shape
std::vector<IndexExpr> oshape(ndim);
const size_t data_length = tensor_tuple->fields.size();
// Accumulate the concat axis output dim or decide if this is dynamic concat
bool is_dynamic_concat = false;
std::vector<TensorType> input_tensors;
IndexExpr concat_output_dim = first->shape[axis];
for (size_t i = 0; i < data_length; ++i) {
const auto& e = Downcast<TensorType>(tensor_tuple->fields[i]);
input_tensors.push_back(e);
if (e->shape[axis].as<AnyNode>()) {
is_dynamic_concat = true;
concat_output_dim = Any();
} else if (i > 0 && !is_dynamic_concat) {
// accumulate axis dimension
concat_output_dim += e->shape[axis];
}
}
oshape[axis] = concat_output_dim;
for (int i = 0; i < ndim; ++i) {
if (i == axis) {
// The concat axis is already handled above.
// The rest of the body sets the output shape for non-concat axes
continue;
}
std::vector<IndexExpr> non_any;
for (size_t j = 0; j < data_length; ++j) {
const auto& e = input_tensors[j];
if (!e->shape[i].as<AnyNode>()) {
non_any.push_back(e->shape[i]);
}
}
size_t non_any_size = non_any.size();
for (size_t k = 1; k < non_any_size; k++) {
if (reporter->AssertEQ(non_any[0], non_any[k])) continue;
throw Error(
"relay.concatenate requires all tensors have the same shape "
"on non-concatenating axes");
}
if (non_any_size == data_length) {
// All static case
oshape[i] = non_any[0];
} else if (non_any_size > 0 && is_dynamic_concat) {
// For non-concat axes, we want to enforce static shape constraint.
// However, if the concat axis is static, the output shape would become static while
// the input could be partially static/dynamic. To prevent runtime segfaults due to the lack
// of runtime input shape checking for such cases, static shape constraint is only enforced
// when the output concat axis is dynamic.
//
// Examples (both concat on the first axis):
// * [(?, 3), (?, ?)] -> (?, 3)
// * [(1, 3), (1, ?)] -> (2, ?)
oshape[i] = non_any[0];
} else {
oshape[i] = Any();
}
}
auto rtype = TensorType(oshape, dtype);
reporter->Assign(types[1], rtype);
return true;
}
static inline InferCorrectLayoutOutput ConcatenateLayout(
const Attrs& attrs, const Array<Layout>& new_in_layouts, const Array<Layout>& old_in_layouts,
const Array<tvm::relay::Type>& old_in_types) {
const auto* attrs_ptr = attrs.as<ConcatenateAttrs>();
ICHECK(attrs_ptr);
ObjectPtr<ConcatenateAttrs> param = make_object<ConcatenateAttrs>(*attrs_ptr);
Array<Array<IndexExpr>> old_in_shapes;
ICHECK_EQ(old_in_types.size(), 1);
for (auto old_in_tuple_t : old_in_types) {
ICHECK(old_in_tuple_t.as<TupleTypeNode>());
for (auto old_in_t : old_in_tuple_t.as<TupleTypeNode>()->fields) {
old_in_shapes.push_back(old_in_t.as<TensorTypeNode>()->shape);
}
}
size_t axis =
param->axis < 0 ? param->axis + old_in_shapes[0].size() : static_cast<size_t>(param->axis);
Layout ret;
bool is_new_layout_selected = false;
if (new_in_layouts.defined()) { // this function is called after some operators are alternated.
    // If all the new input layouts are the same, the new input layout gets selected. For axis, the new
// axis in the new layout is identified. The param->axis is then modified on the fly to conform
// to the new input layout.
const auto& concate_dim = old_in_layouts[0][axis];
bool all_input_layouts_same = true;
for (auto new_layout : new_in_layouts) {
if (!new_layout.Equals(new_in_layouts[0])) {
all_input_layouts_same = false;
}
}
if (all_input_layouts_same) {
auto new_index = new_in_layouts[0].IndexOf(concate_dim);
ret = new_in_layouts[0];
param->axis = new_index;
is_new_layout_selected = true;
}
}
if (!is_new_layout_selected) {
// this function is called on the original correct relay ir
for (size_t i = 0; i < old_in_layouts.size(); ++i) {
if (old_in_layouts[i].defined()) {
ret = old_in_layouts[i];
break;
}
}
if (ret.ndim() <= axis || !ret[axis].IsPrimal()) {
return InferCorrectLayoutOutput({Layout::Undef()}, {Layout::Undef()}, attrs);
}
}
return InferCorrectLayoutOutput(Array<Layout>(old_in_layouts.size(), ret), {ret}, Attrs(param));
}
/*!
* \brief Infer output shape for reshape.
*
* \param data_shape The input data shape.
* \param attrs The attributes.
* \param reverse Whether to reverse the indices.
* \return Output shape.
*/
Array<IndexExpr> InferNewShape(const Array<IndexExpr>& data_shape, const Attrs& attrs,
bool reverse);
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_OP_TENSOR_TRANSFORM_H_
| https://github.com/zk-ml/tachikoma |
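For a fully static case of ConcatenateRel: concatenating tensors of shapes (2, 3) and (4, 3) along axis 0 accumulates the concat dimension to 2 + 4 = 6 and checks the remaining axes for equality, giving an output type of (6, 3).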
src/relay/op/type_relations.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file tvm/relay/op/type_relations.h
* \brief A set of utilities and common functionality
* for type relations.
*/
#ifndef TVM_RELAY_OP_TYPE_RELATIONS_H_
#define TVM_RELAY_OP_TYPE_RELATIONS_H_
#include <tvm/ir/error.h>
#include <tvm/relay/type.h>
#include <string>
namespace tvm {
namespace relay {
/*!
* \brief The identity type relation, all the types are equal.
*
* \param types The input and output types to the relation.
* \param num_inputs The number of input arguments.
* \param attrs The attributes
* \param reporter The reporter.
 * \return True if the relation has been resolved.
*/
bool IdentityRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter);
/*!
* \brief The broadcast type relation, implements the broadcasting
* rule over the two input types producing the broadcasted type.
*
* \param types The input and output types to the relation.
* \param num_inputs The number of input arguments.
* \param attrs The attributes
* \param reporter The reporter.
 * \return True if the relation has been resolved.
*/
bool BroadcastRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter);
/*!
* \brief Determine the broadcasted shape from two input shapes
 * \param t1 One of the two TensorTypes whose shapes are broadcast
 * \param t2 One of the two TensorTypes whose shapes are broadcast
 * \param output_dtype dtype of the output TensorType
 * \return A TensorType whose shape is broadcast from the two input TensorTypes.
*/
TensorType ConcreteBroadcast(const TensorType& t1, const TensorType& t2, DataType output_dtype);
/*!
* \brief The broadcast type relation, implements the broadcasting
* rule over the two input types producing the broadcasted type.
*
* This differs from BroadcastRel in the return dtype,
 * it instead returns bool(uint8), for use in comparison operators
* such as equal, not_equal, lt, and so on.
*
* \param types The input and output types to the relation.
* \param num_inputs The number of input arguments.
* \param attrs The attributes
* \param reporter The reporter.
 * \return True if the relation has been resolved.
*/
bool BroadcastCompRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter);
bool IdentityCompRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter);
Array<IndexExpr> RankShape(const Array<IndexExpr>& shape);
/*!
* \brief The shape of type relation.
*
* \param types The input and output types to the relation.
* \param num_inputs The number of input arguments.
* \param attrs The attributes
* \param reporter The reporter.
 * \return True if the relation has been resolved.
*/
bool ShapeOfRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter);
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_OP_TYPE_RELATIONS_H_
| https://github.com/zk-ml/tachikoma |
src/relay/op/vm/vm.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/relay/op/vm/vm.h
* \brief Dialect operators for Relay VM.
*/
#ifndef TVM_RELAY_OP_VM_VM_H_
#define TVM_RELAY_OP_VM_VM_H_
#include <tvm/relay/expr.h>
namespace tvm {
namespace relay {
Expr InvokeTVMOp(Expr func, Expr inputs, Expr outputs, DictAttrs attrs);
Expr ShapeOf(Expr expr);
Expr ReshapeTensor(Expr data, Expr shape, Array<PrimExpr> newshape);
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_OP_VM_VM_H_
| https://github.com/zk-ml/tachikoma |
src/relay/qnn/op/op_common.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/relay/qnn/op/op_common.h
* \brief A set of utilities and common functionality for QNN ops.
*/
#ifndef TVM_RELAY_QNN_OP_OP_COMMON_H_
#define TVM_RELAY_QNN_OP_OP_COMMON_H_
#include <tvm/relay/expr.h>
#include <tvm/relay/op.h>
#include <tvm/relay/op_attr_types.h>
#include <tvm/relay/qnn/attrs.h>
#include <tvm/relay/qnn/transform.h>
#include <vector>
#include "../../op/type_relations.h"
#include "../../transforms/infer_layout_utils.h"
#include "../utils.h"
namespace tvm {
namespace relay {
namespace qnn {
TVM_REGISTER_NODE_TYPE(BroadcastAttrs);
/*
* Number of inputs for the Qnn binary operators.
 * Refer to the QNN_REGISTER_BINARY_OP macro to see
 * which operators these are.
*/
static constexpr int kNumQnnBinaryOpInputs = 8;
/*
* Number of expected arg types.
*/
static constexpr int kNumQnnBinaryOpArgTypes = 9;
/*
* \brief Simple struct to organize the inputs to the Qnn
* binary operators. The main reason to have a struct
* is to be able to perform the common checks needed at a
* central location.
*/
struct QnnBinaryOpArguments {
Expr lhs;
Expr rhs;
Expr lhs_scale;
Expr lhs_zero_point;
Expr rhs_scale;
Expr rhs_zero_point;
Expr output_scale;
Expr output_zero_point;
explicit QnnBinaryOpArguments(const Array<Expr>& new_args) {
ICHECK_EQ(new_args.size(), kNumQnnBinaryOpInputs);
int idx = 0;
lhs = new_args[idx++];
rhs = new_args[idx++];
lhs_scale = new_args[idx++];
lhs_zero_point = new_args[idx++];
rhs_scale = new_args[idx++];
rhs_zero_point = new_args[idx++];
output_scale = new_args[idx++];
output_zero_point = new_args[idx++];
ICHECK_EQ(idx, kNumQnnBinaryOpInputs);
}
};
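/*
 * Sketch of intended usage (assumed, based on the lambda signature used by the QNN
 * canonicalization macros below): a binary op lowering function unpacks its positional
 * arguments through this struct.
 *
 *   Expr QnnBinaryOpCanonicalizeSketch(const Attrs& attrs, const Array<Expr>& new_args,
 *                                      const Array<tvm::relay::Type>& arg_types) {
 *     QnnBinaryOpArguments args(new_args);  // checks new_args.size() == kNumQnnBinaryOpInputs
 *     // args.lhs, args.rhs, args.lhs_scale, ... are then available by name.
 *     return args.lhs;  // placeholder body
 *   }
 */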
/*
* Number of inputs for the Qnn unary operators.
*/
static constexpr int kNumQnnUnaryOpInputs = 5;
/*
* Number of expected arg types.
*/
static constexpr int kNumQnnUnaryOpArgTypes = 6;
/*
* \brief Simple struct to organize the inputs to the Qnn
* unary operators. The main reason to have a struct
* is to be able to perform the common checks needed at a
* central location.
*/
struct QnnUnaryOpArguments {
Expr x;
Expr scale;
Expr zero_point;
Expr output_scale;
Expr output_zero_point;
explicit QnnUnaryOpArguments(const Array<Expr>& new_args) {
ICHECK_EQ(new_args.size(), kNumQnnUnaryOpInputs);
int idx = 0;
x = new_args[idx++];
scale = new_args[idx++];
zero_point = new_args[idx++];
output_scale = new_args[idx++];
output_zero_point = new_args[idx++];
ICHECK_EQ(idx, kNumQnnUnaryOpInputs);
}
};
/*
* \brief Simple structure to hold the input tensor's dtype
* and shape. This structure allows a common point to do
* all the validation checks for Qnn unary operators.
*/
struct QnnUnaryOpTensorType {
DataType dtype;
Array<PrimExpr> shape;
explicit QnnUnaryOpTensorType(const Array<tvm::relay::Type>& arg_types, const int32_t arg_idx) {
ICHECK_EQ(arg_types.size(), kNumQnnUnaryOpArgTypes);
auto tensor_type = arg_types[arg_idx].as<TensorTypeNode>();
ICHECK(tensor_type != nullptr);
dtype = tensor_type->dtype;
shape = tensor_type->shape;
}
};
/*
* \brief Simple structure to hold the input tensor's dtype
* and shape. This structure allows a common point to do
* all the validation checks for Qnn binary operators.
*/
struct QnnBinaryOpTensorType {
DataType dtype;
Array<PrimExpr> shape;
explicit QnnBinaryOpTensorType(const Array<tvm::relay::Type>& arg_types, const int32_t arg_idx) {
ICHECK_EQ(arg_types.size(), kNumQnnBinaryOpArgTypes);
auto tensor_type = arg_types[arg_idx].as<TensorTypeNode>();
ICHECK(tensor_type != nullptr);
dtype = tensor_type->dtype;
shape = tensor_type->shape;
}
};
/*
* \brief Converts the expression from expression's dtype
* to target dtype. This is mainly used for converting
* computations done in Int32 to lower precision Int8 or
* UInt8.
 * \param expr The expression whose dtype needs conversion.
* \param target_dtype The dtype of the target expression
* \return New expression with target dtype and possibly lower
* precision.
*/
inline Expr ConvertDtype(const Expr& expr, const DataType& target_dtype) {
auto q_min = GetQmin(target_dtype);
auto q_max = GetQmax(target_dtype);
auto output = Clip(expr, q_min, q_max);
return Cast(output, target_dtype);
}
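// Illustrative example: ConvertDtype(expr, DataType::UInt(8)) clips expr to
// [GetQmin(uint8), GetQmax(uint8)] == [0, 255] and then casts the result to uint8.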
/*
 * \brief Requantizes the given expression if either its scale or its
 * zero point does not match the target scale and zero point. This is
 * mainly needed for requantizing the input tensors with the output
 * tensor's scale and zero point to ease the computation of the final
 * quantized tensor.
* \param expr The expression on which the check needs to be performed.
* \param expr_scale The scale of the expression.
* \param expr_zero_point The zero point of the expression.
* \param target_scale The scale of the output tensor.
* \param target_zero_point The zero point of the output tensor.
* \param expr_shape The shape of the input expression.
 * \param axis The channel axis used for per-channel scales and zero points (defaults to -1).
 * \param target_dtype The dtype of the returned expression (defaults to Int32).
 * \return New expression that is requantized to the target scale and zero
 * point if they differ from the expression's scale or zero point; otherwise
 * the given expression is simply cast to the target dtype, as no requantization
 * is needed in this case.
*/
inline Expr RequantizeOrUpcast(const Expr& expr, const Expr& expr_scale,
const Expr& expr_zero_point, const Expr& target_scale,
const Expr& target_zero_point, const Array<PrimExpr>& expr_shape,
const int& axis = -1,
const DataType& target_dtype = DataType::Int(32)) {
auto result = expr;
if (!IsEqualScalar(expr_scale, target_scale) ||
!IsEqualScalar(expr_zero_point, target_zero_point)) {
result = Requantize(expr, expr_shape, expr_scale, expr_zero_point, target_scale,
target_zero_point, target_dtype, axis);
} else {
result = Cast(result, target_dtype);
}
return result;
}
/*! \brief Infer layout for QNN binary broadcast operators */
inline InferCorrectLayoutOutput QnnBinaryBroadcastLayout(
const Attrs& attrs, const Array<Layout>& new_in_layouts, const Array<Layout>& old_in_layouts,
const Array<tvm::relay::Type>& old_in_types) {
// Use Relay Binary Broadcast Infer correct layout.
auto layouts = BinaryBroadcastLayout(attrs, new_in_layouts, old_in_layouts, old_in_types);
// Fill the layouts of remaining input tensors - scales and zero points. The layouts of these
// tensors can be treated as C.
Layout channel_layout = Layout("C");
Array<Layout> input_layouts = {layouts->input_layouts[0],
layouts->input_layouts[1],
channel_layout,
channel_layout,
channel_layout,
channel_layout,
channel_layout,
channel_layout};
Array<Layout> output_layouts = layouts->output_layouts;
return InferCorrectLayoutOutput(input_layouts, output_layouts, attrs);
}
static inline bool QnnBroadcastRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
const TypeReporter& reporter) {
// Expected Types: lhs, rhs, lhs_scale, lhs_zero_point, rhs_scale, rhs_zero_point, output_scale,
// output_zero_point, out_type
ICHECK_EQ(types.size(), kNumQnnBinaryOpArgTypes);
// Check the lhs and rhs types
for (size_t i = 0; i < 2; ++i) {
if (types[i].as<IncompleteTypeNode>()) {
return false;
}
}
// Check the scale and zero point types
for (size_t i = 2; i < 8; ++i) {
if (types[i].as<IncompleteTypeNode>()) {
return false;
}
}
const auto* lhs_data = types[0].as<TensorTypeNode>();
const auto* rhs_data = types[1].as<TensorTypeNode>();
if (lhs_data == nullptr || rhs_data == nullptr) {
return false;
}
const BroadcastAttrs* broadcast_attrs = attrs.as<BroadcastAttrs>();
ICHECK(broadcast_attrs);
auto lhs_rank = static_cast<int>(lhs_data->shape.size());
auto rhs_rank = static_cast<int>(rhs_data->shape.size());
auto get_channel_axis = [](int rank, int axis_from_attr) {
if (rank <= 1) return 0;
if (axis_from_attr < 0) return rank + axis_from_attr;
return axis_from_attr;
};
const int lhs_axis = get_channel_axis(lhs_rank, broadcast_attrs->lhs_axis);
const int rhs_axis = get_channel_axis(rhs_rank, broadcast_attrs->rhs_axis);
// If zero point and scale are scalar then axis doesn't matter.
bool lhs_scale_is_scalar = (types[2].as<TensorTypeNode>())->shape.size() == 0;
bool lhs_zp_is_scalar = (types[3].as<TensorTypeNode>())->shape.size() == 0;
bool rhs_scale_is_scalar = (types[4].as<TensorTypeNode>())->shape.size() == 0;
bool rhs_zp_is_scalar = (types[5].as<TensorTypeNode>())->shape.size() == 0;
if (!(lhs_scale_is_scalar && lhs_zp_is_scalar)) {
ICHECK_LT(lhs_axis, lhs_rank > 0 ? lhs_rank : 1)
<< "lhs_axis " << broadcast_attrs->lhs_axis << " is out of range";
ICHECK_GE(lhs_axis, 0) << "lhs_axis " << broadcast_attrs->lhs_axis << " is out of range";
}
if (!(rhs_scale_is_scalar && rhs_zp_is_scalar)) {
ICHECK_LT(rhs_axis, rhs_rank > 0 ? rhs_rank : 1)
<< "rhs_axis " << broadcast_attrs->rhs_axis << " is out of range";
ICHECK_GE(rhs_axis, 0) << "rhs_axis " << broadcast_attrs->rhs_axis << " is out of range";
}
PrimExpr lhs_axis_shape;
if (lhs_rank > 0) {
lhs_axis_shape = lhs_data->shape[lhs_axis];
} else {
lhs_axis_shape = Integer(1);
}
PrimExpr rhs_axis_shape;
if (rhs_rank > 0) {
rhs_axis_shape = rhs_data->shape[rhs_axis];
} else {
rhs_axis_shape = Integer(1);
}
// Check and assign types for scale and zero points.
AssignType(types[2], DataType::Float(32), lhs_axis_shape, reporter); // lhs_scale
AssignType(types[3], DataType::Int(32), lhs_axis_shape, reporter); // lhs_zero_point
AssignType(types[4], DataType::Float(32), rhs_axis_shape, reporter); // rhs_scale
AssignType(types[5], DataType::Int(32), rhs_axis_shape, reporter); // rhs_zero_point
ICHECK(IsScalarType(types[6], DataType::Float(32))); // output_scale
ICHECK(IsScalarType(types[7], DataType::Int(32))); // output_zero_point
// Collect the input tensor and output tensor devoid of scale and zero points to reuse Relay
// BroadcastRel infer type function.
Array<Type> tensor_types = {types[0], types[1], types[8]};
return BroadcastRel(tensor_types, 3, attrs, reporter);
}
/*! Quick helper macro
* - Expose a positional make function to construct the node.
* - Register op to the registry.
*
 * We make the decision to only ever expose positional arguments.
 * We will do rewrapping in the frontend to support language
 * sugars such as keyword arguments and default values.
*
* \param OpName the name of registry.
*/
#define QNN_REGISTER_BINARY_OP(OpName) \
TVM_REGISTER_GLOBAL("relay.qnn.op._make." OpName) \
.set_body_typed([](Expr lhs, Expr rhs, Expr lhs_scale, Expr lhs_zero_point, Expr rhs_scale, \
Expr rhs_zero_point, Expr output_scale, Expr output_zero_point, \
int lhs_axis, int rhs_axis) { \
static const Op& op = Op::Get("qnn." OpName); \
auto attrs = make_object<BroadcastAttrs>(); \
attrs->lhs_axis = lhs_axis; \
attrs->rhs_axis = rhs_axis; \
return Call(op, \
{lhs, rhs, lhs_scale, lhs_zero_point, rhs_scale, rhs_zero_point, output_scale, \
output_zero_point}, \
Attrs(attrs), {}); \
}); \
RELAY_REGISTER_OP("qnn." OpName) \
.set_attrs_type<BroadcastAttrs>() \
.set_num_inputs(kNumQnnBinaryOpInputs) \
.add_argument("lhs", "Tensor", "The left hand side quantized tensor.") \
.add_argument("rhs", "Tensor", "The right hand side quantized tensor.") \
.add_argument("lhs_scale", "Tensor", "The scale of the lhs tensor.") \
.add_argument("lhs_zero_point", "Tensor", "The zero_point of the lhs tensor.") \
.add_argument("rhs_scale", "Tensor", "The scale of the rhs tensor.") \
.add_argument("rhs_zero_point", "Tensor", "The zero_point of the rhs tensor.") \
.add_argument("output_scale", "Tensor", "The scale of the output tensor.") \
.add_argument("output_zero_point", "Tensor", "The zero_point of the output tensor.") \
.add_argument("lhs_axis", "Tensor", "The channel quantization of the lhs tensor.") \
.add_argument("rhs_axis", "Tensor", "The channel quantization of the rhs tensor.") \
.add_type_rel("QnnBroadcast", QnnBroadcastRel) \
.set_attr<TNonComputational>("TNonComputational", true) \
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", QnnBinaryBroadcastLayout)
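/*
 * Illustrative use of QNN_REGISTER_BINARY_OP (a sketch; the actual op registrations live
 * in the per-op source files). Registering a quantized "add" might look like
 *
 *   QNN_REGISTER_BINARY_OP("add")
 *       .describe("Elementwise add with broadcasting for quantized tensors.")
 *       .set_support_level(11)
 *       .set_attr<FTVMLegalize>("FTVMQnnCanonicalize", QnnAddCanonicalize);
 *
 * where QnnAddCanonicalize is the op's lowering function (assumed here for illustration).
 */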
static inline bool QnnElementwiseUnaryFuncRel(const Array<Type>& types, int num_inputs,
const Attrs& attrs, const TypeReporter& reporter) {
  // Expected Types: data, scale, zero_point, output_scale, output_zero_point, out_type
ICHECK_EQ(types.size(), 6);
const auto* x = types[0].as<TensorTypeNode>();
if (x == nullptr) return false;
ICHECK(x->dtype == DataType::Int(8) || x->dtype == DataType::UInt(8))
<< "Expected quantized type(int8, uint8) for input but was " << x->dtype;
// Check the types of scale and zero points.
for (size_t i = 1; i < 5; ++i) {
if (types[i].as<IncompleteTypeNode>()) {
return false;
}
}
ICHECK(IsScalarType(types[1], DataType::Float(32))); // scale
ICHECK(IsScalarType(types[2], DataType::Int(32))); // zero_point
ICHECK(IsScalarType(types[3], DataType::Float(32))); // output_scale
ICHECK(IsScalarType(types[4], DataType::Int(32))); // output_zero_point
// Assign types for scale and zero points.
reporter->Assign(types[1], TensorType({}, DataType::Float(32))); // scale
reporter->Assign(types[2], TensorType({}, DataType::Int(32))); // zero_point
reporter->Assign(types[3], TensorType({}, DataType::Float(32))); // output_scale
reporter->Assign(types[4], TensorType({}, DataType::Int(32))); // output_zero_point
// Collect the input tensor and output tensor devoid of scale and zero points to reuse Relay
// IdentityRel infer type function.
Array<Type> tensor_types = {types[0], types[5]};
return IdentityRel(tensor_types, 2, attrs, reporter);
}
static inline Expr LegalizeExpr(const Expr& expr) {
// Canonicalizations should not contain qnn ops, so use this
// to lower expressions automatically after using things like qnn.dequantize
// in the lowering process.
auto mod = IRModule::FromExpr(expr);
mod = transform::Legalize()(mod);
if (expr.as<FunctionNode>()) {
return mod->Lookup("main");
} else {
return mod->Lookup("main").as<FunctionNode>()->body;
}
}
/*! Quick helper macro
* - Expose a positional make function to construct the node.
* - Register op to the registry.
*
* For Unary Operators which also take in QParams.
*
* \param OpName the name of registry.
*/
#define QNN_CREATE_UNARY_ELEMENTWISE_OP(OpName) \
TVM_REGISTER_GLOBAL("relay.qnn.op._make." OpName) \
.set_body_typed( \
[](Expr x, Expr scale, Expr zero_point, Expr output_scale, Expr output_zero_point) { \
return Call(Op::Get("qnn." OpName), \
{x, scale, zero_point, output_scale, output_zero_point}, Attrs(), {}); \
}); \
\
RELAY_REGISTER_OP("qnn." OpName) \
.describe("Elementwise " OpName " for quantized tensors.") \
.set_num_inputs(5) \
.add_argument("data", "Quantized Tensor", "The input data.") \
.add_argument("scale", "Tensor", "The quantization scale of the input tensor.") \
.add_argument("zero_point", "Tensor", "The quantization zero_point of the input tensor.") \
.add_argument("output_scale", "Tensor", "The quantization scale of the output tensor.") \
.add_argument("output_zero_point", "Tensor", \
"The quantization zero_point of the output tensor.") \
.set_support_level(11) \
.add_type_rel("qnn." OpName, QnnElementwiseUnaryFuncRel) \
.set_attr<TNonComputational>("TNonComputational", true)
/*! Quick helper macro
 * Create a default canonicalization for a QNN operator, which dequantizes the input,
 * runs the calculation using the provided floating point function, and then requantizes.
*
* FloatingPointFunc is usually a handle from "src/relay/transforms/pattern_utils.h"
*
* \param FloatingPointFunc the floating point function with function signature `Expr Erf(Expr e)`
*/
#define QNN_UNARY_OP_DEFAULT_CANONICALIZATION(FloatingPointFunc) \
[](const Attrs& attrs, const Array<Expr>& new_args, const Array<tvm::relay::Type>& arg_types) { \
QnnUnaryOpArguments args(new_args); \
QnnUnaryOpTensorType input_type(arg_types, 0); \
Expr dequantized_arg = MakeDequantize(args.x, args.scale, args.zero_point, -1); \
Expr output = FloatingPointFunc(dequantized_arg); \
Expr result = \
MakeQuantize(output, args.output_scale, args.output_zero_point, -1, input_type.dtype); \
return LegalizeExpr(result); \
}
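/*
 * Illustrative combination of the two unary macros above (a sketch, assuming a floating
 * point helper such as Tanh from pattern_utils.h is available):
 *
 *   QNN_CREATE_UNARY_ELEMENTWISE_OP("tanh").set_attr<FTVMLegalize>(
 *       "FTVMQnnCanonicalize", QNN_UNARY_OP_DEFAULT_CANONICALIZATION(Tanh));
 *
 * i.e. the quantized input is dequantized, evaluated in floating point, and requantized
 * to the output scale and zero point.
 */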
} // namespace qnn
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_QNN_OP_OP_COMMON_H_
| https://github.com/zk-ml/tachikoma |
src/relay/qnn/op/requantize_config.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/relay/qnn/op/requantize_config.h
* \brief QNN requantize config.
*/
#ifndef TVM_RELAY_QNN_OP_REQUANTIZE_CONFIG_H_
#define TVM_RELAY_QNN_OP_REQUANTIZE_CONFIG_H_
#include <tvm/relay/analysis.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/op.h>
#include <tvm/relay/op_attr_types.h>
#include <tvm/relay/qnn/attrs.h>
#include <string>
#include "../../op/op_common.h"
namespace tvm {
namespace relay {
namespace qnn {
class RequantizeConfig;
/*!
* \brief Container for build configuration options
*/
class RequantizeConfigNode : public Object {
std::string rounding;
std::string compute_dtype;
public:
explicit RequantizeConfigNode(bool is_default = false) : is_default(is_default) {}
std::string get_rounding() const {
if (!rounding.empty()) return rounding;
return "UPWARD";
}
std::string get_compute_dtype() const {
if (!compute_dtype.empty()) return compute_dtype;
// For the x86 architecture, the float32 computation is expected to give significant speedup,
// with little loss in the accuracy of the requantize operation.
auto target = Target::Current(true);
auto target_has_sse41 = tvm::runtime::Registry::Get("tvm.topi.x86.utils.target_has_sse41");
ICHECK(target_has_sse41) << "Function tvm.topi.x86.utils.target_has_sse41 not found";
if (target.defined() && target->kind->name == "llvm" &&
(target->GetAttr<String>("mcpu") &&
(*target_has_sse41)(target->GetAttr<String>("mcpu").value()))) {
return "float32";
}
return "int64";
}
const bool is_default = false;
void VisitAttrs(AttrVisitor* v) {
v->Visit("rounding", &rounding);
v->Visit("compute_dtype", &compute_dtype);
}
static constexpr const char* _type_key = "relay.qnn.op.RequantizeConfig";
TVM_DECLARE_FINAL_OBJECT_INFO(RequantizeConfigNode, Object);
};
/*!
* \brief Container for build configuration options
*/
class RequantizeConfig : public ObjectRef {
public:
RequantizeConfig() {}
explicit RequantizeConfig(ObjectPtr<Object> n) : ObjectRef(n) {}
const RequantizeConfigNode* operator->() const {
return static_cast<const RequantizeConfigNode*>(get());
}
RequantizeConfigNode* operator->() { return static_cast<RequantizeConfigNode*>(get_mutable()); }
/*!
* \brief Push a new BuildConfig context onto the thread local stack.
* \param build_config The configuration to set as the current context.
*/
static void EnterRequantizeConfigScope(const RequantizeConfig& requantize_config);
/*!
* \brief Pop a build config off the thread local context stack, restoring the previous
* configuration as the current context.
*/
static void ExitRequantizeConfigScope();
/*!
* \brief Get the current BuildConfig context from thread local storage, or a default
* configuration if a BuildConfig scope has not been entered.
* \return The configuration that is the current context.
*/
static RequantizeConfig& Current();
using ContainerType = RequantizeConfigNode;
};
} // namespace qnn
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_QNN_OP_REQUANTIZE_CONFIG_H_
| https://github.com/zk-ml/tachikoma |
src/relay/qnn/utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/relay/qnn/utils.h
 * \brief Utility methods needed for quantized ops that can be shared
*/
#ifndef TVM_RELAY_QNN_UTILS_H_
#define TVM_RELAY_QNN_UTILS_H_
#include <tvm/relay/expr.h>
#include <tvm/relay/qnn/attrs.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/op.h>
#include <limits>
#include <string>
#include <utility>
#include <vector>
#include "./op/requantize_config.h"
namespace tvm {
namespace relay {
namespace qnn {
static inline Array<IndexExpr> get_shape(const Type& type) {
auto input_tt = type.as<TensorTypeNode>();
ICHECK(input_tt != nullptr) << "Type information missing."
<< " Please run infer_type pass.";
return input_tt->shape;
}
static inline int32_t GetQmin(const DataType& dtype) {
ICHECK_LE(dtype.bits(), 32) << "QNN ops support int32 or lower precision";
if (dtype.is_int() || dtype.is_uint()) {
auto min_value_expr = tvm::min_value(dtype);
auto* min_value = tir::as_const_int(min_value_expr);
ICHECK(min_value != nullptr);
return static_cast<int32_t>(min_value[0]);
} else {
LOG(FATAL) << "Type not supported " << dtype;
return -1; // To hide the warning
}
}
static inline int32_t GetQmax(const DataType& dtype) {
ICHECK_LE(dtype.bits(), 32) << "QNN ops support int32 or lower precision";
if (dtype.is_int() || dtype.is_uint()) {
auto max_value_expr = tvm::max_value(dtype);
auto* max_value = tir::as_const_int(max_value_expr);
ICHECK(max_value != nullptr);
return static_cast<int32_t>(max_value[0]);
} else {
LOG(FATAL) << "Type not supported " << dtype;
return -1; // To hide the warning
}
}
/*
* \brief Convert FP32 representation into fixed point representation.
 * \param double_multiplier The input FP32 number.
* \return The pair of multiplier and shift for fixed point representation.
* \note Converts a floating point number so that it can be represented by
* integers. The representation is
* float_number = (significand) * 2^(exponent)
*
* The significand is a number between 0.5 and 1. This is represented by
* an integer number. For example, if it is int32, then the decimal point
* exists between bit 31 and 30 from LSB (or between first and second bit
* from the left).
*
* Some examples are
* 0.25 = (0.5) * 2^(-1)
* 0.125 = (0.5) * 2^(-2)
*
* Credit to TFLite reference implementation.
*/
std::pair<int32_t, int32_t> GetFixedPointMultiplierShift(double double_multiplier);
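/*
 * Worked example (illustrative): for double_multiplier = 0.25 the significand is 0.5 and
 * the exponent is -1, so the returned pair would be expected to be (1 << 30, -1), i.e.
 * 0.5 encoded as a Q31 fixed point int32 (1073741824) together with the exponent -1.
 */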
Expr RequantizeLower(const Expr& input_tensor, const Expr& input_scale,
const Expr& input_zero_point, const Expr& output_scale,
const Expr& output_zero_point, const RequantizeAttrs* param,
const Array<IndexExpr>& input_shape, const DataType& out_dtype);
std::string SelectRequntizeParameter(const std::string& arg_value, const std::string& cfg_value,
const bool is_cfg_default, const std::string& name);
static inline Expr Requantize(const Expr& data, const Array<IndexExpr>& input_shape,
const Expr& input_scale, const Expr& input_zero_point,
const Expr& output_scale, const Expr& output_zero_point,
const DataType& out_dtype, const int& axis = -1,
const std::string& rounding = "None",
const std::string& compute_dtype = "None") {
auto attrs = make_object<RequantizeAttrs>();
attrs->axis = axis;
attrs->out_dtype = std::move(out_dtype);
const RequantizeConfig& cfg = RequantizeConfig::Current();
attrs->rounding =
SelectRequntizeParameter(rounding, cfg->get_rounding(), cfg->is_default, "rounding");
attrs->compute_dtype = SelectRequntizeParameter(compute_dtype, cfg->get_compute_dtype(),
cfg->is_default, "compute_dtype");
return RequantizeLower(data, input_scale, input_zero_point, output_scale, output_zero_point,
attrs.operator->(), input_shape, attrs->out_dtype);
}
Expr MakeRequantize(Expr data, Expr input_scale, Expr input_zero_point, Expr output_scale,
Expr output_zero_point, int axis, String rounding, String compute_dtype,
DataType out_dtype);
Expr DequantizeLower(const Expr& input_tensor, const Expr& input_scale,
const Expr& input_zero_point, const Array<tvm::relay::Type>& types,
const DequantizeAttrs* attrs);
static inline Expr Dequantize(const Expr& data, const Expr& input_scale,
const Expr& input_zero_point, const Array<tvm::relay::Type>& types,
const int& axis = -1) {
auto attrs = make_object<DequantizeAttrs>();
attrs->axis = std::move(axis);
return DequantizeLower(data, input_scale, input_zero_point, types, attrs.operator->());
}
Expr MakeDequantize(Expr data, Expr input_scale, Expr input_zero_point, int axis);
Expr QuantizeLower(const Expr& input_tensor, const Expr& output_scale,
const Expr& output_zero_point, const Array<tvm::relay::Type>& types,
const QuantizeAttrs* attrs);
static inline Expr Quantize(const Expr& data, const Expr& output_scale,
const Expr& output_zero_point, const DataType& out_dtype,
const Array<tvm::relay::Type>& types, const int& axis = -1) {
auto attrs = make_object<QuantizeAttrs>();
attrs->axis = std::move(axis);
attrs->out_dtype = std::move(out_dtype);
return QuantizeLower(data, output_scale, output_zero_point, types, attrs.operator->());
}
Expr MakeQuantize(Expr data, Expr output_scale, Expr output_zero_point, int axis,
DataType out_dtype);
static inline int64_t get_const_int(const tvm::PrimExpr& x) {
auto* value_ptr = tir::as_const_int(x);
ICHECK(value_ptr) << "Expr is not a constant int";
return value_ptr[0];
}
/*
 * \brief Fixed point multiplication between an integer tensor and a floating point
 * scalar. This implementation rounds to the nearest value when it is midway
* between two representable values.
* \param tensor The quantized input tensor of dtype int64.
* \param multiplier The scalar multiplier.
* \param input_shape Shape of the input tensor.
 * \return The sequence of Relay ops for fixed point multiplication with TONEAREST rounding.
 * \note The original computation is scale_fp32 * quantized_tensor. To convert it into
* integer computation, the multiplication with fp32 scalar can be
* replaced by multiplication with an int value and then right shifting
* the result. This approximates the floating point computation with a
* fixed point computation.
*
 * The fixed point multiplication consists of the following
 * steps:
* 1) Multiply the fixed point multiplier with quantized tensor.
* 2) Round the result.
* 3) Right shift the result
*/
Expr FixedPointMultiplyToNearest(Expr tensor, double multiplier,
const Array<IndexExpr>& input_shape);
/*
 * \brief Fixed point multiplication between an integer tensor and a floating point
 * scalar where the input tensor is per-axis/per-channel quantized.
 * \param tensor The quantized input tensor of dtype int64.
 * \param multiplier The scalar multiplier.
 * \param input_shape Shape of the input tensor.
 * \param channel_axis The channel_axis along which the input tensor is quantized. Default value is
 * -1, which corresponds to the last axis.
 * \param rounding "UPWARD" or "TONEAREST". The rounding direction when the value
 * is midway between two representable values.
* \return The sequence of Relay ops for fixed point multiplication.
 * \note The original computation is scale_fp32 * quantized_tensor. To convert it into
* integer computation, the multiplication with fp32 vector can be
* replaced by multiplication with an int vector and then right shifting
* the result. This approximates the floating point computation with a
* fixed point computation.
*
 * The fixed point multiplication consists of the following
 * steps:
* 1) Multiply the fixed point multiplier with quantized tensor.
* 2) Round the result.
* 3) Right shift the result
*/
Expr FixedPointMultiplyPerChannel(Expr tensor, std::vector<double> multiplier,
const Array<IndexExpr>& input_shape, int channel_axis,
const std::string& rounding);
/*
* Wrapper for 'FixedPointMultiplyPerChannel' with rounding parameter == "TONEAREST".
*/
Expr FixedPointMultiplyPerChannelToNearest(Expr tensor, std::vector<double> multiplier,
const Array<IndexExpr>& input_shape, int channel_axis);
/*
 * \brief Creates a FixedPointMultiply operation where the input tensor is
 * per-axis/per-channel quantized.
* \param tensor The quantized input tensor.
* \param multipliers List of scalar multipliers.
 * \param axis The axis along which the input tensor is per-channel quantized.
* \return The Relay op.
*/
Expr FixedPointMultiplyPerChannel(Expr tensor, const std::vector<double>& multipliers, int axis);
/*
* \brief Checks whether an expr type is scalar of a given data type.
* \param expr_type The type of expr to be checked.
* \param dtype The expected dtype.
* \return True if the type is a scalar of given dtype
*/
static inline bool IsScalarType(const Type& expr_type, const DataType& dtype) {
const auto* tensor_type = expr_type.as<TensorTypeNode>();
ICHECK(tensor_type) << "Only tensor type can be checked for scalar values. But got"
<< AsText(expr_type, false);
ICHECK_EQ(tensor_type->shape.size(), 0);
ICHECK(tensor_type->dtype == dtype) << "Expected " << dtype << " but got " << tensor_type->dtype;
return true;
}
/*
* \brief Checks whether an expr type is scalar.
* \param expr_type The type of expr to be checked.
* \return True if the type is a scalar
*/
static inline bool IsScalarType(const Type& expr_type) {
const auto* tensor_type = expr_type.as<TensorTypeNode>();
CHECK(tensor_type) << "Only tensor type can be checked for scalar values. But got"
<< AsText(expr_type, false);
return tensor_type->shape.size() == 0;
}
/*
* \brief Checks and assigns types to scale and zero points.
* \param expr_type The type of expr to be checked.
* \param dtype The expected dtype.
* \param shape The shape at C dim of original tensor.
 * \param reporter The type reporter of the original InferType call.
*/
static inline void AssignType(const Type& expr_type, const DataType& dtype, const IndexExpr& shape,
const TypeReporter& reporter) {
  // Scales/zero points can be either constant scalars or vectors with as many elements as the C axis.
const auto* tensor_type = expr_type.as<TensorTypeNode>();
ICHECK(tensor_type) << "Can assign type to Tensor type only. But got "
<< AsText(expr_type, false);
const auto tensor_dtype = tensor_type->dtype;
ICHECK(tensor_dtype == dtype) << "Expected type is " << dtype << " but received " << tensor_dtype;
if (tensor_type->shape.size() != 0) {
reporter->Assign(expr_type, TensorType({shape}, tensor_type->dtype));
}
}
static inline std::vector<float> GetFloatVectorFromConstant(const Expr& expr) {
const auto* n = expr.as<ConstantNode>();
std::vector<float> vals;
ICHECK(n) << "Expr must be a constant expr - " << AsText(expr, false);
int64_t num_elems = 1;
auto shape = n->data.Shape();
for (size_t i = 0; i < shape.size(); i++) {
num_elems *= shape[i];
}
for (int64_t i = 0; i < num_elems; i++) {
vals.push_back(static_cast<float*>(n->data->data)[i]);
}
return vals;
}
Expr MakeQnnConv2D(Expr data, Expr weight, Expr input_zero_point, Expr kernel_zero_point,
Expr input_scale, Expr kernel_scale, Array<IndexExpr> strides,
Array<IndexExpr> padding, Array<IndexExpr> dilation, int groups,
IndexExpr channels, Array<IndexExpr> kernel_size, String data_layout,
String kernel_layout, String out_layout, DataType out_dtype);
} // namespace qnn
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_QNN_UTILS_H_
| https://github.com/zk-ml/tachikoma |
src/relay/quantize/quantize.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file tvm/relay/quantize.h
* \brief Header of definitions for quantization
*/
#ifndef TVM_RELAY_QUANTIZE_QUANTIZE_H_
#define TVM_RELAY_QUANTIZE_QUANTIZE_H_
#include <tvm/relay/expr.h>
#include <tvm/relay/op.h>
#include <string>
#include "../transforms/pattern_utils.h"
namespace tvm {
namespace relay {
namespace quantize {
/*! \brief Kind of annotate field */
enum QAnnotateKind : int { kQIdentity = 0, kQInput = 1, kQWeight = 2, kQActivation = 3 };
/*! \brief Attribute for simulated quantize operator */
struct SimulatedQuantizeAttrs : public tvm::AttrsNode<SimulatedQuantizeAttrs> {
int kind;
bool sign;
std::string rounding;
TVM_DECLARE_ATTRS(SimulatedQuantizeAttrs, "relay.attrs.SimulatedQuantizeAttrs") {
TVM_ATTR_FIELD(kind).describe("kind of field, hint for nbit/dtype configuration.");
TVM_ATTR_FIELD(sign).set_default(true).describe("whether to use signed data type.");
TVM_ATTR_FIELD(rounding).set_default("round").describe(
"rounding mode. Can be 'floor', 'ceil', 'round'");
}
};
class QConfig;
/*!
* \brief Container for build configuration options
*/
class QConfigNode : public Object {
public:
int nbit_input = 8;
int nbit_weight = 8;
int nbit_activation = 32;
DataType dtype_input = DataType::Int(8);
DataType dtype_weight = DataType::Int(8);
DataType dtype_activation = DataType::Int(32);
std::string calibrate_mode = "global_scale";
double global_scale = 8.0;
std::string weight_scale = "power2";
bool skip_dense_layer = true;
Array<Expr> skip_conv_layers = Array<Expr>(ObjectPtr<Object>(nullptr));
bool do_simulation = false;
bool round_for_shift = true;
Array<Expr> debug_enabled_ops = Array<Expr>(ObjectPtr<Object>(nullptr));
std::string rounding = "UPWARD";
int calibrate_chunk_by = -1;
std::string partition_conversions = "disabled";
void VisitAttrs(AttrVisitor* v) {
v->Visit("nbit_input", &nbit_input);
v->Visit("nbit_weight", &nbit_weight);
v->Visit("nbit_activation", &nbit_activation);
v->Visit("dtype_input", &dtype_input);
v->Visit("dtype_weight", &dtype_weight);
v->Visit("dtype_activation", &dtype_activation);
v->Visit("calibrate_mode", &calibrate_mode);
v->Visit("global_scale", &global_scale);
v->Visit("weight_scale", &weight_scale);
v->Visit("skip_dense_layer", &skip_dense_layer);
v->Visit("skip_conv_layers", &skip_conv_layers);
v->Visit("do_simulation", &do_simulation);
v->Visit("round_for_shift", &round_for_shift);
v->Visit("debug_enabled_ops", &debug_enabled_ops);
v->Visit("rounding", &rounding);
v->Visit("calibrate_chunk_by", &calibrate_chunk_by);
v->Visit("partition_conversions", &partition_conversions);
}
static constexpr const char* _type_key = "relay.quantize.QConfig";
TVM_DECLARE_FINAL_OBJECT_INFO(QConfigNode, Object);
};
/*!
* \brief Container for build configuration options
*/
class QConfig : public ObjectRef {
public:
QConfig() {}
explicit QConfig(ObjectPtr<Object> n) : ObjectRef(n) {}
const QConfigNode* operator->() const { return static_cast<const QConfigNode*>(get()); }
QConfigNode* operator->() { return static_cast<QConfigNode*>(get_mutable()); }
/*!
* \brief Push a new BuildConfig context onto the thread local stack.
* \param build_config The configuration to set as the current context.
*/
static void EnterQConfigScope(const QConfig& qconfig);
/*!
* \brief Pop a build config off the thread local context stack, restoring the previous
* configuration as the current context.
*/
static void ExitQConfigScope();
/*!
* \brief Get the current BuildConfig context from thread local storage, or a default
* configuration if a BuildConfig scope has not been entered.
* \return The configuration that is the current context.
*/
static QConfig& Current();
using ContainerType = QConfigNode;
};
/*!
* \brief RAII container to provide a scoped BuildConfig context. Pushes a configuration onto the
* context stack when constructed, and pops it when destructed.
*/
struct QConfigContext {
/*!
* \brief Enter a new BuildConfig context. The given BuildConfig becomes the new current
* context. When the BuildConfigContext is destructed, the previous context is restored.
* \param build_config The BuildConfig to set as the new current context.
*/
explicit QConfigContext(const QConfig& qconfig) { QConfig::EnterQConfigScope(qconfig); }
/*! \brief Destructor. Pops the context off the thread local stack. */
~QConfigContext() { QConfig::ExitQConfigScope(); }
};
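/*
 * Sketch of intended usage (illustrative): the RAII wrapper scopes a QConfig so that
 * QConfig::Current() returns it only while the context object is alive.
 *
 *   {
 *     QConfigContext scope(my_qconfig);  // pushes my_qconfig onto the context stack
 *     QConfig cfg = QConfig::Current();  // == my_qconfig inside this block
 *   }                                    // previous configuration restored here
 */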
} // namespace quantize
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_QUANTIZE_QUANTIZE_H_
| https://github.com/zk-ml/tachikoma |
src/relay/quantize/realize.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
*
* \file realize.h
*
* \brief Header of definitions for op realizations
*
*/
#ifndef TVM_RELAY_QUANTIZE_REALIZE_H_
#define TVM_RELAY_QUANTIZE_REALIZE_H_
#include <tvm/relay/transform.h>
namespace tvm {
namespace relay {
namespace quantize {
class QRealizeExprNode : public TempExprNode {
public:
Expr data;
static constexpr const char* _type_key = "relay.quantize.QRealizeExpr";
TVM_DECLARE_BASE_OBJECT_INFO(QRealizeExprNode, TempExprNode);
};
class QRealizeExpr : public TempExpr {
public:
TVM_DEFINE_OBJECT_REF_METHODS(QRealizeExpr, TempExpr, QRealizeExprNode);
};
class QRealizeIntExprNode : public QRealizeExprNode {
public:
Expr dom_scale;
DataType dtype;
void VisitAttrs(tvm::AttrVisitor* v) {
v->Visit("data", &data);
v->Visit("dom_scale", &dom_scale);
v->Visit("dtype", &dtype);
}
Expr Realize() const final;
static constexpr const char* _type_key = "relay.quantize.QRealizeIntExpr";
TVM_DECLARE_FINAL_OBJECT_INFO(QRealizeIntExprNode, QRealizeExprNode);
};
class QRealizeIntExpr : public QRealizeExpr {
public:
TVM_DLL QRealizeIntExpr(Expr data, Expr dom_scale, DataType dtype);
TVM_DEFINE_OBJECT_REF_METHODS(QRealizeIntExpr, QRealizeExpr, QRealizeIntExprNode);
};
} // namespace quantize
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_QUANTIZE_REALIZE_H_
| https://github.com/zk-ml/tachikoma |
src/relay/transforms/auto_scheduler_layout_rewrite.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file auto_scheduler_layout_rewrite.h
* \brief Rewrite the layout of "layout free" tensors (e.g., the weight tensors in
* conv2d and dense layers) according to the tile structure generated by the auto-scheduler.
*/
#ifndef TVM_RELAY_TRANSFORMS_AUTO_SCHEDULER_LAYOUT_REWRITE_H_
#define TVM_RELAY_TRANSFORMS_AUTO_SCHEDULER_LAYOUT_REWRITE_H_
#include <tvm/relay/expr_functor.h>
#include <deque>
#include <string>
namespace tvm {
namespace relay {
class AutoSchedulerLayoutRewriter : public ExprMutator {
public:
Expr VisitExpr_(const CallNode* n) final;
// Two global variables for receiving layout information from python
static std::deque<std::string> global_ori_layouts_queue;
static std::deque<std::string> global_new_layouts_queue;
};
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_TRANSFORMS_AUTO_SCHEDULER_LAYOUT_REWRITE_H_
| https://github.com/zk-ml/tachikoma |
src/relay/transforms/combine_parallel_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
*
* \file combine_parallel_op.h
* \brief Abstract class to combine parallel ops and their successive element-wise ops.
*/
#ifndef TVM_RELAY_TRANSFORMS_COMBINE_PARALLEL_OP_H_
#define TVM_RELAY_TRANSFORMS_COMBINE_PARALLEL_OP_H_
#include <tvm/relay/analysis.h>
#include <tvm/relay/attrs/nn.h>
#include <tvm/relay/attrs/transform.h>
#include <tvm/relay/expr_functor.h>
#include <tvm/relay/op_attr_types.h>
#include <tvm/relay/transform.h>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "./expr_subst.h"
#include "pattern_utils.h"
namespace tvm {
namespace relay {
using Branch = std::vector<const CallNode*>;
using Group = std::vector<Branch>;
using FIsSupportedOp = std::function<bool(const CallNode* n)>;
using FAreCompatibleOps = std::function<bool(const CallNode* a, const CallNode* b)>;
using ExprSubstMap = std::unordered_map<Expr, Expr, ObjectPtrHash, ObjectPtrEqual>;
/*
 * Class to find parallel branches starting with op, which are
 * grouped together when they are able to be combined. They are
 * eligible to be combined if they have the same input data.
* Op can be followed by zero or more elemwise or broadcast ops,
* which are included in the group.
* Intermediate nodes have exactly one successor. It is possible that branches meet at a point,
* which should be handled in ParallelOpCombiner.
*
* data
* / \
* op op
* | |
* elem-wise elem-wise
* | |
*/
class BranchGroupFinder : private ExprVisitor {
public:
/*
* \brief Constructor
* \param op The op that indicates the start of each group
* \param fis_supported_op function that returns true if op
* is supported for combining
* \param fare_compatible_ops function that returns true if
* two ops are compatible for combining
*/
BranchGroupFinder(const Op& op, FIsSupportedOp fis_supported_op,
FAreCompatibleOps fare_compatible_ops);
/*
* \brief Finds all groups that can be combined.
* \param expr Relay expression that represents function
* to look at for groups to be combined
* \return Vector of groups which can be combined.
*/
std::vector<Group> Find(const Expr& expr);
private:
/* \brief Cache the op for finding parallel branches */
const Op& cached_op_;
/* \brief function to return true if op is eligible to be combined,
* false otherwise
*/
FIsSupportedOp fis_supported_op_;
/* \brief function to return true if two parallel ops are eligible
* to be combined, false otherwise
*/
FAreCompatibleOps fare_compatible_ops_;
/* \brief ops that are on the first (logically, leftmost) branch
* of parallel ops and are eligible to be combined
*/
std::unordered_set<Expr, ObjectPtrHash, ObjectPtrEqual> op_roots_;
/* \brief map of Expr to CallNodes that follow it */
std::unordered_map<Expr, std::vector<const CallNode*>, ObjectPtrHash, ObjectPtrEqual>
children_map_;
/*
* \brief Creates new branch from op and its children that have
* elementwise or broadcast patterns
* \return New branch
*/
Branch CreateBranch(const CallNode* op);
/*
* \brief Expression visitor function
*/
void VisitExpr_(const CallNode* n) final;
};
/*
* Abstract class to find and combine parallel ops and the elementwise ops that follow.
*/
class ParallelOpCombiner {
public:
/*! \brief virtual destructor */
virtual ~ParallelOpCombiner() {}
/*
* \brief Constructor.
* \param op_name name of op to combine
* \param min_num_branches min number of parallel branches beginning with op
* to start combining
*/
explicit ParallelOpCombiner(const std::string& op_name, uint64_t min_num_branches);
/*
* \brief Combines ops and following elementwise or broadcast ops
* \param expr function to modify
* \return new function with combined ops
*/
Expr Combine(const Expr& expr);
protected:
/*
* \brief Checks if node is supported to be combined
* \param n node in question
* \return True if the op represented by n is supported to be the root of a branch
* to be combined. False otherwise.
*/
virtual bool IsSupportedOp(const CallNode* n) = 0;
/*
* \brief Checks if two ops can be combined
* \param a node a
* \param b node b
* \return True if a and b can be combined. False otherwise.
*/
virtual bool CanOpsBeCombined(const CallNode* a, const CallNode* b) = 0;
/*
* \brief Makes combined op from parallel ops in branches. This usually involves
* concatenating or stacking inputs, then creating a new call.
* \param branches branches that are to be combined
* \return new call with branches combined.
*/
virtual Call MakeCombinedOp(const Group& branches) = 0;
/*
* \brief Checks if argument of op following combined ops are able to be combined
* \param a node a
* \param b node b
* \param index index of argument in question
* \return True if argument of a and b and index can be combined
*/
virtual bool IsArgCompatible(const CallNode* a, const CallNode* b, size_t index) = 0;
/*
* \brief Create combined call from ops that follow the initial combined op at the depth-th level.
* This usually involves concatenating or stacking inputs, then creating a new call.
 * Only called if IsArgCompatible returns true for each arg.
* \param data combined op
* \param branches branches of parallel ops to be combined
* \param depth depth at which to combine ops
* \param parent_index index of arg that corresponds to original input that was shared among
* all combined ops
* \return new combined call
*/
virtual Call MakeCombinedCallFromFollowingOps(const Expr& data, const Group& branches,
size_t depth, size_t parent_index) = 0;
/*
* \brief Updates map of expr to substitute with combined expr. This usually involves
* slicing or splitting data.
* \param data combined op
* \param branches branches of parallel ops to be combined
* \param depth depth at which to substitute
* \param subst_map map of Expr to replace with Expr to replace it with
*/
virtual void UpdateGroupOutput(const Expr& data, const Group& branches, size_t depth,
ExprSubstMap* subst_map) = 0;
private:
/* \brief Cache the op to be combined */
const Op& cached_op_;
/* \brief minimum number of parallel branches to combine */
uint64_t min_num_branches_;
/* \brief map of Expr to Expr to substitute it with after running pass */
ExprSubstMap subst_map_;
/*
* \brief Combine parallel branches and updates subst_map_ with Exprs
* to be substituted
* \param branches branches to be combined
*/
void CombineBranches(const Group& branches);
/*
 * \brief Checks whether the parallel ops at the given depth of the branches
 * can be combined
* \param branches parallel branches to potentially be combined
* \param depth depth at which to look at op
* \param parent_index index of arg that corresponds to original input that was shared among
* all combined ops
* \return true if parallel ops at depth can be combined, false otherwise
*/
bool CheckLevel(const Group& branches, size_t depth, size_t parent_index);
};
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_TRANSFORMS_COMBINE_PARALLEL_OP_H_
| https://github.com/zk-ml/tachikoma |
src/relay/transforms/combine_parallel_op_batch.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file combine_parallel_op_batch.h
* \brief Combine parallel ops into a single batch op.
*/
#ifndef TVM_RELAY_TRANSFORMS_COMBINE_PARALLEL_OP_BATCH_H_
#define TVM_RELAY_TRANSFORMS_COMBINE_PARALLEL_OP_BATCH_H_
#include <tvm/relay/analysis.h>
#include <tvm/relay/attrs/nn.h>
#include <tvm/relay/attrs/transform.h>
#include <tvm/relay/expr_functor.h>
#include <tvm/relay/op_attr_types.h>
#include <tvm/relay/transform.h>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include "./combine_parallel_op.h"
#include "./expr_subst.h"
#include "pattern_utils.h"
namespace tvm {
namespace relay {
/*
* Class to find and combine parallel ops and following element-wise
* and broadcast ops into a single batch op. Ops can be combined
* if they have the same input data. Batch op is formed by
* stacking inputs. Final results are retrieved by splitting output.
* For example:
*
* data
* / \
* dense (2,2) dense (2,2)
* | |
* elemwise/bcast (2,2) elemwise/bcast (2,2)
*
* Would become:
*
* data
* |
* batch_matmul+elemwise/bcast (2,2,2)
*/
class ParallelOpBatchCombiner : public ParallelOpCombiner {
public:
/*
* \brief Constructor.
* \param op_name name of op to combine
* \param batch_op_name name of op that combined branches will be joined into
* \param min_num_branches min number of parallel branches beginning with op
* to start combining
*/
ParallelOpBatchCombiner(const std::string& op_name, const std::string& batch_op_name,
uint64_t min_num_branches);
protected:
/*
* \brief Checks if node is supported to be combined
* \param n node in question
* \return True by default
*/
virtual bool IsSupportedOp(const CallNode* n);
/*
* \brief Checks if two ops can be combined
* \param a node a
* \param b node b
* \return True if shapes and dtypes of all args of a and b are the same
*/
virtual bool CanOpsBeCombined(const CallNode* a, const CallNode* b);
/*
* \brief Makes combined op from parallel ops in branches. This usually involves
* concatenating or stacking inputs, then creating a new call.
* \param branches branches that are to be combined
* \return new call with branches combined as batch op by stacking args
*/
virtual Call MakeCombinedOp(const Group& branches);
/*
* \brief Checks if argument of op following combined ops are able to be combined
* \param a node a
* \param b node b
* \param index index of argument in question
* \return True if shapes and dtypes of args[index] a and b are the same
*/
bool IsArgCompatible(const CallNode* a, const CallNode* b, size_t index) final;
/*
* \brief Create combined call from ops that follow the initial combined op at the depth-th level.
* This usually involves concatenating or stacking inputs, then creating a new call.
 * Only called if IsArgCompatible returns true for each arg.
* \param data combined op
* \param branches branches of parallel ops to be combined
* \param depth depth at which to combine ops
* \param parent_index index of arg that corresponds to original input that was shared among
* all combined ops
* \return new combined call as batch op by stacking args
*/
Call MakeCombinedCallFromFollowingOps(const Expr& data, const Group& branches, size_t depth,
size_t parent_index) final;
/*
* \brief Updates map of expr to substitute with combined expr. This usually involves
* slicing or splitting data.
* \param data combined op
* \param branches branches of parallel ops to be combined
* \param depth depth at which to substitute
* \param subst_map map of Expr to replace with Expr to replace it with
*/
void UpdateGroupOutput(const Expr& data, const Group& branches, size_t depth,
ExprSubstMap* subst_map) final;
private:
/* \brief Name of the op to replace combined ops with. For example,
 * for combining parallel dense, this will be set to
 * nn.batch_matmul
*/
std::string batch_op_name_;
};
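/*
 * Illustrative use (a sketch): a pass that merges parallel dense layers into a single
 * batched matmul could instantiate the combiner directly, relying on the default
 * IsSupportedOp/CanOpsBeCombined behaviour described above.
 *
 *   ParallelOpBatchCombiner combiner("nn.dense", "nn.batch_matmul", 3);  // 3 == min_num_branches
 *   Expr new_body = combiner.Combine(func_body);
 */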
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_TRANSFORMS_COMBINE_PARALLEL_OP_BATCH_H_
| https://github.com/zk-ml/tachikoma |
src/relay/transforms/compiler_function_utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/relay/transforms/compiler_function_utils.h
* \brief Helper passes for working with functions with the "Compiler" attribute.
*
* Those wishing to use the "RelayToTIR" custom pass machinery to do IRModule-at-a-time external
* codegen may find the following helpers useful:
*
* - The \p OutlineCompilerFunctionsWithExistingGlobalSymbols pass will lift inline functions with
* a matching "Compiler" attribute to be global functions, using the "global_symbol" attribute
* already assigned. Can be used before custom lowering.
*
* Note that ideally "Compiler" attributed functions would be made global functions as early as
* possible and would stay that way. However, the GraphExecutorCodegen and AOTExecutorCodegen
* assume the entire model can be represented by a single 'main' function, and the Inline pass
* is run to respect that assumption. So this pass is mostly just to undo that Pass after modules
* have passed through the 'codegen' keyhole.
*
* See also OutlineCompilerFunctionsMutator in src/relay/backend/contrib/ethosu/codegen.cc.
*
* - (The \p OutlineCompilerFunctions pass is a more general version of the above which can use
* a custom cache to both allocate "global_symbol" names and ensure two structurally equal
* functions are assigned the same name, and thus lowered only once. This is used by Collage
* when preparing the optimally partitioned IRModule).
*
* - The \p MarkCompilerFunctionsAsExtern pass will update the attributes of global functions
* with a matching "Compiler" attribute to have just the "Extern" attribute. That will signal
* the function has been dealt with. However calls to such functions will be left unchanged.
* Can be used after lowering to cleanup the IRModule.
*
* - The \p InlineCompilerFunctions pass can selectively inline global functions with a matching
* "Compiler" attribute who's name appears in the given set. Obviously it's more sensible to
* not create that function in the first place, however some external codegen have rules to
* accept or reject partitionings based on the overall partitioned function body. This pass
* can be used do the legwork, and will take care to not only inline the outer "Compiler"
* annotated funcition, but also any "Composite" annotated functions in its body.
*/
#ifndef TVM_RELAY_TRANSFORMS_COMPILER_FUNCTION_UTILS_H_
#define TVM_RELAY_TRANSFORMS_COMPILER_FUNCTION_UTILS_H_
#include <memory>
#include <string>
#include <unordered_map>
#include "tvm/ir/transform.h"
#include "tvm/relay/function.h"
namespace tvm {
namespace relay {
namespace transform {
/*!
* \brief Abstract class representing a cache of unique global vars keyed by functions. This can
* be used to ensure structurally equal functions are assigned the same global var object, and
* thus lowered at most once.
*/
class GlobalSymbolCache {
public:
virtual ~GlobalSymbolCache();
virtual GlobalVar GetGlobalSymbol(const Function& function) = 0;
};
/*!
* \brief A \p GlobalSymbolCache that requires every "Compiler" attributed function to already
* have a "global_symbol" attribute.
*/
class ExistingGlobalSymbolCache : public GlobalSymbolCache {
public:
ExistingGlobalSymbolCache() = default;
GlobalVar GetGlobalSymbol(const Function& function) final;
private:
/*! \brief Maps already seen global symbol names to their corresponding GlobalVar objects. */
std::unordered_map<std::string, GlobalVar> global_vars_;
};
/*!
* \brief A pass to outline all let-bound and literal functions in direct call positions which have
* a "Compiler" attribute. The given \p GlobalSymbolCache is used to determine a unique global
* symbol for each function, which is also assigned to the "global_symbol" attribute of the new
* global function.
*
* At most one function with the same global symbol is outlined.
*
* If \p compiler_filter is non-empty only functions with that as their attribute value are
* outlined.
*/
tvm::transform::Pass OutlineCompilerFunctions(std::shared_ptr<GlobalSymbolCache> cache,
std::string compiler_filter = "");
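/*!
 * Example (editor's sketch, not part of the upstream header): a minimal \p GlobalSymbolCache
 * which invents fresh "global_symbol" names for \p OutlineCompilerFunctions above. The
 * "tvmgen_" prefix, the counter scheme and the use of StructuralHash/StructuralEqual as map
 * functors are illustrative assumptions only.
 *
 * \code
 * class FreshNameGlobalSymbolCache : public GlobalSymbolCache {
 *  public:
 *   GlobalVar GetGlobalSymbol(const Function& function) final {
 *     auto itr = memo_.find(function);
 *     if (itr != memo_.end()) {
 *       return itr->second;  // structurally equal functions share one GlobalVar
 *     }
 *     GlobalVar var("tvmgen_" + std::to_string(counter_++));
 *     memo_.emplace(function, var);
 *     return var;
 *   }
 *
 *  private:
 *   size_t counter_ = 0;
 *   std::unordered_map<Function, GlobalVar, StructuralHash, StructuralEqual> memo_;
 * };
 *
 * // Outline every function carrying Compiler="my_codegen" using the cache above.
 * tvm::transform::Pass outline =
 *     OutlineCompilerFunctions(std::make_shared<FreshNameGlobalSymbolCache>(), "my_codegen");
 * \endcode
 */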
/*!
* \brief A pass to outline all let-bound and literal functions in direct call positions which have
* a "Compiler" attribute. The functions are bound to unique global vars according to their
* existing "global_symbol" attribute. At most one function with the same global symbol is outlined.
*
* If \p compiler_filter is non-empty only functions with that as their attribute value are
* outlined.
*
* This pass may be useful for external codegen using the "RelayToTIR" custom pass mechanism
* to prepare the IRModule before custom lowering.
*/
tvm::transform::Pass OutlineCompilerFunctionsWithExistingGlobalSymbols(
std::string compiler_filter = "");
/*!
* \brief A pass to mark all global functions which have a "Compiler" attribute matching
* compiler_filter as 'extern' by replacing all attributes with a single "Extern" attribute.
* Calls to such functions are not changed.
*
* If \p compiler_filter is non-empty only functions with that as their attribute value are
* outlined.
*
* This pass may be useful for external codegen using the "RelayToTIR" custom pass mechanism to
* cleanup the IRModule after custom lowering.
*/
tvm::transform::Pass MarkCompilerFunctionsAsExtern(std::string compiler_filter = "");
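/*!
 * Example (editor's sketch): how an IRModule-at-a-time "RelayToTIR" hook might bracket its own
 * lowering with the two helpers above. \c LowerMyCodegenFunctions is a hypothetical stand-in
 * for the external codegen's real lowering pass.
 *
 * \code
 * tvm::transform::Pass MyRelayToTIR() {
 *   return tvm::transform::Sequential(
 *       {OutlineCompilerFunctionsWithExistingGlobalSymbols("my_codegen"),
 *        LowerMyCodegenFunctions(),  // hypothetical external lowering
 *        MarkCompilerFunctionsAsExtern("my_codegen")},
 *       "MyRelayToTIR");
 * }
 * \endcode
 */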
/*!
* \brief A pass to inline all global "Compiler" functions which are bound to a global var
 * in \p global_vars. Both the global function and any calls to "Composite" functions in its body
* are inlined.
*
* This pass may be useful for external codegen which needs to undo partitioning based on
* properties of the entire partition.
*/
tvm::transform::Pass InlineCompilerFunctionsBoundTo(Array<GlobalVar> global_vars);
} // namespace transform
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_TRANSFORMS_COMPILER_FUNCTION_UTILS_H_
| https://github.com/zk-ml/tachikoma |
src/relay/transforms/device_aware_visitors.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/relay/transforms/device_aware_visitors.h
* \brief Visitors which track the device for the current Relay expression and Relay Vars.
*/
#ifndef TVM_RELAY_TRANSFORMS_DEVICE_AWARE_VISITORS_H_
#define TVM_RELAY_TRANSFORMS_DEVICE_AWARE_VISITORS_H_
#include <dlpack/dlpack.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/expr_functor.h>
#include <tvm/relay/function.h>
#include <unordered_map>
#include <utility>
#include <vector>
#include "../op/annotation/annotation.h"
#include "../op/memory/on_device.h"
namespace tvm {
namespace relay {
namespace transform {
/*!
* \brief Helper class for expression transformers which need to keep track of the \p VirtualDevice
* holding the results of expressions. This is recovered from function attributes and "on_device"
* CallNodes added by the PlanDevices pass.
*
* \sa \p DeviceAwareExpr{Functor,Visitor,Mutator}.
*/
class LexicalOnDeviceMixin {
protected:
explicit LexicalOnDeviceMixin(const Optional<IRModule>& maybe_mod);
/*!
* \brief Returns the \p VirtualDevice on which the result of \p expr should/will be stored,
* assuming {Push,Pop}{VirtualDevice,BoundVar} have been correctly called. May return the
* unconstrained \p VirtualDevice if the device planning pass has not been run.
*/
VirtualDevice GetVirtualDevice(const Expr& expr) const;
/*! \brief Indicate a function body is being entered. */
void EnterFunctionBody();
/*! \brief Indicate a function body has been processed. */
void ExitFunctionBody();
  /*! \brief Push a \p VirtualDevice onto the lexical VirtualDevice stack. Ignore if unconstrained.
*/
void PushVirtualDevice(const VirtualDevice& virtual_device);
  /*! \brief Pop a \p VirtualDevice from the lexical VirtualDevice stack. Ignore if stack is empty.
*/
void PopVirtualDevice();
/*! \brief Remember that \p var will be stored at \p virtual_device. Ignore if unconstrained.
*
* CAUTION: Despite the name we don't support re-entering the same function body.
*/
void PushBoundVar(Var var, const VirtualDevice& virtual_device);
/*! \brief Remove the binding for \p var to its \p VirtualDevice. Ignore if var is not bound. */
void PopBoundVar(const Var& var);
/*!
* \brief Returns the number of function definitions wrapping the currently visited expression.
*/
int function_nesting() const { return function_nesting_; }
private:
/*!
* \brief The number of function bodies entered. Since many transforms need to distinguish global
* functions from local functions this supports the mixin's \p is_global() helper method.
*/
int function_nesting_ = 0;
/*!
* \brief The stack of lexically enclosing "on_device" \p VirtualDevices, from outermost to
* innermost. When visiting an expression other than a variable we can assume the expression's
* result is to be stored on \p expr_virtual_devices.back().
*/
std::vector<VirtualDevice> expr_virtual_devices_;
/*!
* \brief A map from in-scope local variables to their \p VirtualDevices. We may assume the
* variable is only ever bound to a value stored on this \p VirtualDevice at runtime.
*
* Note: We're playing it safe and keying by object refs here just in case the Relay expression
* being rewritten has no module or other global to keep it alive.
*/
std::unordered_map<Var, VirtualDevice, runtime::ObjectPtrHash, runtime::ObjectPtrEqual>
var_virtual_devices_;
/*!
* \brief A map from global variables to their \p VirtualDevices, ie the "result_virtual_device"
* of the function they are bound to in the module we are working on. We calculate and store this
* explicitly so that we don't need to hold on to any module, which is often in the process of
* being rewritten.
*/
std::unordered_map<GlobalVar, VirtualDevice, runtime::ObjectPtrHash, runtime::ObjectPtrEqual>
global_var_virtual_devices_;
};
template <typename FType>
class DeviceAwareExprFunctor;
/*!
* \brief ExprFunctor which tracks \p VirtualDevices. We only support 'visitor' style implementation
* with no additional arguments, thus this is equivalent to \p DeviceAwareExprVisitor without
* any memoization.
*/
template <>
class DeviceAwareExprFunctor<void(const Expr& n)> : public ExprFunctor<void(const Expr& n)>,
public LexicalOnDeviceMixin {
private:
using TSuper = ExprFunctor<void(const Expr& n)>;
public:
explicit DeviceAwareExprFunctor(const Optional<IRModule>& maybe_mod)
: LexicalOnDeviceMixin(maybe_mod) {}
void VisitExpr_(const FunctionNode* function_node) {
if (function_node->HasNonzeroAttr(attr::kPrimitive)) {
// No tracking inside primitive functions.
return DeviceAwareVisitExpr_(function_node);
} else {
// Function parameters come into scope.
for (auto param : function_node->params) {
PushBoundVar(param, param->virtual_device());
}
// Entering scope of function body.
VirtualDevice virtual_device = function_node->virtual_device();
VLOG(2) << "entering " << virtual_device << " for function:" << std::endl
<< PrettyPrint(GetRef<Function>(function_node));
PushVirtualDevice(virtual_device);
EnterFunctionBody();
DeviceAwareVisitExpr_(function_node);
// Leaving scope of function body.
ExitFunctionBody();
PopVirtualDevice();
VLOG(2) << "leaving " << virtual_device << " for function:" << std::endl
<< PrettyPrint(GetRef<Function>(function_node));
// Function parameters go out of scope.
for (size_t i = 0; i < function_node->params.size(); ++i) {
PopBoundVar(function_node->params[i]);
}
}
}
void VisitExpr_(const LetNode* let_node) {
PreVisitLetBlock_(let_node);
std::vector<const LetNode*> bindings;
Expr expr = GetRef<Expr>(let_node);
while (const auto* inner_let_node = expr.as<LetNode>()) {
// Let-bound var (in pre visited version) goes into scope.
// (We'll just assume this is a letrec.)
VirtualDevice virtual_device = GetVirtualDevice(inner_let_node->value);
VLOG(2) << "var '" << inner_let_node->var->name_hint() << "' has virtual device "
<< virtual_device;
PushBoundVar(inner_let_node->var, virtual_device);
PreVisitLetBinding_(inner_let_node->var, inner_let_node->value);
bindings.emplace_back(inner_let_node);
expr = inner_let_node->body;
}
VisitExpr(expr);
for (auto itr = bindings.rbegin(); itr != bindings.rend(); ++itr) {
// Let-bound var goes out of scope.
const LetNode* visited_let_node = *itr;
PopBoundVar(visited_let_node->var);
PostVisitLet_(visited_let_node);
}
PostVisitLetBlock_(let_node);
}
void VisitExpr_(const CallNode* call_node) {
OnDeviceProps props = GetOnDeviceProps(call_node);
if (props.body.defined() && props.is_fixed()) {
// Entering lexical scope of "on_device" call.
VLOG(2) << "entering " << props.virtual_device << " for on_device:" << std::endl
<< PrettyPrint(GetRef<Call>(call_node));
PushVirtualDevice(props.virtual_device);
VisitExpr(props.body);
// Leaving lexical scope of "on_device" call.
PopVirtualDevice();
VLOG(2) << "leaving " << props.virtual_device << " for on_device:" << std::endl
<< PrettyPrint(GetRef<Call>(call_node));
} else {
DeviceAwareVisitExpr_(call_node);
}
}
/*!
* \brief These are as for VisitExpr_. \p VirtualDevices for expressions and function parameters
 * will be tracked automatically. Default implementation defers to ExprFunctor::VisitExpr_. For
* functions the function_nesting count will already include that of \p function_node.
*/
virtual void DeviceAwareVisitExpr_(const FunctionNode* function_node) {
return TSuper::VisitExpr_(function_node);
}
virtual void DeviceAwareVisitExpr_(const CallNode* call_node) {
return TSuper::VisitExpr_(call_node);
}
/*!
* \brief Visit the first let in a chain of let expressions before any let bindings or final
* body has been visited. Default implementation is a no-op.
*/
virtual void PreVisitLetBlock_(const LetNode* let_node) { /* no-op */
}
/*!
* \brief Visit a let-bound expression before the let body has been visited. Devices for the
* let-bound variable will be tracked automatically. Default implementation just visits var and
* value.
*/
virtual void PreVisitLetBinding_(const Var& var, const Expr& value) {
VisitExpr(var);
VisitExpr(value);
}
/*!
* \brief Visit a let expression after the let-bound value and body have been visited.
* Default implementation is a no-op.
*/
virtual void PostVisitLet_(const LetNode* let_node) { /* no-op */
}
/*!
* \brief Visit the first let in a chain of let expressions after it has been visited.
* Default implementation is a no-op.
*/
virtual void PostVisitLetBlock_(const LetNode* let_node) {}
};
/*! \brief ExprVisitor which tracks \p VirtualDevices. */
class DeviceAwareExprVisitor : public ExprVisitor, public LexicalOnDeviceMixin {
public:
explicit DeviceAwareExprVisitor(const Optional<IRModule>& maybe_mod)
: LexicalOnDeviceMixin(maybe_mod) {}
using ExprVisitor::VisitExpr_;
void VisitExpr_(const FunctionNode* function_node) final;
void VisitExpr_(const LetNode* let_node) final;
void VisitExpr_(const CallNode* call_node) final;
/*!
* \brief These are as for VisitExpr_. \p VirtualDevices for expressions and function parameters
 * will be tracked automatically. Default implementation defers to ExprVisitor::VisitExpr_. For
* functions the function_nesting count will already include that of \p function_node.
*/
virtual void DeviceAwareVisitExpr_(const FunctionNode* function_node);
virtual void DeviceAwareVisitExpr_(const CallNode* call_node);
/*!
* \brief Visit the first let in a chain of let expressions before any let bindings or final
* body has been visited. Default implementation is a no-op.
*/
virtual void PreVisitLetBlock_(const LetNode* let_node);
/*!
* \brief Visit a let-bound expression before the let body has been visited. \p VirtualDevices for
* the let-bound variable will be tracked automatically. Default implementation just visits var
* and value.
*/
virtual void PreVisitLetBinding_(const Var& var, const Expr& value);
/*!
* \brief Visit a let expression after the let-bound value and body have been visited.
* Default implementation is a no-op.
*/
virtual void PostVisitLet_(const LetNode* let_node);
/*!
* \brief Visit the first let in a chain of let expressions after it has been visited.
* Default implementation is a no-op.
*/
virtual void PostVisitLetBlock_(const LetNode* let_node);
};
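/*!
 * Example (editor's sketch): a visitor which reports the \p VirtualDevice planned for each call
 * site. Only \p DeviceAwareVisitExpr_ is overridden; the "on_device" and function-boundary
 * bookkeeping is inherited from the classes above.
 *
 * \code
 * class CallDeviceReporter : public DeviceAwareExprVisitor {
 *  public:
 *   explicit CallDeviceReporter(const Optional<IRModule>& maybe_mod)
 *       : DeviceAwareExprVisitor(maybe_mod) {}
 *
 *   void DeviceAwareVisitExpr_(const CallNode* call_node) override {
 *     LOG(INFO) << "call result is planned for "
 *               << GetVirtualDevice(GetRef<Call>(call_node));
 *     DeviceAwareExprVisitor::DeviceAwareVisitExpr_(call_node);  // continue the traversal
 *   }
 * };
 * \endcode
 */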
/*! \brief ExprMutator which tracks \p VirtualDevices. */
class DeviceAwareExprMutator : public ExprMutator, public LexicalOnDeviceMixin {
public:
explicit DeviceAwareExprMutator(const Optional<IRModule>& maybe_mod)
: LexicalOnDeviceMixin(maybe_mod) {}
Expr VisitExpr_(const FunctionNode* function_node) final;
Expr VisitExpr_(const LetNode* let_node) final;
Expr VisitExpr_(const CallNode* call_node) final;
/*!
* \brief These are as for VisitExpr_. \p VirtualDevices for expressions and function parameters
* will be tracked automatically. Default implementation defers to ExprMutator::VisitExpr_. For
* functions the function_nesting count will already include that of \p function_node.
*/
virtual Expr DeviceAwareVisitExpr_(const FunctionNode* function_node);
virtual Expr DeviceAwareVisitExpr_(const CallNode* call_node);
/*!
* \brief Visit the first let in a chain of let expressions before any let bindings or final
* body has been visited. Default implementation is a no-op.
*/
virtual void PreVisitLetBlock_(const LetNode* let_node);
/*!
* \brief Visit a let-bound expression before the let body has been visited. \p VirtualDevices for
* the let-bound variable will be tracked automatically. Default implementation just visits var
* and value.
*/
virtual std::pair<Var, Expr> PreVisitLetBinding_(const Var& var, const Expr& value);
/*!
* \brief Visit a let expression after the let-bound value and body have been visited.
* Default implementation just returns a reference to the post-visited node.
*/
virtual Expr PostVisitLet_(const LetNode* pre_let_node, const LetNode* post_let_node);
/*!
* \brief Visit the first let in a chain of let expressions after it has been visited.
* Default implementation returns reference to let node.
*/
virtual Expr PostVisitLetBlock_(const LetNode* pre_let_node, const LetNode* post_let_node);
};
/*!
 * \brief Returns a map from Relay expression node to its virtual device using the annotations
* and \p virtual_device fields of \p expr. The map's lifetime must not exceed that of
* \p expr itself.
*/
std::unordered_map<const ExprNode*, VirtualDevice> RecoverVirtualDeviceMap(const IRModule& mod,
const Expr& expr);
} // namespace transform
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_TRANSFORMS_DEVICE_AWARE_VISITORS_H_
| https://github.com/zk-ml/tachikoma |
src/relay/transforms/device_domains.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
 * \file src/relay/transforms/device_domains.h
* \brief Unification domain for the device planner.
*/
#ifndef TVM_RELAY_TRANSFORMS_DEVICE_DOMAINS_H_
#define TVM_RELAY_TRANSFORMS_DEVICE_DOMAINS_H_
#include <dlpack/dlpack.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/type.h>
#include <tvm/runtime/ndarray.h>
#include <tvm/target/compilation_config.h>
#include <tvm/target/virtual_device.h>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
namespace tvm {
namespace relay {
namespace transform {
class DeviceDomain;
using DeviceDomainPtr = std::shared_ptr<DeviceDomain>;
class DeviceDomains;
/*!
* \brief Represents the domain over which we collect equality constraints.
*
* \code
* D ::= ?x? -- first order, free
* | <virtual_device> -- first order, bound to specific virtual device
* | fn(D1, ..., Dn):Dr -- higher order
* \endcode
*
* We require a function value to be on the same device as its result. To support that we need
* a notion of the 'result domain' of a domain:
* \code
* result_domain(?x?) = ?x?
* result_domain(<virtual_device>) = <virtual_device>
* result_domain(fn(D1, ..., Dn):Dr) = result_domain(Dr)
* \endcode
*
* TODO(mbs): We currently don't allow sub-VirtualDevice constraints. Eg for a function we can
* express that the argument and result VirtualDevices must be exactly equal, but we cannot express
* that though the devices and targets for arguments and results must be equal, it is ok for
* memory scopes to differ. At the moment we can get away with this since we run PlanDevices
* twice: once with all memory scopes unconstrained, then again with just memory scopes as
* the new property to flow. However we're on thin ice here and better would be to allow
* constraints on VirtualDevices to be exploded into their device/target component and their
* memory scope component. Should we fold layout constraints into VirtualDevices then they would
* probably be grouped with memory scopes.
*/
class DeviceDomain {
public:
/*!
* \brief Constructs a first-order domain for \p virtual_device, which may be
* fully free (ie virtual_device is unconstrained), partially free (ie virtual_device has at
 * least one of its target, device id or memory scopes known), or fully fixed (ie virtual_device
* has its target, device id and memory scopes set).
*
* CAUTION: Use DeviceDomains::MakeFirstOrderDomain instead of this ctor.
*/
explicit DeviceDomain(VirtualDevice virtual_device)
: virtual_device_(std::move(virtual_device)) {}
/*!
* \brief Constructs a higher-order domain, where \p args_and_result contain the
* function argument and result domains in order.
*
* CAUTION: Use DeviceDomains::MakeHigherOrderDomain instead of this ctor.
*/
explicit DeviceDomain(std::vector<DeviceDomainPtr> args_and_result)
: virtual_device_(VirtualDevice::FullyUnconstrained()),
args_and_result_(std::move(args_and_result)) {}
bool is_higher_order() const { return !args_and_result_.empty(); }
VirtualDevice first_order_virtual_device() const {
ICHECK(args_and_result_.empty()) << "expecting domain to be first-order";
return virtual_device_;
}
size_t function_arity() const {
ICHECK(!args_and_result_.empty()) << "expecting domain to be higher-order";
return args_and_result_.size() - 1UL;
}
DeviceDomainPtr function_param(size_t i) const {
ICHECK(!args_and_result_.empty()) << "expecting domain to be higher-order";
ICHECK_LT(i + 1, args_and_result_.size()) << "parameter index is out of range";
return args_and_result_[i];
}
DeviceDomainPtr function_result() const {
ICHECK(!args_and_result_.empty());
return args_and_result_.back();
}
private:
/*!
* \brief If this is a function domain then always fully unconstrained. Otherwise will be
* fully unconstrained (the domain is still completely free), partially constrained
* (for example, the \p target and \p device_type are constrained but the \p virtual_device_id and
* \p memory_scope are still unconstrained), or fully constrained (everything is known).
*/
const VirtualDevice virtual_device_;
/*!
* \brief If this is a function domain then the sub-domains for each of the function's
* arguments, and the domain for its result. Otherwise empty.
*/
const std::vector<DeviceDomainPtr> args_and_result_;
friend class DeviceDomains;
};
/*!
* \brief Tracks the device domains for a set of expressions w.r.t. an equivalence relation
* built up by calls to \p UnifyOrNull.
*/
class DeviceDomains {
public:
explicit DeviceDomains(CompilationConfig config);
const CompilationConfig& config() const { return config_; }
/*!
* \brief Returns the domain representing \p virtual_device. If \p virtual_device is fully
 * constrained then the domain will be the unique domain for that \p virtual_device.
*/
DeviceDomainPtr MakeFirstOrderDomain(const VirtualDevice& virtual_device);
/*!
* \brief Returns a higher-order domain with \p args_and_results.
*/
DeviceDomainPtr MakeHigherOrderDomain(std::vector<DeviceDomainPtr> arg_and_results) {
return std::make_shared<DeviceDomain>(std::move(arg_and_results));
}
/*!
 * \brief Returns a domain appropriate for \p type whose result domain is bound to \p
* virtual_device. If \p type is a function then all parameter domains will be completely free. It
* is valid for \p virtual_device to be fully unconstrained.
*/
DeviceDomainPtr MakeDomain(const Type& type, const VirtualDevice& virtual_device);
/*!
 * \brief Returns a domain appropriate for \p type whose result is bound to
 * \p non_canonical_virtual_device, which cannot be fully unconstrained. We first canonicalize
 * the virtual device to ensure it has a target and is unique.
*/
DeviceDomainPtr ForVirtualDevice(const Type& type,
const VirtualDevice& non_canonical_virtual_device);
/*! \brief Returns a free domain appropriate for \p type. */
DeviceDomainPtr Free(const Type& type) {
return MakeDomain(type, VirtualDevice::FullyUnconstrained());
}
/*! \brief Returns the domain representing the equivalence class containing \p domain. */
DeviceDomainPtr Lookup(DeviceDomainPtr domain);
/*!
* \brief Returns the most constrained domain which agrees with both \p lhs and \p rhs. Returns
* null if no such domain exists, ie some first-order component of \p lhs is constrained
* differently than the corresponding component of \p rhs.
*/
DeviceDomainPtr JoinOrNull(const DeviceDomainPtr& lhs, const DeviceDomainPtr& rhs);
/*!
* \brief Unifies \p lhs and \p rhs, returning the most-bound of the two. Returns null if
* \p lhs and \p rhs are not unifiable, in which case the constraint system may be left in
* a partially modified state.
*/
// TODO(mbs): I don't think we need an occurs check since the program is well-typed, but
// given we have refs to functions I'm prepared to be surprised.
DeviceDomainPtr UnifyOrNull(DeviceDomainPtr lhs, DeviceDomainPtr rhs);
/*
* \brief Force all domains in \p higher_order_domain to unify with \p first_order_domain.
* This can be used to handle functions within tuples, references and ADTs since we don't
* attempt to track anything beyond 'the device' for expressions of those first-order types.
*
* Returns false if any unification fails.
*/
bool CollapseOrFalse(const DeviceDomainPtr& first_order_domain,
const DeviceDomainPtr& higher_order_domain);
/*!
* \brief Unifies \p lhs_first_order and \p rhs_maybe_higher_order. If \p rhs_maybe_higher_order
* is indeed higher-order, require all of its arguments and result to unify with
* \p lhs_first_order. Otherwise same as \p Unify. Returns false if unification is not possible.
*
* In an expression such as:
* \code
* (fn(...) {...}, ...).0
* \endcode
* we need to force all the devices of the inner function to be the same as the device for the
* overall tuple since the device domain does not understand tuples. Similarly for references
* and ADTs.
*/
bool UnifyCollapsedOrFalse(const DeviceDomainPtr& lhs_first_order,
const DeviceDomainPtr& rhs_maybe_higher_order);
/*! \brief Returns true if a domain is known for \p expr. */
bool contains(const Expr& expr) const { return expr_to_domain_.count(expr.get()); }
/*! \brief Returns the domain representing \p expr. */
DeviceDomainPtr DomainFor(const Expr& expr);
/*!
* \brief Returns the domain representing the callee (ie 'op') in \p call expression. If the
* callee is a primitive or special operation we handle it specially. Otherwise defers to \p
* DomainFor(call->op).
*
* This special handling is needed:
* - To handle the "on_device" and "device_copy" ops which constrain devices to the given
* devices.
* - To handle some special ops which constrain devices to the CPU.
* - To allow the same primitive to be called on different devices at different call sites.
* Since each call to the op can have a different domain we index the ops by the call expression
* rather than the op itself.
*/
DeviceDomainPtr DomainForCallee(const Call& call);
/*!
* \brief Unifies the domains for expressions \p lhs and \p rhs.
*
* Aborts if unification fails.
*/
void UnifyExprExact(const Expr& lhs, const Expr& rhs);
/*!
* \brief Attempts to unify the domains for expressions \p lhs and \p rhs, however if they
* cannot be unified then returns with no change to the unification system.
*/
void OptionalUnifyExprExact(const Expr& lhs, const Expr& rhs);
/*!
* \brief Unifies the domain for \p expr with \p expected_domain.
*
* Aborts if unification fails.
*/
void UnifyExprExact(const Expr& expr, const DeviceDomainPtr& expected_domain);
/*!
* \brief Unifies the domain for \p expr with \p expected_domain.
* If \p expected_domain is higher-order but \p expr is first-order, require all arguments
* and the result of \p expected_domain to have the same domain as for \p expr.
*
* Aborts if unification fails.
*/
void UnifyExprCollapsed(const Expr& expr_first_order,
const DeviceDomainPtr& expected_domain_maybe_higher_order);
  /*! \brief Returns true if \p domain is fully constrained. */
bool IsFullyConstrained(DeviceDomainPtr domain);
/*! \brief Force all \p VirtualDevices in \p domain to default to \p default_virtual_device. */
void SetDefault(DeviceDomainPtr domain, const VirtualDevice& default_virtual_device);
/*!
 * \brief If \p domain is higher-order default its result domain to \p default_virtual_device.
* Then force all remaining \p VirtualDevices to the result domain (freshly defaulted or
* original). If \p domain is first-order same as \p SetDefault.
*/
void SetResultDefaultThenParams(const DeviceDomainPtr& domain_maybe_higher_order,
const VirtualDevice& default_virtual_device);
/*!
* \brief Returns the result domain for \p domain (see defn in DeviceDomain comment).
*/
DeviceDomainPtr ResultDomain(DeviceDomainPtr domain);
/*!
* \brief Returns the result \p VirtualDevice (possibly unconstrained) for \p domain
* (see defn in DeviceDomain comment).
*/
VirtualDevice ResultVirtualDevice(const DeviceDomainPtr& domain) {
return ResultDomain(domain)->first_order_virtual_device();
}
/*! \brief Returns one-line description of \p domain for debugging. */
std::string ToString(DeviceDomainPtr domain);
/*! \brief Returns description of entire system of constraints for debugging */
std::string ToString();
private:
/*! \brief Intrinsics we need to handle specially. */
const Op& alloc_storage_op = Op::Get("memory.alloc_storage");
const Op& alloc_tensor_op = Op::Get("memory.alloc_tensor");
const Op& shape_of_op = Op::Get("vm.shape_of");
const Op& invoke_tvm_op = Op::Get("vm.invoke_tvm_op");
const Op& reshape_tensor_op = Op::Get("vm.reshape_tensor");
CompilationConfig config_;
/*!
* \brief The domain for first-order expressions of non-tensor type, such as shapes and
* buffer dimensions. Generally this will be a CPU.
*/
DeviceDomainPtr host_domain_;
/*! \brief Maps expressions to their domains as determined during analysis. */
std::unordered_map<const ExprNode*, DeviceDomainPtr> expr_to_domain_;
/*!
* \brief Maps call expressions to the domains for their callee where the callee is a primitive.
*/
std::unordered_map<const CallNode*, DeviceDomainPtr> call_to_callee_domain_;
/*! \brief Maps device domains to their equivalent domains as determined during unification. */
std::unordered_map<DeviceDomainPtr, DeviceDomainPtr> domain_to_equiv_;
/*!
* \brief Maps fully constrained \p VirtualDevices to their corresponding domains. By sharing
* those domains we can ensure:
*
* \code
* domain0 != domain1 && domain0 fully constrained && domain1 fully constrained
* ==> domain0 and domain1 are incompatible
* \endcode
*/
std::unordered_map<VirtualDevice, DeviceDomainPtr, runtime::ObjectPtrHash,
runtime::ObjectPtrEqual>
fully_constrained_virtual_device_to_domain_;
};
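/*!
 * Example (editor's sketch): the flavour of how the device planner drives this class when it
 * encounters a call to a primitive. Obtaining the \p CompilationConfig and walking the module
 * are elided; \c call is assumed to be in scope, its callee's domain is assumed to be
 * higher-order, and \c default_primitive_virtual_device is assumed available on the config.
 *
 * \code
 * DeviceDomains domains(config);
 * // The call's overall domain must agree with the result domain of its callee...
 * DeviceDomainPtr callee_domain = domains.DomainForCallee(call);
 * domains.UnifyExprExact(call, domains.ResultDomain(callee_domain));
 * // ...and each argument must agree with the corresponding parameter domain.
 * for (size_t i = 0; i < call->args.size(); ++i) {
 *   domains.UnifyExprExact(call->args[i], callee_domain->function_param(i));
 * }
 * // Whatever is still unconstrained afterwards can be defaulted.
 * domains.SetDefault(domains.DomainFor(call), config->default_primitive_virtual_device);
 * \endcode
 */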
} // namespace transform
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_TRANSFORMS_DEVICE_DOMAINS_H_
| https://github.com/zk-ml/tachikoma |
src/relay/transforms/expr_subst.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file expr_subst.h
* \brief Utility functions for substituting expressions.
*/
#ifndef TVM_RELAY_TRANSFORMS_EXPR_SUBST_H_
#define TVM_RELAY_TRANSFORMS_EXPR_SUBST_H_
#include <tvm/relay/expr.h>
#include <unordered_map>
namespace tvm {
namespace relay {
Expr ExprSubst(const Expr& expr,
std::unordered_map<Expr, Expr, ObjectPtrHash, ObjectPtrEqual> subst_map);
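/*!
 * Example (editor's sketch): substituting one sub-expression for another inside a larger body.
 * \c body, \c old_expr and \c new_expr are assumed to already be in scope.
 *
 * \code
 * std::unordered_map<Expr, Expr, ObjectPtrHash, ObjectPtrEqual> subst_map;
 * subst_map[old_expr] = new_expr;
 * Expr rewritten = ExprSubst(body, subst_map);
 * \endcode
 */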
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_TRANSFORMS_EXPR_SUBST_H_
| https://github.com/zk-ml/tachikoma |
src/relay/transforms/fake_quantization_to_integer.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/relay/transforms/fake_quantization_to_integer.h
* \brief Extract subgraph of a fake quantized region.
*/
#ifndef TVM_RELAY_TRANSFORMS_FAKE_QUANTIZATION_TO_INTEGER_H_
#define TVM_RELAY_TRANSFORMS_FAKE_QUANTIZATION_TO_INTEGER_H_
#include <tvm/ir/affine_type.h>
#include <tvm/relay/expr_functor.h>
#include <unordered_set>
namespace tvm {
namespace relay {
class SubgraphExtractor : public ExprVisitor {
public:
const std::unordered_set<Expr, ObjectPtrHash, ObjectPtrEqual> GetSubgraph(const Expr& expr);
const Map<Expr, AffineType> GetAffineTypes();
void VisitExpr(const Expr& expr) override;
protected:
void VisitExpr_(const CallNode* call_node) override;
private:
const Op quantize_op_ = Op::Get("qnn.quantize");
const Op dequantize_op_ = Op::Get("qnn.dequantize");
bool is_fake_quantized_ = true;
Map<Expr, AffineType> affine_types_;
};
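/*!
 * Example (editor's sketch): extracting the fake-quantized region which produces a given
 * qnn.quantize call. \c quantize_call is an assumed-in-scope Expr for that call.
 *
 * \code
 * SubgraphExtractor extractor;
 * auto subgraph = extractor.GetSubgraph(quantize_call);       // exprs between (de)quantize ops
 * Map<Expr, AffineType> affine_types = extractor.GetAffineTypes();
 * if (subgraph.empty()) {
 *   // The region was not fake-quantized (or contained an unsupported op); leave it alone.
 * }
 * \endcode
 */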
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_TRANSFORMS_FAKE_QUANTIZATION_TO_INTEGER_H_
| https://github.com/zk-ml/tachikoma |
src/relay/transforms/fold_constant.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file fold_constant.h
* \brief Utility functions for folding constants in expressions.
*/
#ifndef TVM_RELAY_TRANSFORMS_FOLD_CONSTANT_H_
#define TVM_RELAY_TRANSFORMS_FOLD_CONSTANT_H_
#include <tvm/relay/expr.h>
namespace tvm {
namespace relay {
namespace transform {
/*!
* \brief Apply constant folding on an expression.
*
* \param expr The expression to fold.
* \param fold_qnn Whether to fold constants for QNN operations.
* \returns The new folded expression.
*/
Expr FoldConstantExpr(const Expr& expr, bool fold_qnn = true);
/*!
* \brief Returns \p expr with any constants expressions evaluated and let-bound constants
* inlined. Returns \p expr unchanged if no change.
*
* CAUTION: The importers rely on this function returning \p expr unchanged to preserve sharing
* from their p.o.v. Furthermore, this function can be called before conversion to ANF so
* we must avoid all recursion.
*/
Expr FoldConstantExpr(const Expr& expr, const IRModule& mod, bool fold_qnn);
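/*!
 * Example (editor's sketch): typical invocations from inside another pass. \c expr is assumed
 * to be a well-formed Relay expression and \c mod its enclosing module.
 *
 * \code
 * Expr folded = FoldConstantExpr(expr);               // no module at hand, QNN folding enabled
 * Expr folded2 = FoldConstantExpr(expr, mod, false);  // within mod, QNN folding disabled
 * \endcode
 */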
} // namespace transform
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_TRANSFORMS_FOLD_CONSTANT_H_
| https://github.com/zk-ml/tachikoma |
src/relay/transforms/gradient.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file gradient.h
* \brief Utility functions for Automatic Differentiation in Relay.
*/
#ifndef TVM_RELAY_TRANSFORMS_GRADIENT_H_
#define TVM_RELAY_TRANSFORMS_GRADIENT_H_
#include <tvm/relay/expr.h>
#include <tvm/relay/function.h>
#include <vector>
namespace tvm {
namespace relay {
inline Type GradRetType(const Function& f) {
// if type annotations are provided, we will construct a ret type;
// otherwise, leave it to be inferred
if (!f->ret_type.defined()) {
return Type();
}
std::vector<Type> vt;
for (const auto& p : f->params) {
if (!p->type_annotation.defined()) {
return Type();
}
vt.push_back(p->type_annotation);
}
return TupleType({f->ret_type, TupleType(vt)});
}
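/*!
 * Worked example (editor's addition): for a fully annotated function
 *   fn (%x: Tensor[(3, 4), float32], %y: Tensor[(4), float32]) -> Tensor[(3), float32]
 * GradRetType produces
 *   (Tensor[(3), float32], (Tensor[(3, 4), float32], Tensor[(4), float32]))
 * i.e. the original result paired with a tuple of gradients, one per parameter. If the return
 * type or any parameter annotation is missing, the empty Type() defers to type inference.
 */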
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_TRANSFORMS_GRADIENT_H_
| https://github.com/zk-ml/tachikoma |
src/relay/transforms/infer_layout_utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file infer_layout_utils.h
* \brief Utility functions to alter the layouts of operators or replace primitive operators with
other expressions. This pass can be used for computing convolution in
custom layouts or other general weight pre-transformation.
*/
#ifndef TVM_RELAY_TRANSFORMS_INFER_LAYOUT_UTILS_H_
#define TVM_RELAY_TRANSFORMS_INFER_LAYOUT_UTILS_H_
#include <tvm/relay/expr.h>
#include <tvm/relay/op_attr_types.h>
#include <tvm/tir/data_layout.h>
#include <string>
#include <tuple>
#include <utility>
#include "pattern_utils.h"
namespace tvm {
namespace relay {
/*!
* \brief Returns a new layout where the subordinate factors are adjusted based on the tensor
* shape.
* \param old_layout The old layout before any transformation.
* \param old_shape The shape of the original tensor.
* \return The adjusted Layout.
*/
Layout AdjustSubordinateFactors(const Layout& src_layout, const Layout& old_layout,
const Array<tvm::PrimExpr>& old_shape);
bool Isomorphic(const Layout& lhs, const Layout& rhs);
/*!
 * \brief Try transforming `old` in the same way as how `ref_old` is transformed to `ref_new`.
* `old` and `ref_old` are expected to describe two broadcastable tensors. Layout with fewer rank
* will be expanded. For example,
* if old = 'NW', ref_old = 'NC', ref_new = 'NC1c', then the result is 'NW1w';
* if old = 'W', ref_old = 'NC', ref_new = 'NC1c', then the result is 'NW1w'.
* When `old` and `ref_old` are isomorphic (same structure, only differ in naming), the transform
* is guaranteed to succeed, in which case the function is simply renaming the axes of `ref_new`
* to conform to `old`'s naming.
* \param old The layout to be transformed.
* \param ref_old The reference layout before transform.
* \param ref_new The reference layout after transform.
* \return The transformed layout.
*/
Layout TryTransformLike(const Layout& old, const Layout& ref_old, const Layout& ref_new);
/*
* \brief An output structure to hold results from FInferCorrectLayout calls.
 * - input_layouts: Inferred input layouts.
 * - output_layouts: Inferred output layouts.
 * - new_attrs: Updated attributes consistent with inferred layouts.
*/
class InferCorrectLayoutOutputNode : public Object {
public:
Array<Layout> input_layouts;
Array<Layout> output_layouts;
Attrs new_attrs;
void VisitAttrs(tvm::AttrVisitor* v) {
v->Visit("input_layouts", &input_layouts);
v->Visit("output_layouts", &output_layouts);
v->Visit("new_attrs", &new_attrs);
}
TVM_DECLARE_BASE_OBJECT_INFO(InferCorrectLayoutOutputNode, Object);
static constexpr const char* _type_key = "relay._transform.InferCorrectLayoutOutput";
};
class InferCorrectLayoutOutput : public ObjectRef {
public:
InferCorrectLayoutOutput(Array<Layout> input_layouts, Array<Layout> output_layouts,
Attrs new_attrs) {
auto n = make_object<InferCorrectLayoutOutputNode>();
n->input_layouts = std::move(input_layouts);
n->output_layouts = std::move(output_layouts);
n->new_attrs = std::move(new_attrs);
data_ = n;
}
TVM_DEFINE_OBJECT_REF_METHODS(InferCorrectLayoutOutput, ObjectRef, InferCorrectLayoutOutputNode);
};
/*!
* \brief Infer & correct function of node layout. See \p Layout for layout convention
* \param attrs The attribute of the node.
* \param new_in_layouts The layouts of input arguments after alter_op_layout.
* This can be undefined, which means we call this function before alternating
* any operators.
* \param old_in_layouts The layouts of input arguments before alter_op_layout.
* \param old_in_types The types of old input arguments.
* \return infer_layout_output Inferred layouts and updated attributes stored in
* InferCorrectLayoutOutput above.
*/
using FInferCorrectLayout = runtime::TypedPackedFunc<InferCorrectLayoutOutput(
const Attrs& attrs, const Array<Layout>& new_in_layouts, const Array<Layout>& old_in_layouts,
const Array<tvm::relay::Type>& old_in_types)>;
inline InferCorrectLayoutOutput ElemwiseArbitraryLayout(
const Attrs& attrs, const Array<Layout>& new_in_layouts, const Array<Layout>& old_in_layouts,
const Array<tvm::relay::Type>& old_in_types) {
Layout ret;
if (new_in_layouts.defined()) {
ICHECK_GE(new_in_layouts.size(), 1);
ret = new_in_layouts[0];
} else {
for (size_t i = 0; i < old_in_layouts.size(); ++i) {
if (old_in_layouts[i].defined()) {
ret = old_in_layouts[i];
break;
}
}
}
return InferCorrectLayoutOutput(Array<Layout>(old_in_layouts.size(), ret), {ret}, attrs);
}
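/*!
 * Example (editor's sketch): how an elementwise operator would typically opt in to layout
 * inference with the helper above; "my_op" is a placeholder operator name.
 *
 * \code
 * RELAY_REGISTER_OP("my_op").set_attr<FInferCorrectLayout>("FInferCorrectLayout",
 *                                                          ElemwiseArbitraryLayout);
 * \endcode
 */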
std::pair<Array<Layout>, Array<Layout>> BinaryBroadcastLayoutHelper(
const Attrs& attrs, const Array<Layout>& new_in_layouts, const Array<Layout>& old_in_layouts,
const Array<tvm::relay::Type>& old_in_types);
/*! \brief Infer layout for binary broadcast operators */
inline InferCorrectLayoutOutput BinaryBroadcastLayout(const Attrs& attrs,
const Array<Layout>& new_in_layouts,
const Array<Layout>& old_in_layouts,
const Array<tvm::relay::Type>& old_in_types) {
auto inferred_layout =
BinaryBroadcastLayoutHelper(attrs, new_in_layouts, old_in_layouts, old_in_types);
return InferCorrectLayoutOutput(inferred_layout.first, inferred_layout.second, attrs);
}
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_TRANSFORMS_INFER_LAYOUT_UTILS_H_
| https://github.com/zk-ml/tachikoma |
src/relay/transforms/let_list.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file let_list.h
 * \brief LetList records let bindings and inserts let expressions implicitly.
* using it, one can treat AST as value instead of expression,
* and pass them around freely without fear of AST explosion (or effect duplication).
 * for example, if one writes 'b = a + a; c = b + b; d = c + c', the AST will contain 8 'a'.
 * if one instead writes 'b = ll.Push(a + a); c = ll.Push(b + b); d = ll.Get(c + c);',
* the AST will contain 2 'a', as b and c are now variables.
*/
#ifndef TVM_RELAY_TRANSFORMS_LET_LIST_H_
#define TVM_RELAY_TRANSFORMS_LET_LIST_H_
#include <tvm/relay/analysis.h>
#include <tvm/relay/expr.h>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "tvm/relay/type.h"
namespace tvm {
namespace relay {
/*!
 * \brief LetList allows you to transform expressions into variables, so you can copy them around.
 * one can insert into the LetList by calling Push, and wrap an expression with the accumulated
 * bindings by calling Get. additionally, there is the 'With' function, which automatically calls Get.
*/
class LetList {
public:
~LetList() {
if (lets_.size() > 0 && !used_) {
LOG(WARNING) << "letlist not used";
}
}
/*!
* \brief insert a binding.
*
* \param pv the var of the binding.
*
* \param expr the value of the binding.
*
   * \return a Var that holds the inserted expr.
*/
Var Push(Var pv, Expr expr) {
ICHECK(!used_);
ICHECK(WellFormed(expr)) << "expression:" << std::endl << PrettyPrint(expr);
lets_.emplace_back(std::make_pair(pv, expr));
return pv;
}
/*!
* \brief insert a binding.
*
* \param expr the value of the binding.
*
* \param ty the type of the binding.
*
   * \return a Var that holds the inserted expr.
*/
Var Push(Expr expr, Type ty) { return Push(Var::GenSym(ty), expr); }
/*!
* \brief insert a binding.
*
* \param expr the value of the binding.
*
   * \return a Var that holds the inserted expr.
*/
Var Push(Expr expr) { return Push(expr, Type()); }
/*!
* \brief wrap an expr around the LetList.
*
* \param body the Expression to be wrapped around.
*
* \return the wrapped expr.
*/
Expr Get(const Expr& body) {
ICHECK(!used_);
Expr ret = body;
for (auto rit = lets_.rbegin(); rit != lets_.rend(); ++rit) {
ret = Let(std::get<0>(*rit), std::get<1>(*rit), ret);
}
used_ = true;
return ret;
}
/*! \brief get the number of let bindings in the let list.
*
* \return the let list size.
*/
size_t size() const { return lets_.size(); }
  /*! \brief generate a LetList and wrap the result automatically.
*
* \param f a function that generate the unwrapped Expr.
*
* \code
   * // Example code that generates `16 * a` using 4 plus ops instead of 15.
* Expr mult_sixteen(const Var& a) {
* Op plus = Op::Get("plus");
* // Automatically call Get with LetList::With
* return LetList::With([&](LetList* ll) {
* // Turn a call to plus into a variable to avoid duplication of code
* Var b = ll->Push(Call(plus, {a, a}));
* Var c = ll->Push(Call(plus, {b, b}));
   *     Var d = ll->Push(Call(plus, {c, c}));
* return Call(plus, {d, d});
* });
* }
* \endcode
*
* \return the wrapped Expr.
*/
template <typename F>
static Expr With(F&& f) {
LetList ll;
return ll.Get(f(&ll));
}
static Expr LetBind(const Expr& e, const std::function<Expr(const Var&)>& f) {
return With([&](LetList* ll) { return f(ll->Push(e)); });
}
private:
std::vector<std::pair<Var, Expr>> lets_;
bool used_ = false;
};
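/*!
 * Example (editor's sketch): LetBind is a one-binding shorthand for the With/Push pattern shown
 * above. \c e is an assumed-in-scope expression and \c add_op the "add" operator.
 *
 * \code
 * // let v = e in v + v
 * Expr doubled = LetList::LetBind(e, [&](const Var& v) { return Call(add_op, {v, v}); });
 * \endcode
 */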
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_TRANSFORMS_LET_LIST_H_
| https://github.com/zk-ml/tachikoma |
src/relay/transforms/meta_schedule_layout_rewrite.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef TVM_RELAY_TRANSFORMS_META_SCHEDULE_LAYOUT_REWRITE_H_
#define TVM_RELAY_TRANSFORMS_META_SCHEDULE_LAYOUT_REWRITE_H_
#include <tvm/relay/expr_functor.h>
#include <tvm/tir/index_map.h>
namespace tvm {
namespace relay {
class MetaScheduleLayoutRewriter : public ExprMutator {
public:
Expr VisitExpr_(const CallNode* n) final;
static void LayoutQueuePush(const tir::IndexMap& index_map);
};
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_TRANSFORMS_META_SCHEDULE_LAYOUT_REWRITE_H_
| https://github.com/zk-ml/tachikoma |
src/relay/transforms/pass_utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
*
 * \file src/relay/transforms/pass_utils.h
* \brief Utilities for writing passes
*/
#ifndef TVM_RELAY_TRANSFORMS_PASS_UTILS_H_
#define TVM_RELAY_TRANSFORMS_PASS_UTILS_H_
#include <tvm/relay/attrs/transform.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/expr_functor.h>
#include <tvm/relay/op.h>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include "../analysis/dependency_graph.h"
#include "../op/annotation/annotation.h"
#include "../op/memory/on_device.h"
#include "./let_list.h"
namespace tvm {
namespace relay {
/*!
* \brief Check if expr is positive constant.
* \param expr The expression to be checked.
 * \return Whether all elements of expr are positive constants.
*/
bool IsAllPositiveConstant(const Expr& expr);
/*!
* \brief Substitute var with subst.
* \param type The type to be substituted.
* \param tvar The type variable to be substituted.
* \param subst The target of substitution.
* \return The substituted result.
*/
Type TypeSubst(const Type& type, const TypeVar& tvar, const Type& subst);
/*!
* \brief Substitute var with subst.
* \param expr The expr to be substituted.
* \param tvar The type variable to be substituted.
* \param subst The target of substitution.
* \return The substituted result.
*/
Expr TypeSubst(const Expr& expr, const TypeVar& tvar, const Type& subst);
/*!
* \brief Substitute type vars in type.
* \param type The type to be substituted.
* \param subst_map The map of substitution.
* \return The substituted result.
*/
Type TypeSubst(const Type& type, const tvm::Map<TypeVar, Type>& subst_map);
/*!
* \brief Substitute type vars in type.
* \param expr The expr to be substituted.
* \param subst_map The map of substitution.
* \return The substituted result.
*/
Expr TypeSubst(const Expr& expr, const tvm::Map<TypeVar, Type>& subst_map);
/*!
* \brief Check if type is dynamic.
* \param ty The type to be checked.
* \return Whether the type is dynamic.
*/
bool IsDynamic(const Type& ty);
/*!
* \brief Check if call is data dependent.
* \param call The call to be checked.
* \return Whether the call is data dependent.
*/
bool IsDataDependent(const CallNode* call);
/*!
* \brief Make arbitrary transformation preserve the out most function.
* \param func The transformation.
* \param e The expression
* \return the transformed expression. If e is a function the return is also a function.
*/
inline Expr TransformF(const std::function<Expr(const Expr&)>& func, const Expr& e) {
if (const FunctionNode* f = e.as<FunctionNode>()) {
return WithFields(GetRef<Function>(f), f->params, func(f->body));
} else {
return func(e);
}
}
/*!
 * \brief Decide whether the expression is atomic or not.
 * \param expr the expression
 * \return whether it is atomic.
 * if so, the compute cost of the expression is bounded so it can be copied without graph mode.
*/
inline bool IsAtomic(const Expr& expr) {
Expr true_expr = IgnoreOnDevice(expr);
return true_expr.as<VarNode>() || true_expr.as<OpNode>() || true_expr.as<ConstructorNode>() ||
true_expr.as<GlobalVarNode>() ||
true_expr.as<ConstantNode>(); // Constant is always by reference.
}
/*!
* \brief Cache the compiler_begin annotation op to reduce registry lookup overhead
* \param void
* \return compiler_begin op
*/
inline const Op& CompilerBeginOp() {
static auto op = Op::Get("annotation.compiler_begin");
return op;
}
/*!
* \brief Cache the compiler_end annotation op to reduce registry lookup overhead
* \param void
* \return compiler_end op
*/
inline const Op& CompilerEndOp() {
static auto op = Op::Get("annotation.compiler_end");
return op;
}
template <typename ConditionObjectPtr>
struct TreeNode {
typedef std::shared_ptr<TreeNode<ConditionObjectPtr>> pointer;
virtual ~TreeNode() {}
};
template <typename ConditionObjectPtr>
struct TreeLeafNode : TreeNode<ConditionObjectPtr> {
using TreeObjectPtr = typename TreeNode<ConditionObjectPtr>::pointer;
Expr body;
explicit TreeLeafNode(Expr body) : body(body) {}
static TreeObjectPtr Make(Expr body) { return std::make_shared<TreeLeafNode>(body); }
~TreeLeafNode() {}
};
template <typename ConditionObjectPtr>
struct TreeLeafFatalNode : TreeNode<ConditionObjectPtr> {
using TreeObjectPtr = typename TreeNode<ConditionObjectPtr>::pointer;
TreeLeafFatalNode() = default;
static TreeObjectPtr Make() { return std::make_shared<TreeLeafFatalNode>(); }
~TreeLeafFatalNode() {}
};
template <typename ConditionObjectPtr>
struct TreeBranchNode : TreeNode<ConditionObjectPtr> {
using TreeObjectPtr = typename TreeNode<ConditionObjectPtr>::pointer;
ConditionObjectPtr cond;
TreeObjectPtr then_branch;
TreeObjectPtr else_branch;
TreeBranchNode(ConditionObjectPtr cond, TreeObjectPtr then_branch, TreeObjectPtr else_branch)
: cond(cond), then_branch(then_branch), else_branch(else_branch) {}
static TreeObjectPtr Make(ConditionObjectPtr cond, TreeObjectPtr then_branch,
TreeObjectPtr else_branch) {
return std::make_shared<TreeBranchNode>(cond, then_branch, else_branch);
}
~TreeBranchNode() {}
};
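/*!
 * Example (editor's sketch): assembling a two-way decision tree from the nodes above, in the
 * style used when compiling match expressions. \c Cond stands in for the caller's condition
 * object pointer type; \c cond, \c then_body and \c else_body are assumed to be in scope.
 *
 * \code
 * using Tree = TreeNode<Cond>::pointer;
 * Tree then_leaf = TreeLeafNode<Cond>::Make(then_body);
 * Tree else_leaf = TreeLeafNode<Cond>::Make(else_body);
 * Tree tree = TreeBranchNode<Cond>::Make(cond, then_leaf, else_leaf);
 * // An unreachable arm can be represented with TreeLeafFatalNode<Cond>::Make().
 * \endcode
 */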
struct ScopeNode;
using Scope = std::shared_ptr<ScopeNode>;
using NodeScopeMap = std::unordered_map<DependencyGraph::Node*, Scope>;
using ExprSet = std::unordered_set<Expr, ObjectPtrHash, ObjectPtrEqual>;
/* Invariant: when parent is null level is 0
* Invariant: when parent is not null level is 1 + parent->level
*/
struct ScopeNode {
// the level of the scope
size_t level;
// the parent scope
Scope parent;
// the corresponding let list which holds all let bindings in the scope
std::shared_ptr<LetList> let_list = std::make_shared<LetList>();
explicit ScopeNode(const Scope& parent) : level(1 + parent->level), parent(parent) {}
ScopeNode() : level(0) {}
};
/*! \brief Calculate the scope of nodes in the dependency graph by least common ancestor.
*
 * \param dg The input dependency graph.
 * \return A pair of (1) the node -> scope mapping for all nodes and (2) the set of expressions
 *         whose scope is lifted due to dependency.
*/
std::pair<NodeScopeMap, ExprSet> CalcScope(const DependencyGraph& dg);
/*! \brief Find the least common ancestor of lhs scope and rhs scope.
*/
Scope LCA(Scope lhs, Scope rhs);
// For basic block normal form.
Expr ToBasicBlockNormalFormAux(const Expr& e);
// ToANormalForm for expressions and as a Pass are declared in transform.h
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_TRANSFORMS_PASS_UTILS_H_
| https://github.com/zk-ml/tachikoma |
src/relay/transforms/pattern_utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
*
* \file tvm/relay/transforms/pattern_utils.h
* \brief Header of internal operator functions
* These can be used for writing passes.
*/
#ifndef TVM_RELAY_TRANSFORMS_PATTERN_UTILS_H_
#define TVM_RELAY_TRANSFORMS_PATTERN_UTILS_H_
#include <builtin_fp16.h>
#include <tvm/node/structural_equal.h>
#include <tvm/relay/analysis.h>
#include <tvm/relay/attrs/nn.h>
#include <tvm/relay/attrs/reduce.h>
#include <tvm/relay/attrs/transform.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/op.h>
#include <tvm/relay/op_attr_types.h>
#include <tvm/runtime/registry.h>
#include <tvm/tir/data_layout.h>
#include <limits>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "../backend/utils.h"
#include "../op/make_op.h"
namespace tvm {
namespace relay {
/*!
* \brief Dispatch DataType to the C++ data type
* during runtime.
*/
#define TVM_DTYPE_DISPATCH(type, DType, ...) \
if (type == DataType::Float(64)) { \
typedef double DType; \
{ __VA_ARGS__ } \
} else if (type == DataType::Float(32)) { \
typedef float DType; \
{ __VA_ARGS__ } \
} else if (type == DataType::Float(16)) { \
typedef uint16_t DType; \
{ __VA_ARGS__ } \
} else if (type == DataType::BFloat(16)) { \
typedef uint16_t DType; \
{ __VA_ARGS__ } \
} else if (type == DataType::Int(64)) { \
typedef int64_t DType; \
{ __VA_ARGS__ } \
} else if (type == DataType::Int(32)) { \
typedef int32_t DType; \
{ __VA_ARGS__ } \
} else if (type == DataType::Int(16)) { \
typedef int16_t DType; \
{ __VA_ARGS__ } \
} else if (type == DataType::Int(8)) { \
typedef int8_t DType; \
{ __VA_ARGS__ } \
} else if (type == DataType::UInt(64)) { \
typedef uint64_t DType; \
{ __VA_ARGS__ } \
} else if (type == DataType::UInt(32)) { \
typedef uint32_t DType; \
{ __VA_ARGS__ } \
} else if (type == DataType::UInt(16)) { \
typedef uint16_t DType; \
{ __VA_ARGS__ } \
} else if (type == DataType::UInt(8)) { \
typedef uint8_t DType; \
{ __VA_ARGS__ } \
} else if (type == DataType::Bool()) { \
typedef bool DType; \
{ __VA_ARGS__ } \
} else if ((*tvm::runtime::Registry::Get("runtime._datatype_get_type_registered"))( \
static_cast<uint8_t>(type.code()))) { \
typedef double DType; \
{ __VA_ARGS__ } \
} else { \
LOG(FATAL) << "unknown data type " << type; \
}
/*!
 * \brief Try to run type inference over expr:
 *
 * Run infer_type over each node in expr.
 *
 * \param expr The IR expression.
 * \return The inferred expr if it succeeds.
*/
inline Expr InferType(const Expr& expr) {
auto mod = IRModule::FromExpr(expr);
mod = transform::InferType()(mod);
if (expr.as<FunctionNode>()) {
return mod->Lookup("main");
} else {
return mod->Lookup("main").as<FunctionNode>()->body;
}
}
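/*!
 * \brief Illustrative use of InferType (editor's sketch; not part of the original header).
 * Passes that need checked_type() on freshly constructed expressions typically re-run type
 * inference first. `x` and `y` are assumed to be well-formed Relay expressions.
 *
 * \code{.cpp}
 *   Expr typed = InferType(Add(x, y));
 *   const auto* ttype = typed->checked_type().as<TensorTypeNode>();
 * \endcode
 */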
/*!
* \brief Try to match lhs and rhs via broadcasting rule, such that:
*
 * rhs matches the dimensions of lhs specified by lhs_axes;
 * rhs's shape equals 1 on the rest of the dimensions.
 *
 * \param tlhs The type of the left operand (data).
 * \param trhs The type of the right operand (bias).
 * \param lhs_axes The axes on lhs to match.
 * \param rhs_value Output: a squeezed version of rhs which only contains the matched dimensions.
* \return Whether match is successful.
*/
inline bool MatchBroadcastToLeftAxes(const TensorTypeNode* tlhs, const TensorTypeNode* trhs,
const Array<Integer>& lhs_axes, Expr* rhs_value = nullptr) {
if (tlhs->shape.size() < trhs->shape.size()) return false;
StructuralEqual equal;
size_t base = tlhs->shape.size() - trhs->shape.size();
size_t j = 0;
// handle case trhs is simple constant
if (trhs->shape.size() == 0 && rhs_value != nullptr && lhs_axes.size() > 0) {
*rhs_value = MakeExpandDims(*rhs_value, 0, lhs_axes.size());
for (size_t i = 0; i < lhs_axes.size(); i++) {
int repeat_value =
tlhs->shape[static_cast<size_t>(lhs_axes[j]->value)].as<IntImmNode>()->value;
*rhs_value = MakeRepeat(*rhs_value, repeat_value, i);
}
return true;
}
ObjectPtr<SqueezeAttrs> squeeze_attrs;
if (rhs_value != nullptr) {
squeeze_attrs = make_object<SqueezeAttrs>();
}
for (size_t i = 0; i < tlhs->shape.size(); ++i) {
if (j < lhs_axes.size() && i == static_cast<size_t>(lhs_axes[j]->value)) {
if (i < base || !equal(tlhs->shape[i], trhs->shape[i - base])) {
return false;
}
++j;
} else if (i >= base) {
if (!tir::is_const_int(trhs->shape[i - base], 1)) {
return false;
}
if (rhs_value != nullptr) {
squeeze_attrs->axis.push_back(static_cast<int>(i - base));
}
}
}
if (rhs_value != nullptr && squeeze_attrs->axis.size() != 0) {
static const Op& squeeze_op = Op::Get("squeeze");
*rhs_value = Call(squeeze_op, {rhs_value[0]}, Attrs(squeeze_attrs), {});
}
return true;
}
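/*!
 * \brief Illustrative use of MatchBroadcastToLeftAxes (editor's sketch; not part of the original
 * header). A typical caller checks whether a bias-like operand broadcasts onto the channel axis
 * of the data before folding it into a preceding op. `tdata`/`tbias` are assumed TensorTypeNode
 * pointers and `bias` the bias expression.
 *
 * \code{.cpp}
 *   Expr squeezed_bias = bias;
 *   if (MatchBroadcastToLeftAxes(tdata, tbias, {1}, &squeezed_bias)) {
 *     // squeezed_bias now only carries the matched channel dimension (axis 1 for NCHW).
 *   }
 * \endcode
 */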
/*!
* \brief Expand 1D Tensor to match axis.
*
* The result bias can be used to add or multiply to
* the target Tensor on the specified axis via broadcasting rule.
*
* \param bias The bias.
* \param target_ndim Target dimension.
* \param axes The axis on the output we want to match on.
*/
inline Expr ExpandBiasToMatchAxis(Expr bias, int target_ndim, const Array<Integer>& axes) {
static const Op& expand_dims = Op::Get("expand_dims");
for (size_t i = axes.size(); i != 0; --i) {
if (i == axes.size()) {
int64_t num_pad_axis = target_ndim - axes[i - 1]->value - 1;
if (num_pad_axis > 0) {
auto attrs = make_object<ExpandDimsAttrs>();
attrs->axis = i;
attrs->num_newaxis = static_cast<int>(num_pad_axis);
bias = Call(expand_dims, {bias}, Attrs(attrs), {});
}
} else {
int64_t diff = axes[i]->value - axes[i - 1]->value;
ICHECK_GE(diff, 0L);
if (diff > 0) {
auto attrs = make_object<ExpandDimsAttrs>();
attrs->axis = i;
attrs->num_newaxis = static_cast<int>(diff);
bias = Call(expand_dims, {bias}, Attrs(attrs), {});
}
}
}
return bias;
}
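/*!
 * \brief Illustrative use of ExpandBiasToMatchAxis (editor's sketch; not part of the original
 * header). To add a 1D bias of shape (C,) to a 4-D NCHW tensor, expand it so that it broadcasts
 * along the channel axis. `bias` and `data` are assumed expressions.
 *
 * \code{.cpp}
 *   Expr expanded = ExpandBiasToMatchAxis(bias, 4, {1});  // target_ndim = 4, match axis 1
 *   Expr out = Add(data, expanded);
 * \endcode
 */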
/*!
 * \brief Check if the call is a depthwise conv2d or conv3d.
 *
 * \param call The conv call.
 * \param param The conv attributes.
 * \param kernel_layout The kernel layout.
 * \return Whether it is a depthwise convolution.
*/
template <typename ATTRS>
inline bool IsDepthwiseConv(const Call& call, ATTRS param, const Layout& kernel_layout) {
static const Layout kOIXX =
backend::IsOp(call.as<CallNode>(), "nn.conv2d") ? Layout("OIHW") : Layout("OIDHW");
const auto bilayout = tir::BijectiveLayout(kernel_layout, kOIXX);
auto wshape = bilayout.ForwardShape(call->args[1]->type_as<TensorTypeNode>()->shape);
return tir::is_const_int(wshape[0], param->groups) && tir::is_const_int(wshape[1], 1);
}
/*!
* \brief Get super-dimension of output channels of conv2d
* \param call The conv2d call.
* \return Super-dimension size of output channels of conv2d.
*/
inline int64_t GetConv2DSuperChannelsDim(const CallNode* call) {
auto param = call->attrs.as<Conv2DAttrs>();
auto tweight = call->args[1]->type_as<TensorTypeNode>();
auto index = param->kernel_layout.operator std::string().find('O');
ICHECK_NE(index, std::string::npos);
auto channels = tir::as_const_int(tweight->shape[index]);
return *channels;
}
/*!
* \brief Is single value tensor (scalar).
* \param expr The expr.
* \return True if single value tensor.
*/
inline bool IsScalar(const Expr& expr) {
if (auto tensor_type = expr->checked_type().as<TensorTypeNode>()) {
for (auto dim_index_expr : tensor_type->shape) {
if (auto dim_index = dim_index_expr.as<IntImmNode>()) {
if (dim_index->value != 1) {
return false;
}
} else {
return false;
}
}
} else {
return false;
}
return true;
}
/*!
* \brief Check if expr is a const scalar.
* \param expr The expr.
* \return True if const scalar.
*/
inline bool IsConstScalar(const Expr& expr) {
const auto* const_expr = expr.as<ConstantNode>();
if (const_expr) {
return const_expr->is_scalar();
}
return false;
}
/*!
* \brief Create a Constant with a scalar
*
* \param dtype The data type.
* \param value The value of the scalar.
* \return A Constant.
*/
template <typename T>
inline Constant MakeConstantScalar(DataType dtype, T value) {
runtime::NDArray arr = runtime::NDArray::Empty({}, dtype, {kDLCPU, 0});
TVM_DTYPE_DISPATCH(dtype, DType, {
if (dtype == DataType::Float(16)) {
// convert to float16
// storage is uint16_t
*static_cast<DType*>(arr->data) =
__truncXfYf2__<float, uint32_t, 23, uint16_t, uint16_t, 10>(static_cast<float>(value));
} else if (dtype == DataType::BFloat(16)) {
// convert to bfloat16
// storage is uint16_t
*static_cast<DType*>(arr->data) =
__truncXfYf2__<float, uint32_t, 23, uint16_t, uint16_t, 7>(static_cast<float>(value));
} else {
*static_cast<DType*>(arr->data) = value;
}
})
return Constant(arr);
}
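/*!
 * \brief Illustrative use of MakeConstantScalar (editor's sketch; not part of the original
 * header), mirroring the Hardswish helper further below:
 *
 * \code{.cpp}
 *   Constant three = MakeConstantScalar(DataType::Float(32), 3.0f);
 *   Constant axis = MakeConstantScalar(DataType::Int(32), 1);
 * \endcode
 */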
/*!
* \brief Create a Constant with a tensor.
*
 * \param dtype The data type.
 * \param shape The shape of the tensor.
 * \param value The vector of the tensor values.
* \return A Constant.
*/
template <typename T>
static inline Constant MakeConstantTensor(DataType dtype, std::vector<int64_t> shape,
std::vector<T> value) {
runtime::NDArray arr = runtime::NDArray::Empty(shape, dtype, {kDLCPU, 0});
TVM_DTYPE_DISPATCH(dtype, DType, {
for (size_t i = 0; i < value.size(); i++) {
if (dtype == DataType::Float(16)) {
// convert to float16
// storage is uint16_t
// Similar handling as that in MakeConstantScalar
*(static_cast<DType*>(arr->data) + i) =
__truncXfYf2__<float, uint32_t, 23, uint16_t, uint16_t, 10>(
static_cast<float>(value[i]));
} else if (dtype == DataType::BFloat(16)) {
// convert to bfloat16
// storage is uint16_t
*(static_cast<DType*>(arr->data) + i) =
__truncXfYf2__<float, uint32_t, 23, uint16_t, uint16_t, 7>(
static_cast<float>(value[i]));
} else {
*(static_cast<DType*>(arr->data) + i) = value[i];
}
}
})
return Constant(arr);
}
/*!
* \brief Create a Constant with a tensor.
*
 * \param dtype The data type.
 * \param shape The shape of the tensor.
 * \param value The array of the tensor values.
* \return A Constant.
*/
template <typename T>
static inline Constant MakeConstantTensor(DataType dtype, std::vector<int64_t> shape,
Array<T> value) {
runtime::NDArray arr = runtime::NDArray::Empty(shape, dtype, {kDLCPU, 0});
TVM_DTYPE_DISPATCH(dtype, DType, {
for (size_t i = 0; i < value.size(); i++) {
if (dtype == DataType::Float(16)) {
// convert to float16
// storage is uint16_t
// Similar handling as that in MakeConstantScalar
*(static_cast<DType*>(arr->data) + i) =
__truncXfYf2__<float, uint32_t, 23, uint16_t, uint16_t, 10>(
static_cast<float>(value[i]));
} else if (dtype == DataType::BFloat(16)) {
// convert to bfloat16
// storage is uint16_t
*(static_cast<DType*>(arr->data) + i) =
__truncXfYf2__<float, uint32_t, 23, uint16_t, uint16_t, 7>(
static_cast<float>(value[i]));
} else {
*(static_cast<DType*>(arr->data) + i) = value[i];
}
}
})
return Constant(arr);
}
/*!
* \brief Create a Constant tensor of zeros.
*
* \param dtype The data type.
* \param shape The shape of the output constant tensor.
* \return A Constant.
*/
static inline Constant MakeConstantZeros(DataType dtype, std::vector<int64_t> shape) {
runtime::NDArray arr = runtime::NDArray::Empty(shape, dtype, {kDLCPU, 0});
int64_t data_size = 1;
for (int64_t dim : shape) {
data_size *= dim;
}
TVM_DTYPE_DISPATCH(dtype, DType, {
for (int64_t i = 0; i < data_size; i++) {
if (dtype == DataType::Float(16)) {
// convert to float16
// storage is uint16_t
// Similar handling as that in MakeConstantScalar
*(static_cast<DType*>(arr->data) + i) =
__truncXfYf2__<float, uint32_t, 23, uint16_t, uint16_t, 10>(static_cast<float>(0));
} else if (dtype == DataType::BFloat(16)) {
// convert to bfloat16
// storage is uint16_t
*(static_cast<DType*>(arr->data) + i) =
__truncXfYf2__<float, uint32_t, 23, uint16_t, uint16_t, 7>(static_cast<float>(0));
} else {
*(static_cast<DType*>(arr->data) + i) = 0;
}
}
})
return Constant(arr);
}
/*!
* \brief Check whether a shape is static and create corresponding Constant.
 * Eventually this will be removed and replaced with CheckConstantShapeArrayInteger.
*
* \param shape The Array of the shape values.
* \return A Constant.
*/
static inline Constant CheckConstantShape(const Array<IndexExpr>& shape) {
auto shape_array =
runtime::NDArray::Empty({int64_t(shape.size())}, DataType::Int(64), {kDLCPU, 0});
auto* shape_data = static_cast<int64_t*>(shape_array->data);
for (size_t i = 0; i < shape.size(); ++i) {
const auto& dim_val = shape[i].as<IntImmNode>();
ICHECK(dim_val) << "Do not support symbolic shape for "
"Array format. Pass shape as Expr instead.";
shape_data[i] = dim_val->value;
}
return Constant(shape_array);
}
/*!
 * \brief Check whether a shape is static and create a corresponding Array<Integer>. Will replace
 * CheckConstantShape after the dynamic refactoring is complete.
 *
 * \param shape The Array of the shape values.
 * \return An Array<Integer> holding the static shape.
*/
static inline Array<Integer> CheckConstantShapeArrayInteger(const Array<IndexExpr>& shape) {
Array<Integer> constShape;
for (size_t i = 0; i < shape.size(); ++i) {
const auto& dim_val = shape[i].as<IntImmNode>();
ICHECK(dim_val) << "Do not support symbolic shape for "
"Array format. Pass shape as Expr instead.";
constShape.push_back(dim_val->value);
}
return constShape;
}
/*!
* \brief Check if two expressions are equal scalars.
* \param a The expression to be checked.
* \param b The expression to be checked
* \return Whether two expressions are equal scalars.
*/
inline bool IsEqualScalar(const Expr& a, const Expr& b) {
const auto* constant_a = a.as<ConstantNode>();
const auto* constant_b = b.as<ConstantNode>();
if (!constant_a || !constant_b || !constant_a->is_scalar() || !constant_b->is_scalar()) {
return false;
}
return tvm::StructuralEqual()(a, b);
}
/*!
* \brief Convert an element of a NDArray with type int or float to scalar.
* \param array Input NDArray
* \param i element index
* \return Converted scalar value, or None if conversion failed
*/
static inline std::optional<long double> TryToScalar(const runtime::NDArray& array, size_t i = 0) {
if (array->dtype.code == kDLInt) {
if (array->dtype.bits == 8) {
return std::optional<long double>(reinterpret_cast<int8_t*>(array->data)[i]);
} else if (array->dtype.bits == 16) {
return std::optional<long double>(reinterpret_cast<int16_t*>(array->data)[i]);
} else if (array->dtype.bits == 32) {
return std::optional<long double>(reinterpret_cast<int32_t*>(array->data)[i]);
} else if (array->dtype.bits == 64) {
return std::optional<long double>(reinterpret_cast<int64_t*>(array->data)[i]);
}
} else if (array->dtype.code == kDLUInt) {
if (array->dtype.bits == 1) { // bool
return std::optional<long double>(reinterpret_cast<uint8_t*>(array->data)[i]);
} else if (array->dtype.bits == 8) {
return std::optional<long double>(reinterpret_cast<uint8_t*>(array->data)[i]);
} else if (array->dtype.bits == 16) {
return std::optional<long double>(reinterpret_cast<uint16_t*>(array->data)[i]);
} else if (array->dtype.bits == 32) {
return std::optional<long double>(reinterpret_cast<uint32_t*>(array->data)[i]);
} else if (array->dtype.bits == 64) {
return std::optional<long double>(reinterpret_cast<uint64_t*>(array->data)[i]);
}
} else if (array->dtype.code == kDLFloat) {
if (array->dtype.bits == 16) {
return std::optional<long double>(
__extendXfYf2__<uint16_t, uint16_t, 10, float, uint32_t, 23>(
reinterpret_cast<uint16_t*>(array->data)[i]));
}
if (array->dtype.bits == 32) {
return std::optional<long double>(reinterpret_cast<float*>(array->data)[i]);
} else if (array->dtype.bits == 64) {
return std::optional<long double>(reinterpret_cast<double*>(array->data)[i]);
}
} else if (array->dtype.code == kDLBfloat) {
if (array->dtype.bits == 16) {
return std::optional<long double>(__extendXfYf2__<uint16_t, uint16_t, 7, float, uint32_t, 23>(
reinterpret_cast<uint16_t*>(array->data)[i]));
}
}
return std::nullopt;
}
/*!
* \brief Convert an element of a NDArray with type int or float to scalar.
* \param array Input NDArray
* \param i element index
* \return Converted scalar value
*/
static inline long double ToScalar(const runtime::NDArray& array, size_t i = 0) {
auto try_value = TryToScalar(array, i);
ICHECK(try_value) << "Unknown data type: " << tvm::runtime::DLDataType2String(array->dtype);
return try_value.value();
}
/*!
* \brief Convert a NDArray with type int or float to Array<Integer>.
* \param array Input NDArray
* \return Converted Array.
*/
static inline Array<Integer> ToVector(const runtime::NDArray& array) {
size_t ndim = array.Shape().size();
ICHECK_EQ(ndim, 1) << "This function should only be used for 1D NDArrays";
size_t len = array.Shape().front();
Array<Integer> out;
for (size_t i = 0; i < len; ++i) {
long double elem_val = ToScalar(array, i);
out.push_back(Integer(IntImm(DataType::Int(32), static_cast<int64_t>(elem_val))));
}
return out;
}
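/*!
 * \brief Illustrative use of ToScalar / ToVector (editor's sketch; not part of the original
 * header). A common pattern is reading a constant shape argument of a dynamic op, assuming
 * `call` is a CallNode pointer whose second argument is a 1D integer constant.
 *
 * \code{.cpp}
 *   if (const auto* shape_const = call->args[1].as<ConstantNode>()) {
 *     Array<Integer> shape = ToVector(shape_const->data);  // 1D int NDArray -> Array<Integer>
 *   }
 * \endcode
 */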
/*!
* \brief Convert a NDArray with type int or float to Array<FloatImm>.
* \param array Input NDArray
* \return Converted Array.
*/
static inline Array<FloatImm> ToFloatVector(const runtime::NDArray& array) {
size_t ndim = array.Shape().size();
ICHECK_EQ(ndim, 1) << "This function should only be used for 1D NDArrays";
size_t len = array.Shape().front();
Array<FloatImm> out;
for (size_t i = 0; i < len; ++i) {
long double elem_val = ToScalar(array, i);
out.push_back(FloatImm(DataType::Float(32), static_cast<float>(elem_val)));
}
return out;
}
/*!
* \brief Convert a NDArray with type int or float to Array<Array<Integer>>.
* \param array Input NDArray
* \return Converted Array.
*/
static inline Array<Array<Integer>> ToMatrix(const runtime::NDArray& array) {
size_t ndim = array.Shape().size();
  ICHECK_EQ(ndim, 2) << "This function should only be used for 2D NDArrays";
size_t dim1 = array.Shape().at(0);
size_t dim2 = array.Shape().at(1);
Array<Array<Integer>> out;
for (size_t i = 0; i < dim1; ++i) {
Array<Integer> inner_out;
for (size_t j = 0; j < dim2; ++j) {
double elem_val = ToScalar(array, i * dim2 + j);
inner_out.push_back(Integer(static_cast<int>(elem_val)));
}
out.push_back(inner_out);
}
return out;
}
inline Expr GetField(Expr t, size_t i) { return TupleGetItem(t, i); }
inline Expr Pair(Expr l, Expr r) { return Tuple({l, r}); }
inline Expr Exp(Expr e) {
static const Op& op = Op::Get("exp");
return Call(op, {e});
}
inline Expr Erf(Expr e) {
static const Op& op = Op::Get("erf");
return Call(op, {e});
}
inline Expr FastExp(Expr e) {
static const Op& op = Op::Get("fast_exp");
return Call(op, {e});
}
inline Expr FastErf(Expr e) {
static const Op& op = Op::Get("fast_erf");
return Call(op, {e});
}
inline Expr FastTanh(Expr e) {
static const Op& op = Op::Get("fast_tanh");
return Call(op, {e});
}
inline Expr FastSoftmax(Expr e, tvm::Attrs attr) {
static const Op& op = Op::Get("nn.fast_softmax");
return Call(op, {e}, attr);
}
inline Expr Log(Expr e) {
static const Op& op = Op::Get("log");
return Call(op, {e});
}
inline Expr Tanh(Expr e) {
static const Op& op = Op::Get("tanh");
return Call(op, {e});
}
inline Expr Abs(Expr e) {
static const Op& op = Op::Get("abs");
return Call(op, {e});
}
/*!
* \brief Get an immediate scalar from a Constant expr.
*
* \param expr The Constant expr.
* \return A scalar with type T.
*/
template <typename T>
T GetScalarFromConstant(Expr expr) {
const auto* n = expr.as<ConstantNode>();
ICHECK(n) << "Expr must be a constant expr - " << AsText(expr, false);
ICHECK(n->is_scalar());
return static_cast<T*>(n->data->data)[0];
}
inline Expr Cast(Expr x, DataType dtype) { return MakeCast(x, dtype); }
inline Expr Negative(Expr x) {
static const Op& op = Op::Get("negative");
return Call(op, {x}, Attrs(), {});
}
inline Expr Sqrt(Expr x) {
static const Op& op = Op::Get("sqrt");
return Call(op, {x}, Attrs(), {});
}
inline Expr Sigmoid(Expr x) {
static const Op& op = Op::Get("sigmoid");
return Call(op, {x}, Attrs(), {});
}
inline Expr Rsqrt(Expr x) {
static const Op& op = Op::Get("rsqrt");
return Call(op, {x}, Attrs(), {});
}
inline Expr Relu(Expr x) {
static const Op& op = Op::Get("nn.relu");
return Call(op, {x}, Attrs(), {});
}
inline Expr Round(Expr x) {
static const Op& op = Op::Get("round");
return Call(op, {x}, Attrs(), {});
}
inline Expr Floor(Expr x) {
static const Op& op = Op::Get("floor");
return Call(op, {x}, Attrs(), {});
}
inline Expr Clip(Expr x, double a_min, double a_max) { return MakeClip(x, a_min, a_max); }
inline Expr FixedPointMultiply(Expr x, int32_t multiplier, int32_t shift) {
static const Op& op = Op::Get("fixed_point_multiply");
auto attrs = make_object<FixedPointMultiplyAttrs>();
attrs->multiplier = multiplier;
attrs->shift = shift;
return Call(op, {x}, Attrs(attrs), {});
}
inline Expr FixedPointMultiplyPerAxis(Expr x, Expr m, Expr lshift, Expr rshift,
bool is_lshift_required, bool is_rshift_required,
Array<Integer> axes) {
return MakeFixedPointMultiplyPerAxis(x, m, lshift, rshift, is_lshift_required, is_rshift_required,
axes);
}
inline Expr Add(Expr lhs, Expr rhs) {
static const Op& op = Op::Get("add");
return Call(op, {lhs, rhs}, Attrs(), {});
}
inline Expr Subtract(Expr lhs, Expr rhs) {
static const Op& op = Op::Get("subtract");
return Call(op, {lhs, rhs}, Attrs(), {});
}
inline Expr Multiply(Expr lhs, Expr rhs) {
static const Op& op = Op::Get("multiply");
return Call(op, {lhs, rhs}, Attrs(), {});
}
inline Expr Divide(Expr lhs, Expr rhs) {
static const Op& op = Op::Get("divide");
return Call(op, {lhs, rhs}, Attrs(), {});
}
inline Expr Maximum(Expr lhs, Expr rhs) {
static const Op& op = Op::Get("maximum");
return Call(op, {lhs, rhs}, Attrs(), {});
}
inline Expr ZerosLike(Expr e) {
static const Op& op = Op::Get("zeros_like");
return Call(op, {e});
}
inline Expr Zeros(Array<IndexExpr> shape, DataType dtype) {
return MakeZeros(CheckConstantShapeArrayInteger(shape), dtype);
}
inline Expr OnesLike(Expr e) {
static const Op& op = Op::Get("ones_like");
return Call(op, {e});
}
inline Expr Ones(Array<IndexExpr> shape, DataType dtype) {
return MakeOnes(CheckConstantShapeArrayInteger(shape), dtype);
}
inline Expr CollapseSumLike(Expr e) {
static const Op& op = Op::Get("collapse_sum_like");
return Call(op, {e});
}
inline Expr Power(Expr lhs, Expr rhs) {
static const Op& op = Op::Get("power");
return Call(op, {lhs, rhs}, Attrs(), {});
}
inline Expr RightShift(Expr x, Expr nbit) {
static const Op& op = Op::Get("right_shift");
return Call(op, {x, nbit}, Attrs(), {});
}
inline Expr LeftShift(Expr x, Expr nbit) {
static const Op& op = Op::Get("left_shift");
return Call(op, {x, nbit}, Attrs(), {});
}
inline Expr ReshapeLike(Expr lhs, Expr rhs, int lhs_begin, Integer lhs_end, int rhs_begin,
Integer rhs_end) {
return MakeReshapeLike(lhs, rhs, lhs_begin, lhs_end, rhs_begin, rhs_end);
}
inline Expr Copy(Expr data) {
static const Op& op = Op::Get("copy");
return Call(op, {data}, Attrs(), {});
}
inline Expr Mean(Expr data, Array<Integer> axis, bool keepdims, bool exclude) {
return MakeReduce(data, axis, keepdims, exclude, "mean");
}
inline Expr Variance(Expr data, Expr mean, Array<Integer> axis, bool keepdims, bool exclude,
bool unbiased = false) {
return MakeVariance(data, mean, axis, keepdims, exclude, unbiased);
}
static inline Expr Where(const Expr& condition, const Expr& x, const Expr& y) {
static const Op& op = Op::Get("where");
return Call(op, {condition, x, y});
}
static inline Expr LogicalOr(const Expr& lhs, const Expr& rhs) {
static const Op& op = Op::Get("logical_or");
return Call(op, {lhs, rhs}, Attrs(), {});
}
static inline Expr GreaterEqual(const Expr& lhs, const Expr& rhs) {
static const Op& op = Op::Get("greater_equal");
return Call(op, {lhs, rhs}, Attrs(), {});
}
static inline Expr Equal(const Expr& lhs, const Expr& rhs) {
static const Op& op = Op::Get("equal");
return Call(op, {lhs, rhs}, Attrs(), {});
}
static inline Expr Less(const Expr& lhs, const Expr& rhs) {
static const Op& op = Op::Get("less");
return Call(op, {lhs, rhs}, Attrs(), {});
}
static inline Expr IsFinite(const Expr x) {
static const Op& op = Op::Get("isfinite");
return Call(op, {x}, Attrs(), {});
}
static inline Expr Full(Expr fill_value, Array<IndexExpr> shape, DataType dtype) {
return MakeFull(fill_value, CheckConstantShapeArrayInteger(shape), dtype);
}
static inline Expr Conv2D(Expr data, Expr weight, Array<IndexExpr> strides,
Array<IndexExpr> padding, Array<IndexExpr> dilation, int groups,
IndexExpr channels, Array<IndexExpr> kernel_size, std::string data_layout,
std::string kernel_layout, std::string out_layout, DataType out_dtype) {
return MakeConv<Conv2DAttrs>(data, weight, strides, padding, dilation, groups, channels,
kernel_size, data_layout, kernel_layout, out_layout, out_dtype,
"nn.conv2d");
}
static inline Expr Dense(Expr data, Expr weight, IndexExpr units, DataType out_dtype) {
return MakeDense(data, weight, units, out_dtype);
}
static inline Expr Sum(Expr data, Array<Integer> axis, bool keepdims, bool exclude) {
return MakeReduce(data, axis, keepdims, exclude, "sum");
}
static inline Expr Prod(Expr data, Array<Integer> axis, bool keepdims, bool exclude) {
return MakeReduce(data, axis, keepdims, exclude, "prod");
}
static inline Expr Reshape(Expr data, Array<Integer> newshape) {
return MakeReshape(data, newshape);
}
static inline Expr AvgPool2D(Expr data, Array<IndexExpr> pool_size, Array<IndexExpr> strides,
Array<IndexExpr> dilation, Array<IndexExpr> padding,
std::string layout, std::string out_layout, bool ceil_mode,
bool count_include_pad) {
return MakeAvgPool<AvgPool2DAttrs>(data, pool_size, strides, dilation, padding, layout,
out_layout, ceil_mode, count_include_pad, "nn.avg_pool2d");
}
static inline Expr Pad(Expr data, Array<Array<IndexExpr>> pad_width, Expr pad_value,
std::string pad_mode) {
Array<Array<Integer>> pad_width_int;
for (size_t i = 0; i < pad_width.size(); ++i) {
pad_width_int.push_back(CheckConstantShapeArrayInteger(pad_width[i]));
}
return MakePad(data, pad_width_int, pad_value, pad_mode);
}
static inline Expr Tile(Expr data, Array<Integer> reps) { return MakeTile(data, reps); }
static inline Expr BroadCastTo(Expr data, Array<IndexExpr> shape) {
return MakeBroadCastTo(data, CheckConstantShapeArrayInteger(shape));
}
inline Expr Hardswish(Expr x) {
auto three = MakeConstantScalar(DataType::Float(32), 3.0);
auto six = MakeConstantScalar(DataType::Float(32), 6.0);
auto x2 = Add(x, three);
x2 = Clip(x2, 0.0, 6.0);
x2 = Multiply(x, x2);
x2 = Divide(x2, six);
return x2;
}
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_TRANSFORMS_PATTERN_UTILS_H_
| https://github.com/zk-ml/tachikoma |
src/relay/transforms/simplify_expr.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/relay/transforms/simplify_expr.h
* \brief Utility data structures for simplifying Relay expressions.
*/
#ifndef TVM_RELAY_TRANSFORMS_SIMPLIFY_EXPR_H_
#define TVM_RELAY_TRANSFORMS_SIMPLIFY_EXPR_H_
#include <tvm/relay/dataflow_matcher.h>
#include <tvm/relay/expr.h>
#include <memory>
#include <vector>
namespace tvm {
namespace relay {
/*! \brief A wrapper class defining a rewrite matching a specific pattern. */
class DFPatternRewrite {
public:
/*! \brief Returns the rewritten expression. */
virtual Expr Callback(const Expr& pre, const Expr& post,
const Map<DFPattern, Array<Expr>>& node_map) const = 0;
virtual ~DFPatternRewrite() = default;
/*! \brief Returns the pattern to be used for matching and rewriting. */
inline DFPattern Pattern() const { return pattern_; }
inline bool RequireType() const { return require_type_; }
inline DFPatternCallback MakeCallback() const {
auto func = [this](TVMArgs args, TVMRetValue* rv) {
Expr pre = args[0];
Expr post = args[1];
Map<DFPattern, Array<Expr>> node_map = args[2];
*rv = this->Callback(pre, post, node_map);
};
return DFPatternCallback(pattern_, PackedFunc(func), require_type_);
}
protected:
/*! \brief The pattern for matching and rewriting. */
DFPattern pattern_;
/*! \brief Whether or not the rewrite requires types to be inferred. */
bool require_type_ = true;
};
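/*!
 * \brief Illustrative DFPatternRewrite subclass (editor's sketch; not part of the original
 * header). Concrete rewrites follow this shape: the constructor builds the pattern and
 * Callback produces the replacement. The sketch assumes the dataflow pattern helpers
 * IsOp/IsWildcard from <tvm/relay/dataflow_pattern.h>.
 *
 * \code{.cpp}
 *   class SimplifyDoubleNegative : public DFPatternRewrite {
 *    public:
 *     SimplifyDoubleNegative() {
 *       x_ = IsWildcard();
 *       pattern_ = IsOp("negative")({IsOp("negative")({x_})});  // negative(negative(x)) -> x
 *     }
 *     Expr Callback(const Expr& pre, const Expr& post,
 *                   const Map<DFPattern, Array<Expr>>& node_map) const override {
 *       return node_map[x_][0];
 *     }
 *    private:
 *     DFPattern x_;
 *   };
 * \endcode
 */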
/*! \brief Helper class for composing rewrites and getting callbacks. */
class DFPatternRewriteComposer {
public:
template <typename T, typename... Args>
inline void AddRewrite(Args... args) {
rewrites_.push_back(std::make_shared<T, Args&...>(args...));
}
inline Array<DFPatternCallback> MakeCallbacks() const {
Array<DFPatternCallback> callbacks;
for (const auto& rewrite : rewrites_) {
callbacks.push_back(rewrite->MakeCallback());
}
return callbacks;
}
private:
/*! \brief the rewrites to be composed. */
std::vector<std::shared_ptr<DFPatternRewrite>> rewrites_;
};
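/*!
 * \brief Illustrative composition of rewrites (editor's sketch; not part of the original
 * header), assuming a rewrite class like the one sketched above and the RewritePatterns helper
 * from the dataflow matcher:
 *
 * \code{.cpp}
 *   DFPatternRewriteComposer composer;
 *   composer.AddRewrite<SimplifyDoubleNegative>();
 *   Expr simplified = RewritePatterns(composer.MakeCallbacks(), expr, mod);
 * \endcode
 */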
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_TRANSFORMS_SIMPLIFY_EXPR_H_
| https://github.com/zk-ml/tachikoma |
src/relay/transforms/transform_layout.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
*
* \file transform_layout.h
* \brief Common infrastructure for transforming the layouts. This is used for AlterOpLayout and
 * ConvertLayout passes. */
#ifndef TVM_RELAY_TRANSFORMS_TRANSFORM_LAYOUT_H_
#define TVM_RELAY_TRANSFORMS_TRANSFORM_LAYOUT_H_
#include <tvm/relay/expr.h>
#include <tvm/tir/data_layout.h>
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>
#include "infer_layout_utils.h"
#include "pattern_utils.h"
namespace tvm {
namespace relay {
/*!
* \brief Memorizes layout transformations to reuse.
*/
class TransformMemorizerNode : public Object {
public:
/*! \brief The key for the memorizer map is (Expr, src_layout, dst_layout). */
using TransformKey = std::tuple<const Object*, std::string, std::string>;
struct key_hash : public std::function<std::size_t(TransformKey)> {
std::size_t operator()(const TransformKey& k) const {
return dmlc::HashCombine<std::string>(
dmlc::HashCombine<std::string>(std::hash<const Object*>()(std::get<0>(k)),
std::get<1>(k)),
(std::get<2>(k)));
}
};
/*!
 * \brief Defines the call transformation for derived passes. The new layouts are defined for
 * different targets using a packed func.
* \param ref_call The original call.
* \param new_attrs Updated attributes consistent with new layouts.
* \param new_args The traversed/recursed args to the call.
* \return The new Call after calling the packed func.
*/
virtual Call CallWithNewLayouts(const Call& ref_call, Attrs new_attrs,
const std::vector<Expr>& new_args) = 0;
virtual Call CallWithNewLayouts(const Call& ref_call, const std::vector<Expr>& new_args) {
return CallWithNewLayouts(ref_call, ref_call->attrs, new_args);
}
/*! \brief The memorizer map. */
std::unordered_map<TransformKey, Expr, key_hash> memo;
static constexpr const char* _type_key = "relay.alter_op_layout.TransformMemorizerNode";
TVM_DECLARE_FINAL_OBJECT_INFO(TransformMemorizerNode, Object);
};
/*!
* \brief Container that transforms the layouts and memorizes them.
*/
class TransformMemorizer : public ObjectRef {
public:
TransformMemorizer() = default;
explicit TransformMemorizer(ObjectPtr<Object> n) : ObjectRef(n) {}
TransformMemorizerNode* operator->() {
return static_cast<TransformMemorizerNode*>(get_mutable());
}
/*
* \brief Memorizes and transforms the layout.
 * \param raw The initial expr.
* \param src_layout The source layout.
* \param dst_layout The dest layout.
* \return The new expr with the dst layout.
*/
Expr Transform(Expr raw, const Layout& src_layout, const Layout& dst_layout) {
if (src_layout.Equals(dst_layout)) {
return raw;
}
std::tuple<const Object*, std::string, std::string> key =
std::make_tuple<>(raw.get(), src_layout.name(), dst_layout.name());
auto& memo = operator->()->memo;
auto iter = memo.find(key);
if (iter != memo.end()) {
return iter->second;
} else {
Expr transform = TransformHelper(raw, src_layout, dst_layout);
memo[key] = transform;
return transform;
}
}
/*
* \brief Helper to transform the layouts.
 * \param raw The initial expr.
* \param src_layout The source layout.
* \param dst_layout The dest layout.
* \return The new expr with the dst layout.
 * \note It performs the following 2 operations:
 * 1) If src_layout ndim is smaller than dst_layout, expand_dim is inserted to match the dim
* size. For example, src_layout = C, dst_layout = NCHW16c. The src is expanded to NHWC.
* 2) Call layout transform with new src layout.
*/
Expr TransformHelper(Expr raw, Layout src_layout, Layout dst_layout) {
if (src_layout.Equals(dst_layout)) {
return raw;
}
// 1) Check if the shape lengths are different. If yes, expand dims.
Expr input_expr = raw;
Layout new_src_layout = src_layout;
if (src_layout.ndim_primal() < dst_layout.ndim_primal()) {
// If scalar, then no need of layout transformation as scalar can be broadcasted easily even
// if the other operand has a transformed layout.
if (input_expr->checked_type_.defined() && IsScalar(input_expr)) {
return raw;
}
int num_new_axis = dst_layout.ndim_primal() - src_layout.ndim_primal();
new_src_layout = src_layout.ExpandPrimal(dst_layout);
input_expr = MakeExpandDims(input_expr, 0, num_new_axis);
if (new_src_layout.Equals(dst_layout)) {
return input_expr;
}
}
// 2) Insert layout transform on the transformed src.
ICHECK(new_src_layout.defined() && dst_layout.defined())
<< "Cannot insert layout transform because there are undefined layouts";
ICHECK(tir::BijectiveLayout(new_src_layout, dst_layout).defined())
<< "Cannot insert layout transform because there are inconvertible layouts: "
<< new_src_layout << " v.s. " << dst_layout;
return MakeLayoutTransform(input_expr, new_src_layout.name(), dst_layout.name());
}
using ContainerType = TransformMemorizerNode;
};
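/*!
 * \brief Illustrative use of the memorizer (editor's sketch; not part of the original header).
 * Repeated requests for the same (expr, src_layout, dst_layout) triple return the cached
 * transform. `memorizer` is assumed to be a derived TransformMemorizer instance.
 *
 * \code{.cpp}
 *   Expr nchw16c = memorizer.Transform(arg, Layout("NCHW"), Layout("NCHW16c"));
 *   Expr again = memorizer.Transform(arg, Layout("NCHW"), Layout("NCHW16c"));  // memoized
 * \endcode
 */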
/*
 * \brief TempExprNode during layout transform. Instances of this expr will ultimately be
 * realized to a normal expr.
* \tparam TransformMemorizerT The derived TransformMemorizer type.
*/
template <class TransformMemorizerT>
class LayoutAlternatedExprNode : public TempExprNode {
public:
Expr value;
Layout old_layout;
Layout new_layout;
TransformMemorizerT memorizer;
Expr Realize() const final {
// NOTE: use a copy to discard the "const" qualifier
TransformMemorizerT tmp_memorizer = memorizer;
// fallback to old layout
return tmp_memorizer.Transform(value, new_layout, old_layout);
}
void VisitAttrs(AttrVisitor* v) {
v->Visit("value", &value);
v->Visit("old_layout", &old_layout);
v->Visit("new_layout", &new_layout);
}
static constexpr const char* _type_key = "relay.alter_op_layout.LayoutAlternatedExprNode";
TVM_DECLARE_FINAL_OBJECT_INFO(LayoutAlternatedExprNode, TempExprNode);
};
/*!
* \brief Container for the layout alternated expr.
* \tparam TransformMemorizerT The derived TransformMemorizer type.
*/
template <class TransformMemorizerT>
class LayoutAlternatedExpr : public ObjectRef {
public:
LayoutAlternatedExpr() {}
explicit LayoutAlternatedExpr(ObjectPtr<Object> n) : ObjectRef(n) {}
LayoutAlternatedExprNode<TransformMemorizerT>* operator->() {
return static_cast<LayoutAlternatedExprNode<TransformMemorizerT>*>(get_mutable());
}
using ContainerType = LayoutAlternatedExprNode<TransformMemorizerT>;
};
/*!
* Call registered FInferCorrectLayout of an op.
 * Parameters are the same as the parameters for FInferCorrectLayout.
* Returns inferred_input_layout, inferred_output_layout, updated attributes, and a flag
* indicating whether or not layout conversion is successful.
*/
static inline std::tuple<InferCorrectLayoutOutput, bool> InferCorrectLayouts(
const Call& call, const Array<Layout>& new_in_layouts, const Array<Layout>& old_in_layouts,
const Array<tvm::relay::Type>& old_in_types) {
static auto finfer_layout = Op::GetAttrMap<FInferCorrectLayout>("FInferCorrectLayout");
auto null_res = std::make_tuple(
InferCorrectLayoutOutput(Array<Layout>(nullptr), Array<Layout>(nullptr), Attrs(nullptr)),
false);
if (!call->op.as<OpNode>()) {
return null_res;
}
Op op = Downcast<Op>(call->op);
if (finfer_layout.count(op)) {
auto out = finfer_layout[op](call->attrs, new_in_layouts, old_in_layouts, old_in_types);
for (auto inferred_layouts : {out->input_layouts, out->output_layouts}) {
for (auto layout : inferred_layouts) {
if (!layout.defined()) { // inference fails
return null_res;
}
}
}
return std::make_tuple(out, true);
} else {
return null_res;
}
}
/*
 * \brief Used with ForwardRewrite to transform the expr. The input args are the same as
* FForwardRewrite.
* \param ref_call The reference old call type to be rewritten.
* We can make use of the op and type information.
* \param new_args The new arguments (some of them could be TempExpr).
* \param ctx Optional context information about ref_call.
* \tparam TransformMemorizerT The derived TransformMemorizer type.
 * \return The rewritten result call; can also return nullptr,
 * which indicates the rewriter should use the default fallback
 * rule that realizes all its inputs and composes the call.
*
* \note The ctx can be used to provide extra information during transformation. The ctx is
* templated to reuse across AlterOpLayout and ConvertLayout pass. The steps are
* - Extract the original layouts.
* - Use ctx transformation to get a Call with new layouts - CallWithNewLayouts.
* - Extract the new layouts from the returned Call.
* - Transform the original call to reuse the new layouts using TransformMemorizer.
*/
template <class TransformMemorizerT>
Expr LayoutRewriter(const Call& ref_call, const Array<Expr>& new_args, const ObjectRef& ctx) {
std::vector<LayoutAlternatedExpr<TransformMemorizerT>> inputs;
std::vector<Expr> normal_new_args;
// NOTE: discard the "const" qualifier
// TransformMemorizer memorizer = Downcast<TransformMemorizer>(ctx);
// TransformMemorizerT* ctx_transformer =
// static_cast<TransformMemorizerT*>(memorizer.operator->());
TransformMemorizerT memorizer = Downcast<TransformMemorizerT>(ctx);
// fill incomplete state and flatten tuple
auto push_back_one_arg = [&inputs, memorizer](Expr arg) {
// We always expect LayoutAlternatedExpr<TransformMemorizerT>.
// This is used to convert the normal Expr to LayoutAlternatedExpr<TransformMemorizerT>.
if (const LayoutAlternatedExprNode<TransformMemorizerT>* inp =
arg.as<LayoutAlternatedExprNode<TransformMemorizerT>>()) {
inputs.push_back(GetRef<LayoutAlternatedExpr<TransformMemorizerT>>(inp));
return inp->value;
} else {
auto inode = make_object<LayoutAlternatedExprNode<TransformMemorizerT>>();
inode->value = arg;
inode->memorizer = memorizer;
inputs.push_back(LayoutAlternatedExpr<TransformMemorizerT>(inode));
return arg;
}
};
for (auto new_arg : new_args) {
// NOTE: do not support nested tuple
if (new_arg->IsInstance<TupleNode>()) {
Tuple tuple_new_arg = Downcast<Tuple>(new_arg);
Array<Expr> fields;
fields.reserve(tuple_new_arg->fields.size());
for (auto x : tuple_new_arg->fields) {
Expr tmp = push_back_one_arg(x);
fields.push_back(tmp);
}
normal_new_args.push_back(WithFields(tuple_new_arg, fields));
} else {
Expr tmp = push_back_one_arg(new_arg);
normal_new_args.push_back(tmp);
}
}
// If there is no FInferCorrectLayout for the type, then we just assume the layout is correct.
static auto finfer_layout = Op::GetAttrMap<FInferCorrectLayout>("FInferCorrectLayout");
if (Op::HasAttrMap("FTVMAlterOpLayout")) {
static auto falter_layout = Op::GetAttrMap<FTVMAlterOpLayout>("FTVMAlterOpLayout");
if (ref_call->op.as<OpNode>()) {
Op op = Downcast<Op>(ref_call->op);
if (falter_layout.count(op) && !finfer_layout.count(op)) {
return memorizer->CallWithNewLayouts(ref_call, normal_new_args);
}
}
}
// old_prd, new_prd = state[inputs]
// different ops can view a tensor with different layouts, e.g. conv_1->transpose(H, W)->conv_2
// transpose view its output having NCWH layout, but conv_2 still views it as NCHW to operate
// old_prd, new_prd: the input layouts from the perspective of the producer (transpose)
// old_cur, new_cur: the input layouts from the perspective of the current node (conv_2)
// old_prd->new_prd tells how producer changed the layout
// old_cur->new_cur tells what change the current node wants to see
// No layout transforms are needed when they mean the same (NCHW->NCHW4c == NCWH->NCWH4c)
// The workflow:
// 1. Run InferCorrectLayouts(NULL, old_prd) to get old_cur
// 2. Run InferCorrectLayouts(new_prd, old_prd) to get new_cur and rewrite the current op
Array<Layout> old_prd, old_cur, old_out, new_prd, new_out, new_cur;
for (auto inp : inputs) {
old_prd.push_back(inp->old_layout);
new_prd.push_back(inp->new_layout);
}
// Collect input types to pass on to Infer Correct Layout.
tvm::Array<tvm::relay::Type> types;
for (auto arg : ref_call->args) {
types.push_back(arg->checked_type());
}
bool success = false;
InferCorrectLayoutOutput infer_out;
std::tie(infer_out, success) =
InferCorrectLayouts(ref_call, Array<Layout>(nullptr), old_prd, types);
old_cur = infer_out->input_layouts;
old_out = infer_out->output_layouts;
if (!success) {
return Expr(nullptr);
}
ICHECK_EQ(old_cur.size(), new_prd.size());
// for backward compatibility of InferCorrectLayouts
Array<Layout> new_prd_inferred = new_prd;
// if new_prd_inferred == 'undef': new_prd_inferred = old_cur
for (size_t i = 0; i < new_prd_inferred.size(); ++i) {
if (!new_prd_inferred[i].defined()) {
new_prd_inferred.Set(i, old_cur[i]);
}
}
Array<Layout> old_prd_inferred = old_prd;
// if old_prd_inferred == 'undef': old_prd_inferred = old_cur
for (size_t i = 0; i < old_prd_inferred.size(); ++i) {
if (!old_prd_inferred[i].defined()) {
old_prd_inferred.Set(i, old_cur[i]);
}
}
// new_op = alter(op)
Call new_call = memorizer->CallWithNewLayouts(ref_call, infer_out->new_attrs, normal_new_args);
// new_cur, new_out = op.infer(new_prd)
if (new_call->op->IsInstance<OpNode>()) {
success = false;
std::tie(infer_out, success) =
InferCorrectLayouts(new_call, new_prd_inferred, old_prd_inferred, types);
new_cur = infer_out->input_layouts;
new_out = infer_out->output_layouts;
if (!success) {
return Expr(nullptr);
}
} else {
return Expr(nullptr);
}
ICHECK_EQ(new_out.size(), old_out.size())
<< "The number of output nodes should keep the same during alter_op_layout";
ICHECK_EQ(new_prd.size(), new_cur.size())
<< "The number of input nodes should keep the same during alter_op_layout";
auto transform_layout = [&memorizer](Expr arg_item, const Layout& old_prd, const Layout& old_cur,
const Layout& new_prd, const Layout& new_cur) {
if (old_cur.Equals(old_prd)) { // the two transforms can be fused to one
arg_item = memorizer.Transform(arg_item, new_prd, new_cur);
} else {
if (old_prd.defined()) arg_item = memorizer.Transform(arg_item, new_prd, old_prd);
arg_item = memorizer.Transform(arg_item, old_cur, new_cur);
}
return arg_item;
};
DLOG(INFO) << "Transforming layout for `" << ref_call->op << "`";
DLOG(INFO) << " old_prd=" << old_prd;
DLOG(INFO) << " new_prd=" << new_prd;
DLOG(INFO) << " old_cur=" << old_cur;
DLOG(INFO) << " new_cur=" << new_cur;
// if (new_prd != new_cur): insert transform (new_prd -> new_cur)
Array<Expr> transformed_args;
size_t pt = 0;
for (auto arg : new_call->args) {
if (arg->IsInstance<TupleNode>()) { // unflatten tuple
Tuple tuple_arg = Downcast<Tuple>(arg);
Array<Expr> transformed_tuple_arg;
transformed_tuple_arg.reserve(tuple_arg->fields.size());
for (auto arg_item : tuple_arg->fields) {
transformed_tuple_arg.push_back(
transform_layout(arg_item, old_prd[pt], old_cur[pt], new_prd[pt], new_cur[pt]));
pt++;
}
transformed_args.push_back(WithFields(tuple_arg, transformed_tuple_arg));
} else {
transformed_args.push_back(
transform_layout(arg, old_prd[pt], old_cur[pt], new_prd[pt], new_cur[pt]));
pt++;
}
}
ICHECK_EQ(pt, inputs.size());
// state[node] = (old_out, new_out)
// (handle tuple output)
if (ref_call->checked_type()->IsInstance<TupleTypeNode>()) {
Expr tuple_output = Call(new_call->op, transformed_args, infer_out->new_attrs);
Array<Expr> fields;
for (size_t i = 0; i < new_out.size(); ++i) {
auto rnode = make_object<LayoutAlternatedExprNode<TransformMemorizerT>>();
rnode->value = TupleGetItem(tuple_output, i);
rnode->old_layout = old_out[i];
rnode->new_layout = new_out[i];
rnode->memorizer = memorizer;
fields.push_back(Expr(rnode));
}
return Tuple(fields);
} else {
auto rnode = make_object<LayoutAlternatedExprNode<TransformMemorizerT>>();
ICHECK_EQ(new_out.size(), 1);
rnode->value = Call(new_call->op, transformed_args, infer_out->new_attrs, {}, ref_call->span);
rnode->old_layout = old_out[0];
rnode->new_layout = new_out[0];
rnode->memorizer = memorizer;
return Expr(rnode);
}
}
} // namespace relay
} // namespace tvm
#endif // TVM_RELAY_TRANSFORMS_TRANSFORM_LAYOUT_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/aot_executor/aot_executor.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \brief Defines an implementation of Module-based Model Runtime Interface that works with
* Ahead-of-Time compilation.
* \file aot_executor.h
*/
#ifndef TVM_RUNTIME_AOT_EXECUTOR_AOT_EXECUTOR_H_
#define TVM_RUNTIME_AOT_EXECUTOR_AOT_EXECUTOR_H_
#include <tvm/runtime/metadata.h>
#include <tvm/runtime/module.h>
#include <tvm/runtime/object.h>
#include <tvm/runtime/packed_func.h>
#include <string>
#include <vector>
namespace tvm {
namespace runtime {
class TVM_DLL AotExecutor : public ModuleNode {
public:
/*!
* \brief Implements member function lookup for this Module for the frontend.
* \param name The name of the function.
* \param sptr_to_self The pointer to the module node.
* \return The corresponding member function.
*/
PackedFunc GetFunction(const std::string& name, const ObjectPtr<Object>& sptr_to_self) override;
/*!
* \return The type key of the executor.
*/
const char* type_key() const final { return "AotExecutor"; }
void Run();
/*!
* \brief Initialize the AOT executor with metadata, runtime::Module, and device.
* \param module The module containing the compiled functions for the host
* processor.
* \param devs A 1-element vector. The Device which AOT compute will run on. Currently, only
* Device(kDLCPU, 0) is supported.
*/
AotExecutor(tvm::runtime::Module module, const std::vector<Device>& devs);
/*!
* \brief Get the input index given the name of input.
* \param name The name of the input.
* \return The index of input.
*/
int GetInputIndex(const std::string& name);
/*!
* \brief Get the output index given the name of output.
* \param name The name of the output.
* \return The index of output.
*/
int GetOutputIndex(const std::string& name);
/*!
* \brief set index-th input to the graph.
* \param index The input index.
* \param data_in The input data.
*/
void SetInput(int index, DLTensor* data_in);
/*!
* \brief set index-th input to the graph without copying the data
* \param index The input index.
* \param data_ref The input data that is referred.
*/
void SetInputZeroCopy(int index, DLTensor* data_ref);
/*!
* \brief set index-th output to the graph without copying the data.
* \param index The output index.
* \param data_ref The output data that is referred.
*/
void SetOutputZeroCopy(int index, DLTensor* data_ref);
/*!
* \brief Get the number of outputs
*
* \return The number of outputs from graph.
*/
int NumOutputs() const;
/*!
* \brief Get the number of inputs
*
* \return The number of inputs to the graph.
*/
int NumInputs() const;
/*!
* \brief Return NDArray for given input index.
* \param index The input index.
*
* \return NDArray corresponding to given input node index.
*/
NDArray GetInput(int index) const;
/*!
* \brief Return NDArray for given output index.
* \param index The output index.
*
* \return NDArray corresponding to given output node index.
*/
NDArray GetOutput(int index) const;
/*!
* \brief Copy index-th output to data_out.
* \param index The output index.
* \param data_out the output data.
*/
void CopyOutputTo(int index, DLTensor* data_out);
private:
/*! \brief Metadata provided to the runtime from the compiler. */
metadata::Metadata metadata_;
/*! \brief Runtime module which contains the AOT top-level function. */
Module module_;
/*! \brief The devices which should be used to execute the computations. */
std::vector<Device> devices_;
/*! \brief Holds one NDArray per function argument in the same order. */
std::vector<NDArray> args_;
};
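/*!
 * \brief Illustrative use of AotExecutor (editor's sketch; not part of the original header).
 * The flow mirrors the graph executor API: set inputs, run, read outputs. `compiled_module`
 * and `input_tensor` are assumed to be provided by the caller.
 *
 * \code{.cpp}
 *   std::vector<Device> devs = {{kDLCPU, 0}};
 *   auto exec = make_object<AotExecutor>(compiled_module, devs);
 *   exec->SetInput(exec->GetInputIndex("data"), input_tensor);
 *   exec->Run();
 *   NDArray out = exec->GetOutput(0);
 * \endcode
 */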
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_AOT_EXECUTOR_AOT_EXECUTOR_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/aot_executor/aot_executor_factory.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file tvm/runtime/aot_executor/aot_executor_factory.h
* \brief Aot executor factory creating aot executor.
*/
#ifndef TVM_RUNTIME_AOT_EXECUTOR_AOT_EXECUTOR_FACTORY_H_
#define TVM_RUNTIME_AOT_EXECUTOR_AOT_EXECUTOR_FACTORY_H_
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/module.h>
#include <tvm/runtime/ndarray.h>
#include <tvm/runtime/packed_func.h>
#include <algorithm>
#include <functional>
#include <numeric>
#include <string>
#include <unordered_map>
#include <vector>
#include "./aot_executor.h"
namespace tvm {
namespace runtime {
class TVM_DLL AotExecutorFactory : public runtime::ModuleNode {
public:
/*!
* \brief Construct the AotExecutorFactory.
* \param params The params of aot.
* \param module_name The module name of aot.
*/
AotExecutorFactory(const std::unordered_map<std::string, tvm::runtime::NDArray>& params,
const std::string& module_name);
/*!
* \brief Get member function to front-end
* \param name The name of the function.
* \param sptr_to_self The pointer to the module node.
* \return The corresponding member function.
*/
PackedFunc GetFunction(const std::string& name, const ObjectPtr<Object>& sptr_to_self) final;
/*!
* \return The type key of the executor.
*/
const char* type_key() const final { return "AotExecutorFactory"; }
/*!
* \brief Save the module to binary stream.
* \param stream The binary stream to save to.
*/
void SaveToBinary(dmlc::Stream* stream) override;
/*!
* \brief Create a specific executor module
* \param devs The device of the host and devices where the model will be
* executed.
* \return created executor module
*/
Module ExecutorCreate(const std::vector<Device>& devs);
/*!
* \brief Set params.
* \param aot_executor The aot executor we want to set the params into.
* \param params The aot params value we want to set.
*/
void SetParams(AotExecutor* aot_executor,
const std::unordered_map<std::string, tvm::runtime::NDArray>& params) const {
std::unordered_map<std::string, tvm::runtime::NDArray> value = params;
// upload big arrays first to avoid memory issue in rpc mode
std::vector<std::string> keys;
for (const auto& p : value) {
keys.emplace_back(p.first);
}
std::sort(std::begin(keys), std::end(keys),
[&](const std::string& lhs, const std::string& rhs) -> bool {
auto lhs_size = GetDataSize(*value[lhs].operator->());
auto rhs_size = GetDataSize(*value[rhs].operator->());
return lhs_size > rhs_size;
});
for (const auto& key : keys) {
int in_idx = aot_executor->GetInputIndex(key);
if (in_idx >= 0) {
aot_executor->SetInput(in_idx, const_cast<DLTensor*>(value[key].operator->()));
}
}
}
protected:
/*! \brief The params. */
std::unordered_map<std::string, tvm::runtime::NDArray> params_;
/*! \brief module name */
std::string module_name_;
};
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_AOT_EXECUTOR_AOT_EXECUTOR_FACTORY_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/const_loader_module.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file const_loader_module.h
* \brief Defines an interface to use the ConstLoaderModule.
*/
#ifndef TVM_RUNTIME_CONST_LOADER_MODULE_H_
#define TVM_RUNTIME_CONST_LOADER_MODULE_H_
#include <tvm/runtime/ndarray.h>
#include <string>
#include <unordered_map>
#include <vector>
namespace tvm {
namespace runtime {
/*!
* \brief Create a ConstLoader module object.
*
* \param const_var_ndarray Maps consts var name to NDArray containing data for the var.
* \param const_vars_by_symbol Maps the name of a module init function to a list of names of
* const vars whose data will be passed to that init function.
*
* \return The created ConstLoaderModule.
*/
Module ConstLoaderModuleCreate(
const std::unordered_map<std::string, NDArray>& const_var_ndarray,
const std::unordered_map<std::string, std::vector<std::string>>& const_vars_by_symbol);
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONST_LOADER_MODULE_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/arm_compute_lib/acl_allocator.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/runtime/contrib/arm_compute_lib/acl_allocator.h
* \brief ACL Allocator implementation that requests memory from TVM.
*/
#ifndef TVM_RUNTIME_CONTRIB_ARM_COMPUTE_LIB_ACL_ALLOCATOR_H_
#define TVM_RUNTIME_CONTRIB_ARM_COMPUTE_LIB_ACL_ALLOCATOR_H_
#include <arm_compute/runtime/IAllocator.h>
#include <arm_compute/runtime/IMemoryRegion.h>
#include <arm_compute/runtime/MemoryRegion.h>
#include <tvm/runtime/data_type.h>
#include <tvm/runtime/device_api.h>
#include <tvm/runtime/registry.h>
#include <memory>
namespace tvm {
namespace runtime {
namespace contrib {
/*!
* \brief Override ACL memory allocator and replace with TVM workspace based allocation.
*/
class ACLAllocator : public arm_compute::IAllocator {
public:
ACLAllocator() = default;
/*!
* \brief Allocate bytes to ACL runtime.
*
 * The specific implementation requests memory from TVM using its device API.
*
* \param size Size to allocate.
* \param alignment Alignment that the returned pointer should comply with.
* \return A pointer to the allocated memory.
*/
void* allocate(size_t size, size_t alignment) override;
/*!
* \brief Free memory from ACL runtime.
*
* \param ptr Pointer to workspace to free.
*/
void free(void* ptr) override;
/*!
* \brief Create self-managed memory region.
*
* \param size Size of the memory region.
* \param alignment Alignment of the memory region.
* \return The memory region object.
*/
std::unique_ptr<arm_compute::IMemoryRegion> make_region(size_t size, size_t alignment) override;
private:
/*! \brief Always allocate data in the context of the current CPU. */
const Device device_{kDLCPU, 0};
/*! \brief Device API which allows requests for memory from TVM. */
runtime::DeviceAPI* device_api_ = runtime::DeviceAPI::Get(device_);
};
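/*
 * A minimal sketch of how this allocator could back ACL allocations with TVM workspace
 * memory. The real implementation lives in acl_allocator.cc and may differ; alignment
 * handling is omitted here.
 *
 *   void* ACLAllocator::allocate(size_t size, size_t alignment) {
 *     return this->device_api_->AllocWorkspace(this->device_, size);
 *   }
 *   void ACLAllocator::free(void* ptr) {
 *     this->device_api_->FreeWorkspace(this->device_, ptr);
 *   }
 */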
/*!
* \brief Memory region that can request TVM memory for ACL to use.
*/
class ACLMemoryRegion : public arm_compute::IMemoryRegion {
public:
ACLMemoryRegion(size_t size, size_t alignment);
ACLMemoryRegion(void* ptr, size_t size);
~ACLMemoryRegion() override;
/*! \brief Prevent instances of this class from being copied (As this class contains
* pointers). */
ACLMemoryRegion(const ACLMemoryRegion&) = delete;
/*! \brief Default move constructor. */
ACLMemoryRegion(ACLMemoryRegion&&) = default;
  /*! \brief Prevent instances of this class from being copy assigned (as this class
   * contains pointers). */
ACLMemoryRegion& operator=(const ACLMemoryRegion&) = delete;
  /*! \brief Default move assignment operator. */
ACLMemoryRegion& operator=(ACLMemoryRegion&&) = default;
void* buffer() override { return this->ptr_; }
const void* buffer() const override { return this->ptr_; }
/*!
* \brief Extract a sub-region from the memory.
*
* \warning Ownership is maintained by the parent memory,
* while a wrapped raw memory region is returned by this function.
* Thus parent memory should not be released before this.
*
* \param offset Offset to the region.
* \param size Size of the region.
* \return A wrapped memory sub-region with no ownership of the
* underlying memory.
*/
std::unique_ptr<arm_compute::IMemoryRegion> extract_subregion(size_t offset,
size_t size) override;
private:
/*! \brief Points to a region of memory allocated by TVM. */
void* ptr_;
/*! \brief A subregion doesn't manage TVM memory so we don't need to free it. */
bool is_subregion_ = false;
/*! \brief Always allocate data in the context of the current CPU. */
const Device device_{kDLCPU, 0};
/*! \brief Device API which allows requests for memory from TVM. */
runtime::DeviceAPI* device_api_ = runtime::DeviceAPI::Get(device_);
};
} // namespace contrib
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_ARM_COMPUTE_LIB_ACL_ALLOCATOR_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/arm_compute_lib/acl_utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/runtime/contrib/arm_compute_lib/acl_utils.h
* \brief Utils and common functions for the interface.
*/
#ifndef TVM_RUNTIME_CONTRIB_ARM_COMPUTE_LIB_ACL_UTILS_H_
#define TVM_RUNTIME_CONTRIB_ARM_COMPUTE_LIB_ACL_UTILS_H_
#include <arm_compute/core/Types.h>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>
#include <arm_compute/runtime/Tensor.h>
#include <memory>
#include <string>
#include <vector>
#include "../json/json_node.h"
namespace tvm {
namespace runtime {
namespace contrib {
using JSONGraphNode = tvm::runtime::json::JSONGraphNode;
/*!
* \brief Check if there are any errors from acl and forward them to TVM.
*
* Status values:
* - 0 => OK
* - 1 => RUNTIME_ERROR
* - 2 => UNSUPPORTED_EXTENSION_USE
*
* \param status status of called function.
*/
void CheckACLError(const arm_compute::Status& status);
/*!
* \brief Make an acl tensor from JSON tensor representation.
*
* \param tensor_rep A JSON tensor representation.
* \param data (optional) Initialize the tensor with memory.
* \param scale (optional) The quantization scale.
* \param offset (optional) The quantization offset.
* \return arm_compute::Tensor.
*/
arm_compute::Tensor MakeACLTensor(const JSONGraphNode& tensor_rep, void* data = nullptr,
const DLTensor* scale = nullptr, const DLTensor* offset = nullptr,
bool apply_dim_correction = true, bool increase_dim_unit = true,
uint32_t entry_index = 0);
/*!
* \brief Make an acl tensor info object from JSON tensor
* representation.
*
* \param shape The shape of the tensor to create.
* \param dtype The data type of the tensor to create.
* \param scale (optional) The quantization scale.
* \param offset (optional) The quantization offset.
* \return arm_compute::TensorInfo.
*/
arm_compute::TensorInfo MakeACLTensorInfo(const std::vector<int64_t>& shape,
const DLDataType& dtype, const DLTensor* scale = nullptr,
const DLTensor* offset = nullptr,
bool apply_dim_correction = true,
bool increase_dim_unit = true);
/*!
* \brief Create a memory manager for use with a layer that
* requires working memory.
*
* \return reference counted memory manager.
*/
std::shared_ptr<arm_compute::MemoryManagerOnDemand> MakeACLMemoryManager();
/*!
* \brief Convert TVM padding and stride format to acl PadStrideInfo.
*
* \param pad The pad vector.
* \param stride The stride vector.
* \param ceil_mode Dimensions rounding.
* \return arm_compute::PadStrideInfo
*/
arm_compute::PadStrideInfo MakeACLPadStride(const std::vector<std::string>& pad,
const std::vector<std::string>& stride,
bool ceil_mode = false);
/*!
* \brief Convert DLDataType to arm_compute::DataType.
*
* \param data_type The data type to convert.
* \return arm_compute::DataType.
*/
arm_compute::DataType MakeACLDataType(const DLDataType& data_type);
/*!
* \brief Convert string to arm_compute::ActivationLayerInfo
*
* \param activation_type A string representing activation function.
* Currently supports the following options: "relu".
* \return arm_compute::ActivationLayerInfo.
*/
arm_compute::ActivationLayerInfo MakeACLActivationInfo(const std::string& activation_type);
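/*
 * Illustrative usage of the helpers above (a sketch; the pad/stride values are hypothetical
 * and simply show the expected string-encoded inputs):
 *
 *   DLDataType f32{kDLFloat, 32, 1};
 *   arm_compute::DataType acl_dtype = MakeACLDataType(f32);  // maps to the ACL float32 type
 *   arm_compute::PadStrideInfo pad_stride =
 *       MakeACLPadStride({"1", "1", "1", "1"}, {"2", "2"});
 *   arm_compute::ActivationLayerInfo act = MakeACLActivationInfo("relu");
 */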
/*!
* \brief Get a vector from DLTensor data.
* \note Performs a copy of data.
*
* \tparam T The type of the vector.
* \param tensor The tensor to convert.
* \return Vector of type T.
*/
template <typename T>
std::vector<T> GetVectorFromDLTensor(const DLTensor* tensor);
} // namespace contrib
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_ARM_COMPUTE_LIB_ACL_UTILS_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/bnns/bnns_wrp.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* \file
* \brief C++ wrappers and helpers to handle BNNS objects
*/
#ifndef TVM_RUNTIME_CONTRIB_BNNS_BNNS_WRP_H_
#define TVM_RUNTIME_CONTRIB_BNNS_BNNS_WRP_H_
#include <Accelerate/Accelerate.h>
#include <algorithm>
#include <functional>
#include <memory>
#include <numeric>
#include <tuple>
#include <vector>
namespace tvm {
namespace runtime {
namespace contrib {
namespace BNNS {
using Dim = size_t;
using Shape = std::vector<Dim>;
using Dtype = BNNSDataType;
using HDL = void*;
void* default_alloc(size_t size) { return malloc(size); }
void default_free(void* ptr) { free(ptr); }
/**
* Main abstraction for tensor representation
*
 * Contains the buffer handle and common attributes like shape and dtype.
*/
class Tensor {
public:
Tensor() = delete;
Tensor(Tensor&) = delete;
Tensor(Shape shape, Dtype dtype, void* hdl) {
auto rank = shape.size();
ICHECK(rank < BNNS_MAX_TENSOR_DIMENSION);
desc_ = {BNNSNDArrayFlags(0),
getPlainLayout(rank),
{}, // shape
{}, // strides
hdl, // data handler
dtype, // data type
nullptr, // table_data (clustering case), is not used
dtype,
1.f,
0.f};
std::copy(shape.rbegin(), shape.rend(), std::begin(desc_.size));
desc_.data = hdl;
is_external_data = true;
}
~Tensor() {
if (desc_.data && !is_external_data) {
default_free(desc_.data);
desc_.data = nullptr;
}
}
void allocate_memory() {
if (desc_.data && !is_external_data) {
default_free(desc_.data);
}
const size_t buff_size = getSize(desc_) * getElementSize(desc_);
desc_.data = default_alloc(buff_size);
ICHECK(desc_.data);
is_external_data = false;
}
void* get_data_hdl() const { return desc_.data; }
void set_data_hdl(void* hdl) {
if (desc_.data && !is_external_data) {
default_free(desc_.data);
desc_.data = nullptr;
}
desc_.data = hdl;
is_external_data = true;
}
const BNNSNDArrayDescriptor& get_desc() const { return desc_; }
static BNNSDataLayout getPlainLayout(size_t rank) {
ICHECK(rank <= BNNS_MAX_TENSOR_DIMENSION);
return static_cast<BNNSDataLayout>((rank << 16) | 0x8001);
}
static size_t getRank(BNNSDataLayout layout) { return (layout & 0xF0000) >> 16; }
static size_t getRank(BNNSNDArrayDescriptor desc) { return getRank(desc.layout); }
static size_t getSize(BNNSNDArrayDescriptor desc) {
auto rank = getRank(desc);
return std::accumulate(desc.size, desc.size + rank, 1, std::multiplies<int>());
}
/** return size of element in bytes */
static size_t getElementSize(Dtype dtype) { return (dtype & 0xFFFF) / 8; }
/** return size of element in bytes */
static size_t getElementSize(const BNNSNDArrayDescriptor& desc) {
return getElementSize(desc.data_type);
}
private:
bool is_external_data = false;
BNNSNDArrayDescriptor desc_;
};
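/*
 * Worked example of the bit packing used by the helpers above (derived from the arithmetic in
 * getPlainLayout / getRank / getElementSize, not from the BNNS documentation):
 *
 *   getPlainLayout(4)  -> (4 << 16) | 0x8001 == 0x48001
 *   getRank(0x48001)   -> (0x48001 & 0xF0000) >> 16 == 4
 *   getElementSize(dt) -> the low 16 bits of the data type hold the bit width,
 *                         so a 32-bit float type yields 32 / 8 == 4 bytes
 */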
using TensorPtr = std::shared_ptr<Tensor>;
/**
 * Tensor View object which represents how a provided BNNS::Tensor will be interpreted.
 *
 * A single BNNS::Tensor can be treated in different forms depending on the expectations of a
 * particular primitive. Moreover, some primitives support only an external form of batching,
 * so this abstraction describes how a primitive will handle the provided tensor.
*
* Batched View
* View with extracted dimension as external batch value
* example: Tensor [2, 3, 224, 224] -> View [3, 224, 224] with ext batch 2
*
* Party View
 * A collection of views on the same tensor; each view may be identical or offset by some stride
* example: Tensor [6, 5, 3, 3] -> 3 x View [2, 5, 3, 3] with stride 45
*/
class TView {
public:
/** Make view on provided tensor as is */
static TView as_is(const TensorPtr& origin) {
TView res;
res.origin_ = origin;
res.view_desc_ = origin->get_desc();
return res;
}
  /** Extract the outer dimension into a separate batch field. The TView will become a batched view. */
TView extract_outer_dim() const {
auto rank = Tensor::getRank(view_desc_);
TView res = *this;
res.batch_size_ = view_desc_.size[rank - 1];
res.batch_stride_ =
std::accumulate(view_desc_.size, view_desc_.size + rank - 1, 1, std::multiplies<>());
res.view_desc_.size[rank - 1] = 0;
res.view_desc_.layout = Tensor::getPlainLayout(rank - 1);
return res;
}
  /** Squeeze all dims equal to 1 */
TView squeeze(size_t min_rank = 1) const {
auto rank = Tensor::getRank(view_desc_);
size_t squeezed_shape[BNNS_MAX_TENSOR_DIMENSION] = {};
size_t squeezed_rank = 0;
for (int i = 0; i < rank; i++)
if (view_desc_.size[i] != 1) squeezed_shape[squeezed_rank++] = view_desc_.size[i];
if (min_rank > squeezed_rank) {
std::fill(squeezed_shape + squeezed_rank, squeezed_shape + min_rank, 1);
squeezed_rank = min_rank;
}
TView res = *this;
std::copy(squeezed_shape, squeezed_shape + squeezed_rank, res.view_desc_.size);
std::fill(res.view_desc_.size + squeezed_rank, res.view_desc_.size + rank, 0);
res.view_desc_.layout = Tensor::getPlainLayout(squeezed_rank);
return res;
}
/** Expand the shape of an array */
TView expand_dims(std::vector<size_t> axes) const {
auto rank = Tensor::getRank(view_desc_);
TView res = *this;
size_t unsqueezed_shape[BNNS_MAX_TENSOR_DIMENSION] = {};
size_t unsqueezed_rank = axes.size() + rank;
ICHECK_LE(unsqueezed_rank, BNNS_MAX_TENSOR_DIMENSION);
for (const auto& axis : axes) {
ICHECK_LT(axis, unsqueezed_rank);
unsqueezed_shape[axis] = 1;
}
for (int i = 0, orig_idx = 0; i < unsqueezed_rank; ++i) {
if (unsqueezed_shape[i] == 1) continue;
unsqueezed_shape[i] = view_desc_.size[orig_idx++];
}
std::copy(unsqueezed_shape, unsqueezed_shape + unsqueezed_rank, res.view_desc_.size);
res.view_desc_.layout = Tensor::getPlainLayout(unsqueezed_rank);
return res;
}
/** Unsqueeze tensor to a new rank */
TView unsqueeze(size_t new_rank) const {
ICHECK_LE(new_rank, BNNS_MAX_TENSOR_DIMENSION);
auto rank = Tensor::getRank(view_desc_);
ICHECK_GT(new_rank, rank);
std::vector<size_t> axes(new_rank - rank);
std::iota(axes.begin(), axes.end(), rank);
return expand_dims(axes);
}
  /** Construct a new TView with the specified layout if it is applicable */
TView with_layout(BNNSDataLayout layout) const {
ICHECK_EQ(Tensor::getRank(view_desc_), Tensor::getRank(layout));
TView res = *this;
res.view_desc_.layout = layout;
return res;
}
/** Construct party TView by splitting original TView into num parts */
TView party_split_n(size_t num) const {
ICHECK_EQ(party_size_, 1);
TView res = *this;
size_t rank = Tensor::getRank(view_desc_);
size_t size = Tensor::getSize(view_desc_);
res.party_size_ = num;
res.party_stride_ = size / num;
if (res.batch_size_ != 1) {
res.batch_size_ /= num;
} else {
res.view_desc_.size[rank - 1] /= num;
res.batch_stride_ /= num;
}
return res;
}
/** Construct party TView by duplicating original TView num times */
TView party_duplicate_n(size_t num) const {
ICHECK_EQ(party_size_, 1);
TView res = *this;
res.party_size_ = num;
res.party_stride_ = 0;
return res;
}
  /** Return the data buffer handle */
HDL get_data_hdl() const { return view_desc_.data; }
/** Return external batch dimension value */
size_t get_batch_size() const { return batch_size_; }
/** Return external batch dimension stride */
size_t get_stride() const { return batch_stride_; }
/** Return party element by index */
TView operator[](size_t i) const {
ICHECK_LT(i, party_size_);
TView res = *this;
res.party_size_ = 1;
if (origin_) {
auto hdl = reinterpret_cast<uint8_t*>(origin_->get_data_hdl());
hdl += i * party_stride_ * Tensor::getElementSize(view_desc_.data_type);
res.view_desc_.data = hdl;
}
return res;
}
  /** Check if the view is empty and doesn't refer to any tensor */
operator bool() const { return origin_ != nullptr; }
  /** Get the BNNS descriptor for a particular View. Batch and Party attributes are ignored. */
const BNNSNDArrayDescriptor& get_bnns_view() const { return view_desc_; }
private:
/** Original tensor object to view on */
TensorPtr origin_;
/** Batched view parameters */
BNNSNDArrayDescriptor view_desc_ = {};
size_t batch_size_ = 1;
size_t batch_stride_ = 0;
/** Party representation parameters */
size_t party_size_ = 1;
size_t party_stride_ = 0;
};
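/*
 * Illustrative sketch of the transformations above (a hypothetical TensorPtr `src`; shapes
 * mirror the examples in the class comment):
 *
 *   // Tensor [2, 3, 224, 224] -> batched view [3, 224, 224] with external batch 2
 *   TView batched = TView::as_is(src).extract_outer_dim();
 *
 *   // Tensor [6, 5, 3, 3] -> 3 party views of shape [2, 5, 3, 3]
 *   TView party = TView::as_is(src).party_split_n(3);
 *   TView first = party[0];  // first sub-view of the party
 */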
/**
* Wrapper on top of BNNSFilter and src/dst TensorView.
*
 * Supports a decomposed representation of the filter and can execute sub-primitives in parallel.
*/
class Primitive {
public:
Primitive(const std::vector<BNNSFilter> fs, const TView& src, const TView& dst)
: filters(fs), src_view(src), dst_view(dst) {}
virtual ~Primitive() {
for (auto& filter : filters)
if (filter) {
BNNSFilterDestroy(filter);
filter = nullptr;
}
}
/** Execute primitive with using specified src/dst */
void execute() {
auto res = TVMBackendParallelLaunch(run_task, this, filters.size());
ICHECK_EQ(res, 0) << "BNNS runtime. Primitive was not executed properly";
}
private:
virtual int execute_impl(int part_idx) {
const auto filter = this->filters[part_idx];
const auto src_view = this->src_view[part_idx];
const auto dst_view = this->dst_view[part_idx];
size_t mb = src_view.get_batch_size();
// NB! BNNS limitations
// * Do not use simple BNNSFilterApply. There is a bug inside BNNS,
// BNNSFilterApply doesn't work for grouped convolution.
// * Group convolution doesn't support arbitrary stride for Batch dim.
// The tensor should be dense.
return BNNSFilterApplyBatch(filter, mb, src_view.get_data_hdl(), src_view.get_stride(),
dst_view.get_data_hdl(), dst_view.get_stride());
}
static int run_task(int task_id, TVMParallelGroupEnv* penv, void* cdata) {
auto prim = reinterpret_cast<Primitive*>(cdata);
return prim->execute_impl(task_id);
}
protected:
  /** Collection of BNNS kernels/filters which will execute the primitive */
std::vector<BNNSFilter> filters = {};
const TView src_view;
const TView dst_view;
};
/**
* Wrapper on top of BNNS::Primitive
*
* This primitive should be used for executing primitive with two inputs.
*/
class TwoInputPrimitive : public Primitive {
public:
TwoInputPrimitive(const std::vector<BNNSFilter> fs, const TView& src, const TView& src2,
const TView& dst)
: Primitive(fs, src, dst), src2_view(src2) {}
private:
int execute_impl(int task_id) override {
const auto filter = this->filters[task_id];
const auto src_view = this->src_view[task_id];
const auto src2_view = this->src2_view[task_id];
const auto dst_view = this->dst_view[task_id];
size_t mb = src_view.get_batch_size();
return BNNSFilterApplyTwoInputBatch(filter, mb, src_view.get_data_hdl(), src_view.get_stride(),
src2_view.get_data_hdl(), src2_view.get_stride(),
dst_view.get_data_hdl(), dst_view.get_stride());
}
protected:
const TView src2_view;
};
/**
* Wrapper on top of BNNS::Primitive
*
* This primitive should be used for executing normalization filter
*/
class NormPrimitive : public Primitive {
public:
using Primitive::Primitive;
private:
int execute_impl(int task_id) override {
const auto filter = this->filters[task_id];
const auto src_view = this->src_view[task_id];
const auto dst_view = this->dst_view[task_id];
size_t mb = src_view.get_batch_size();
return BNNSNormalizationFilterApplyBatch(filter, mb, src_view.get_data_hdl(),
src_view.get_stride(), dst_view.get_data_hdl(),
dst_view.get_stride(), false);
}
};
/**
* Wrapper on top of BNNS::Primitive
*
* This primitive should be used for executing pooling filter
*/
class PoolingPrimitive : public Primitive {
public:
using Primitive::Primitive;
private:
int execute_impl(int task_id) override {
const auto filter = this->filters[task_id];
const auto src_view = this->src_view[task_id];
const auto dst_view = this->dst_view[task_id];
size_t mb = src_view.get_batch_size();
return BNNSPoolingFilterApplyBatch(filter, mb, src_view.get_data_hdl(), src_view.get_stride(),
dst_view.get_data_hdl(), dst_view.get_stride(), nullptr, 0);
}
};
/**
 * Function which splits a primitive into sub-primitives for parallel execution
 *
 * @param num number of parts to split into
 * @param orig_conv_param original convolution descriptor
 * @param src_view source tensor view
 * @param wgh_view weight tensor view
 * @param b_view bias tensor view
 * @param dst_view destination tensor view
 * @return collection of convolution descriptors plus the corresponding src/dst tensor views
*/
static std::tuple<std::vector<BNNSLayerParametersConvolution>, TView, TView> split_to_n(
size_t num, const BNNSLayerParametersConvolution& orig_conv_param, const TView& src_view,
const TView& wgh_view, const TView& b_view, const TView& dst_view) {
size_t batch = src_view.get_batch_size();
size_t oc = dst_view.get_bnns_view().size[2];
size_t groups = orig_conv_param.groups;
BNNS::TView src_view_new;
BNNS::TView wgh_view_new;
BNNS::TView b_view_new;
BNNS::TView dst_view_new;
// TODO(apeskov): Add split by batch dim. Meanwhile we just disable it...
if (batch > 1 || oc % num != 0 || (groups > 1 && groups % num != 0)) {
return {{orig_conv_param}, src_view, dst_view};
}
// if groups > 1 split only by groups
// otherwise split inside one convolution by output channels
if (groups > 1) {
src_view_new = src_view.party_split_n(num);
groups = groups / num;
} else {
src_view_new = src_view.party_duplicate_n(num);
}
wgh_view_new = wgh_view.party_split_n(num);
b_view_new = b_view.party_split_n(num);
dst_view_new = dst_view.party_split_n(num);
std::vector<BNNSLayerParametersConvolution> res(num);
for (size_t i = 0; i < num; i++) {
auto& cur = res[i];
cur = orig_conv_param;
cur.i_desc = src_view_new[i].get_bnns_view();
cur.o_desc = dst_view_new[i].get_bnns_view();
cur.w_desc = wgh_view_new[i].get_bnns_view();
cur.bias = b_view_new[i].get_bnns_view();
cur.groups = groups;
}
return {res, src_view_new, dst_view_new};
}
} // namespace BNNS
} // namespace contrib
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_BNNS_BNNS_WRP_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/cblas/gemm_common.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file tvm/contrib/gemm.h
* \brief Shared implementation of gemm
*/
#ifndef TVM_RUNTIME_CONTRIB_CBLAS_GEMM_COMMON_H_
#define TVM_RUNTIME_CONTRIB_CBLAS_GEMM_COMMON_H_
#include <tvm/runtime/data_type.h>
#include <tvm/runtime/registry.h>
#include <algorithm>
#include <string>
namespace tvm {
namespace contrib {
using namespace runtime;
inline int ColumnStride(DLTensor* tensor) {
// If the tensor itself is transposed then it will have strides
// backward from what we expect. Regardless, the max of the strides
// (the other stride is 1) is the column stride.
if (tensor->strides) {
return std::max(tensor->strides[0], tensor->strides[1]);
} else {
return tensor->shape[1];
}
}
inline int ElementStride(DLTensor* tensor) {
if (tensor->strides) {
return std::min(tensor->strides[0], tensor->strides[1]);
} else {
return 1;
}
}
// Reversed strides indicate an in-place transpose operation.
inline bool IsInPlaceTransposed(DLTensor* tensor) {
return tensor->strides && (tensor->strides[1] > tensor->strides[0]);
}
inline int RowCount(DLTensor* tensor, bool trans) { return tensor->shape[trans ? 1 : 0]; }
inline int ColumnCount(DLTensor* tensor, bool trans) { return tensor->shape[trans ? 0 : 1]; }
// Call a column major blas. Note that data is stored in tvm as row
// major, so we switch the arguments here.
template <typename TGemmOp>
inline void CallGemm(TVMArgs args, TVMRetValue* ret, TGemmOp op) {
DLTensor* A = args[0];
DLTensor* B = args[1];
DLTensor* C = args[2];
bool transa = args[3];
bool transb = args[4];
int bit_depth = sizeof(typename TGemmOp::TDatatype) * 8;
ICHECK_EQ(A->ndim, 2);
ICHECK_EQ(B->ndim, 2);
ICHECK_EQ(C->ndim, 2);
ICHECK_EQ(ElementStride(A), 1);
ICHECK_EQ(ElementStride(B), 1);
ICHECK_EQ(ElementStride(C), 1);
// C can never be transposed.
ICHECK(!IsInPlaceTransposed(C));
  // Reversed strides indicate an in-place transpose operation.
transa = IsInPlaceTransposed(A) ? !transa : transa;
transb = IsInPlaceTransposed(B) ? !transb : transb;
ICHECK(TypeMatch(B->dtype, kDLFloat, bit_depth));
ICHECK(TypeMatch(C->dtype, kDLFloat, bit_depth));
double alpha = args.size() > 5 ? args[5] : 1.0;
double beta = args.size() > 6 ? args[6] : 0.0;
op(transb, transa, ColumnCount(B, transb), RowCount(A, transa), ColumnCount(A, transa),
static_cast<typename TGemmOp::TDatatype>(alpha),
reinterpret_cast<typename TGemmOp::TDatatype*>(static_cast<char*>(B->data) + B->byte_offset),
ColumnStride(B),
reinterpret_cast<typename TGemmOp::TDatatype*>(static_cast<char*>(A->data) + A->byte_offset),
ColumnStride(A), static_cast<typename TGemmOp::TDatatype>(beta),
reinterpret_cast<typename TGemmOp::TDatatype*>(static_cast<char*>(C->data) + C->byte_offset),
ColumnStride(C));
}
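// Why the arguments are swapped above: for row-major data, C = A * B is equivalent to asking a
// column-major BLAS for C^T = B^T * A^T, since a row-major matrix reinterpreted as column-major
// is exactly its transpose. Swapping A and B (together with their transpose flags and leading
// dimensions) therefore lets the column-major kernel write C directly in row-major layout.
// This note is explanatory only and adds no new behaviour.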
// Call a column major blas. Note that data is stored in tvm as row
// major, so we switch the arguments here.
template <typename TGemmOp>
inline void CallU8S8S32Gemm(TVMArgs args, TVMRetValue* ret, TGemmOp op) {
DLTensor* A = args[0];
DLTensor* B = args[1];
DLTensor* C = args[2];
bool transa = args[3];
bool transb = args[4];
// Set the sgemm attributes. Currently, support is limited to CblasFixOffset with all offsets
// equal to 0. This is sufficient for relay dense.
std::string offset_ctype = "CblasFixOffset";
int16_t offset_a = 0;
int16_t offset_b = 0;
int offset_c[1];
offset_c[0] = 0;
ICHECK_EQ(A->ndim, 2);
ICHECK_EQ(B->ndim, 2);
ICHECK_EQ(C->ndim, 2);
ICHECK_EQ(ElementStride(A), 1);
ICHECK_EQ(ElementStride(B), 1);
ICHECK_EQ(ElementStride(C), 1);
// C can never be transposed.
ICHECK(!IsInPlaceTransposed(C));
  // Reversed strides indicate an in-place transpose operation.
transa = IsInPlaceTransposed(A) ? !transa : transa;
transb = IsInPlaceTransposed(B) ? !transb : transb;
ICHECK(TypeMatch(A->dtype, kDLUInt, 8));
ICHECK(TypeMatch(B->dtype, kDLInt, 8));
ICHECK(TypeMatch(C->dtype, kDLInt, 32));
double alpha = args.size() > 5 ? args[5] : 1.0;
double beta = args.size() > 6 ? args[6] : 0.0;
op(transb, transa, ColumnCount(B, transb), RowCount(A, transa), ColumnCount(A, transa),
static_cast<float>(alpha),
reinterpret_cast<void*>(static_cast<char*>(B->data) + B->byte_offset), ColumnStride(B),
offset_b, reinterpret_cast<void*>(static_cast<char*>(A->data) + A->byte_offset),
ColumnStride(A), offset_a, static_cast<float>(beta),
reinterpret_cast<int*>(static_cast<char*>(C->data) + C->byte_offset), ColumnStride(C),
offset_ctype, offset_c);
}
inline int ColumnStride3D(DLTensor* tensor) {
// If the tensor itself is transposed then it will have strides
// backward from what we expect. Regardless, the max of the strides
// (the other stride is 1) is the column stride.
if (tensor->strides) {
return std::max(tensor->strides[1], tensor->strides[2]);
} else {
return tensor->shape[2];
}
}
inline int ElementStride3D(DLTensor* tensor) {
if (tensor->strides) {
return std::min(tensor->strides[1], tensor->strides[2]);
} else {
return 1;
}
}
// Reversed strides indicate an in-place transpose operation.
inline bool IsInPlaceTransposed3D(DLTensor* tensor) {
return tensor->strides && (tensor->strides[2] > tensor->strides[1]);
}
inline int BatchCount3D(DLTensor* tensor) { return tensor->shape[0]; }
inline int RowCount3D(DLTensor* tensor, bool trans) { return tensor->shape[trans ? 2 : 1]; }
inline int ColumnCount3D(DLTensor* tensor, bool trans) { return tensor->shape[trans ? 1 : 2]; }
template <typename TBatchGemmOp>
inline void CallBatchGemm(TVMArgs args, TVMRetValue* ret, TBatchGemmOp op) {
using DType = typename TBatchGemmOp::TDatatype;
DLTensor* A = args[0];
DLTensor* B = args[1];
DLTensor* C = args[2];
bool transa = args[3];
bool transb = args[4];
int bit_depth = sizeof(DType) * 8;
ICHECK_EQ(A->ndim, 3);
ICHECK_EQ(B->ndim, 3);
ICHECK_EQ(C->ndim, 3);
int batch_size = BatchCount3D(C);
ICHECK_EQ(ElementStride(A), 1);
ICHECK_EQ(ElementStride(B), 1);
ICHECK_EQ(ElementStride(C), 1);
// C can never be transposed.
ICHECK(!IsInPlaceTransposed3D(C));
  // Reversed strides indicate an in-place transpose operation.
transa = IsInPlaceTransposed3D(A) ? !transa : transa;
transb = IsInPlaceTransposed3D(B) ? !transb : transb;
ICHECK(TypeMatch(B->dtype, kDLFloat, bit_depth));
ICHECK(TypeMatch(C->dtype, kDLFloat, bit_depth));
double alpha = args.size() > 5 ? args[5] : 1.0;
double beta = args.size() > 6 ? args[6] : 0.0;
int A_stride = A->shape[1] * A->shape[2];
int B_stride = B->shape[1] * B->shape[2];
int C_stride = C->shape[1] * C->shape[2];
// Broadcast A or B by changing its stride.
int batch_size_a = BatchCount3D(A);
int batch_size_b = BatchCount3D(B);
if (batch_size_a != batch_size_b) {
if (batch_size_a == 1) {
A_stride = 0;
} else if (batch_size_b == 1) {
B_stride = 0;
}
} else {
ICHECK_EQ(batch_size_a, batch_size);
ICHECK_EQ(batch_size_b, batch_size);
}
DType* A_data = reinterpret_cast<typename TBatchGemmOp::TDatatype*>(static_cast<char*>(A->data) +
A->byte_offset);
DType* B_data = reinterpret_cast<typename TBatchGemmOp::TDatatype*>(static_cast<char*>(B->data) +
B->byte_offset);
DType* C_data = reinterpret_cast<typename TBatchGemmOp::TDatatype*>(static_cast<char*>(C->data) +
C->byte_offset);
op(batch_size, transb, transa, ColumnCount3D(B, transb), RowCount3D(A, transa),
ColumnCount3D(A, transa), static_cast<typename TBatchGemmOp::TDatatype>(alpha), B_data,
B_stride, ColumnStride3D(B), A_data, A_stride, ColumnStride3D(A),
static_cast<typename TBatchGemmOp::TDatatype>(beta), C_data, C_stride, ColumnStride3D(C));
}
} // namespace contrib
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_CBLAS_GEMM_COMMON_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/coreml/coreml_runtime.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
 * \brief CoreML runtime that can run a compiled CoreML model,
 * exposed through the tvm PackedFunc API.
* \file coreml_runtime.h
*/
#ifndef TVM_RUNTIME_CONTRIB_COREML_COREML_RUNTIME_H_
#define TVM_RUNTIME_CONTRIB_COREML_COREML_RUNTIME_H_
#import <CoreML/CoreML.h>
#import <Foundation/Foundation.h>
#include <dlpack/dlpack.h>
#include <tvm/runtime/ndarray.h>
#include <tvm/runtime/packed_func.h>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
namespace tvm {
namespace runtime {
/*!
* \brief CoreML model.
*/
class CoreMLModel {
public:
/*!
* \brief constructor
* \param url The directory where compiled models are located.
*/
explicit CoreMLModel(NSURL* url) {
url_ = url;
model_ = [MLModel modelWithContentsOfURL:url error:nil];
input_dict_ = [NSMutableDictionary dictionary];
output_ = nil;
}
/*!
* \brief Invoke the coreml prediction.
*/
void Invoke();
/*!
* \brief set input to the model.
* \param key The input name.
* \param data_in The input data.
*/
void SetInput(const std::string& key, DLTensor* data_in);
/*!
* \brief Return NDArray for given output index.
* \param index The output index.
*
* \return NDArray corresponding to given output node index.
*/
NDArray GetOutput(int index) const;
/*!
* \brief Return the number of outputs
*
* \return The number of outputs
*/
int GetNumOutputs() const;
// CoreML model url
NSURL* url_;
// CoreML model
MLModel* model_;
// CoreML model input dictionary
NSMutableDictionary<NSString*, id>* input_dict_;
// CoreML model output
id<MLFeatureProvider> output_;
};
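/*
 * Illustrative call sequence (a sketch; the input name and URL are hypothetical, and
 * `dl_tensor` is a DLTensor* prepared by the caller):
 *
 *   NSURL* url = ...;  // path to a compiled .mlmodelc directory
 *   CoreMLModel model(url);
 *   model.SetInput("input_1", dl_tensor);
 *   model.Invoke();
 *   NDArray out = model.GetOutput(0);
 */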
/*!
* \brief CoreML runtime.
*
 * This runtime can be accessed in various languages via the
* TVM runtime PackedFunc API.
*/
class CoreMLRuntime : public ModuleNode {
public:
/*!
* \brief Get member function to front-end.
* \param name The name of the function.
* \param sptr_to_self The pointer to the module node.
* \return The corresponding member function.
*/
virtual PackedFunc GetFunction(const std::string& name, const ObjectPtr<Object>& sptr_to_self);
/*!
* \brief Serialize the content of the mlmodelc directory and save it to
* binary stream.
* \param stream The binary stream to save to.
*/
void SaveToBinary(dmlc::Stream* stream) final;
/*!
* \return The type key of the executor.
*/
const char* type_key() const { return "coreml"; }
/*!
* \brief Initialize the coreml runtime with coreml model and context.
* \param symbol The symbol of this model.
* \param model_path The compiled model path.
*/
void Init(const std::string& symbol, const std::string& model_path);
/*! \brief The symbol that represents the Core ML model. */
std::string symbol_;
/*! \brief The Core ML model */
std::unique_ptr<CoreMLModel> model_;
};
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_COREML_COREML_RUNTIME_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/cublas/cublas_utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
 * \file Use external cublas utils function
*/
#ifndef TVM_RUNTIME_CONTRIB_CUBLAS_CUBLAS_UTILS_H_
#define TVM_RUNTIME_CONTRIB_CUBLAS_CUBLAS_UTILS_H_
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <dlpack/dlpack.h>
#include <tvm/runtime/logging.h>
#include <cstdint>
#if CUDART_VERSION >= 10010
#include <cublasLt.h>
#endif // CUDART_VERSION >= 10010
namespace tvm {
namespace contrib {
inline const char* GetCublasErrorString(int error) {
switch (error) {
case CUBLAS_STATUS_NOT_INITIALIZED:
return "CUBLAS_STATUS_NOT_INITIALIZED";
case CUBLAS_STATUS_ALLOC_FAILED:
return "CUBLAS_STATUS_ALLOC_FAILED";
case CUBLAS_STATUS_INVALID_VALUE:
return "CUBLAS_STATUS_INVALID_VALUE";
case CUBLAS_STATUS_ARCH_MISMATCH:
return "CUBLAS_STATUS_ARCH_MISMATCH";
case CUBLAS_STATUS_MAPPING_ERROR:
return "CUBLAS_STATUS_MAPPING_ERROR";
case CUBLAS_STATUS_EXECUTION_FAILED:
return "CUBLAS_STATUS_EXECUTION_FAILED";
case CUBLAS_STATUS_INTERNAL_ERROR:
return "CUBLAS_STATUS_INTERNAL_ERROR";
case CUBLAS_STATUS_NOT_SUPPORTED:
return "CUBLAS_STATUS_NOT_SUPPORTED";
case CUBLAS_STATUS_LICENSE_ERROR:
return "CUBLAS_STATUS_LICENSE_ERROR";
}
return "Unrecognized error";
}
#ifndef CHECK_CUBLAS_ERROR
#define CHECK_CUBLAS_ERROR(fn) \
do { \
int error = static_cast<int>(fn); \
ICHECK_EQ(error, CUBLAS_STATUS_SUCCESS) << "CUBLAS: " << GetCublasErrorString(error); \
} while (0) // ; intentionally left off.
#endif // CHECK_CUBLAS_ERROR
struct CuBlasThreadEntry {
CuBlasThreadEntry();
~CuBlasThreadEntry();
cublasHandle_t handle{nullptr};
static CuBlasThreadEntry* ThreadLocal();
}; // CuBlasThreadEntry
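/*
 * Typical usage pattern (a sketch): fetch the thread-local handle and wrap every cuBLAS call
 * in CHECK_CUBLAS_ERROR so failures surface as TVM errors. The stream below is hypothetical
 * and owned by the caller.
 *
 *   CuBlasThreadEntry* entry = CuBlasThreadEntry::ThreadLocal();
 *   cudaStream_t stream = ...;
 *   CHECK_CUBLAS_ERROR(cublasSetStream(entry->handle, stream));
 */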
inline cudaDataType_t GetCudaDataType(DLDataType type) {
if (type.code == kDLInt) {
switch (type.bits) {
case 8:
return CUDA_R_8I;
case 32:
return CUDA_R_32I;
}
} else if (type.code == kDLUInt) {
switch (type.bits) {
case 8:
return CUDA_R_8U;
case 32:
return CUDA_R_32U;
}
} else if (type.code == kDLFloat) {
switch (type.bits) {
case 16:
return CUDA_R_16F;
case 32:
return CUDA_R_32F;
case 64:
return CUDA_R_64F;
}
}
LOG(FATAL) << "Unsupported cuda type";
return CUDA_R_16F;
}
} // namespace contrib
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_CUBLAS_CUBLAS_UTILS_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/cudnn/cudnn_utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file Use external cudnn utils function
*/
#ifndef TVM_RUNTIME_CONTRIB_CUDNN_CUDNN_UTILS_H_
#define TVM_RUNTIME_CONTRIB_CUDNN_CUDNN_UTILS_H_
#include <cudnn.h>
#include <tvm/runtime/device_api.h>
#include <tvm/runtime/logging.h>
#include <string>
#include "../../cuda/cuda_common.h"
namespace tvm {
namespace contrib {
#define CUDNN_CALL(func) \
{ \
cudnnStatus_t e = (func); \
ICHECK_EQ(e, CUDNN_STATUS_SUCCESS) << "cuDNN: " << cudnnGetErrorString(e); \
}
/*! \brief Convert DLTensor type to CuDNN type */
struct CuDNNDataType {
static cudnnDataType_t DLTypeToCuDNNType(const DLDataType& dtype);
template <int v>
static const void* GetConst(cudnnDataType_t type);
}; // struct CuDNNDataType
inline void GetStride(int nbdim, const int* dims, int* strides) {
int mul = 1;
for (int i = nbdim - 1; i >= 0; --i) {
mul *= dims[i];
strides[i] = mul;
}
}
inline void GetCudnnStride(int nbdim, const int* dims, int* strides) {
int mul = 1;
for (int i = nbdim - 1; i >= 0; --i) {
strides[i] = mul;
mul *= dims[i];
}
}
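// Worked example for dims = {2, 3, 4}:
//   GetStride      -> {24, 12, 4}  (running product that includes the current dim, i.e. the
//                                   number of elements from this axis inward)
//   GetCudnnStride -> {12, 4, 1}   (standard packed row-major strides expected by cuDNN)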
struct ConvEntry {
cudnnConvolutionDescriptor_t conv_desc;
cudnnConvolutionMode_t mode{CUDNN_CROSS_CORRELATION};
cudnnDataType_t data_type;
cudnnTensorFormat_t tensor_format;
cudnnTensorDescriptor_t input_desc;
cudnnFilterDescriptor_t filter_desc;
cudnnTensorDescriptor_t bias_desc;
cudnnActivationDescriptor_t activation_desc;
cudnnTensorDescriptor_t output_desc;
cudnnConvolutionFwdAlgo_t fwd_algo;
cudnnConvolutionBwdDataAlgo_t bwd_data_algo;
cudnnConvolutionBwdFilterAlgo_t bwd_filter_algo;
// cudnnMathType_t math_type;
Device device;
runtime::DeviceAPI* cuda_api;
void* workspace{nullptr};
size_t workspace_size{0};
ConvEntry();
~ConvEntry();
void UpdateWorkspace(const size_t wsize);
void CleanWorkspace();
}; // ConvThreadEntry
struct SoftmaxEntry {
cudnnSoftmaxMode_t mode;
cudnnDataType_t data_type;
cudnnTensorDescriptor_t shape_desc;
SoftmaxEntry();
~SoftmaxEntry();
}; // SoftmaxEntry
struct CuDNNThreadEntry {
CuDNNThreadEntry();
~CuDNNThreadEntry();
bool exists() const { return handle; }
cudnnHandle_t handle{nullptr};
ConvEntry conv_entry;
SoftmaxEntry softmax_entry;
runtime::DeviceAPI* cuda_api{nullptr};
static CuDNNThreadEntry* ThreadLocal(bool check_exists = true);
}; // CuDNNThreadEntry
void SetConvDescriptors(CuDNNThreadEntry* entry_ptr, int format, int dims, int groups,
const int pad[], const int stride[], const int dilation[], int64_t x_dim[],
int64_t w_dim[], int64_t y_dim[], DLDataType data_dtype,
const std::string& conv_dtype);
} // namespace contrib
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_CUDNN_CUDNN_UTILS_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/curand/helper_cuda_kernels.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef TVM_RUNTIME_CONTRIB_CURAND_HELPER_CUDA_KERNELS_H_
#define TVM_RUNTIME_CONTRIB_CURAND_HELPER_CUDA_KERNELS_H_
#include <curand.h>
#include <tvm/runtime/registry.h>
namespace tvm {
namespace runtime {
namespace curand {
/*!
* \brief An auxiliary function to convert an FP32 array to FP16.
* \param src The source FP32 array.
* \param dst The destination FP16 array.
* \param num The number of elements in the array.
*/
void ConvertFp32toFp16(const void* src, void* dst, int64_t num);
} // namespace curand
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_CURAND_HELPER_CUDA_KERNELS_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/dnnl/dnnl_kernel.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/runtime/contrib/dnnl/dnnl_kernel.h
* \brief Use external dnnl library kernels.
*/
#ifndef TVM_RUNTIME_CONTRIB_DNNL_DNNL_KERNEL_H_
#define TVM_RUNTIME_CONTRIB_DNNL_DNNL_KERNEL_H_
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/logging.h>
#include <tvm/runtime/registry.h>
#include <vector>
#include "dnnl.hpp"
namespace tvm {
namespace runtime {
namespace contrib {
using namespace dnnl;
extern "C" TVM_DLL void dnnl_conv2d(float* data, float* weights, float* out, int p_N_, int p_C_,
int p_H_, int p_W_, int p_O_, int p_G_, int p_Ph0_, int p_Pw0_,
int p_Ph1_, int p_Pw1_, int p_Kh_, int p_Kw_, int p_Sh_,
int p_Sw_);
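// Illustrative call (a sketch with hypothetical sizes): a 1x3x224x224 input convolved with
// sixteen 3x3 filters, stride 1, symmetric padding 1, no grouping:
//   dnnl_conv2d(data, weights, out,
//               /*p_N_=*/1, /*p_C_=*/3, /*p_H_=*/224, /*p_W_=*/224,
//               /*p_O_=*/16, /*p_G_=*/1,
//               /*p_Ph0_=*/1, /*p_Pw0_=*/1, /*p_Ph1_=*/1, /*p_Pw1_=*/1,
//               /*p_Kh_=*/3, /*p_Kw_=*/3, /*p_Sh_=*/1, /*p_Sw_=*/1);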
extern "C" TVM_DLL void dnnl_fused_conv2d_relu(float* data, float* weights, float* out, int p_N_,
int p_C_, int p_H_, int p_W_, int p_O_, int p_G_,
int p_Ph0_, int p_Pw0_, int p_Ph1_, int p_Pw1_,
int p_Kh_, int p_Kw_, int p_Sh_, int p_Sw_);
extern "C" TVM_DLL void dnnl_fused_conv2d_bias_relu(float* data, float* weights, float* bias,
float* out, int p_N_, int p_C_, int p_H_,
int p_W_, int p_O_, int p_G_, int p_Ph0_,
int p_Pw0_, int p_Ph1_, int p_Pw1_, int p_Kh_,
int p_Kw_, int p_Sh_, int p_Sw_);
extern "C" TVM_DLL void dnnl_dense(float* data, float* weight, float* out, int p_B_, int p_I_,
int p_O_);
extern "C" TVM_DLL void dnnl_relu(float* data, float* out, std::vector<int64_t> shape);
extern "C" TVM_DLL void dnnl_bn(float* data, float* gamma, float* beta, float* mean,
float* variance, float* out, float* new_mean, float* new_variance,
int p_n_, int p_c_, int p_h_, int p_w_, int p_e_);
extern "C" TVM_DLL void dnnl_binary_op(float* data, float* weight, float* out, int binary_algo,
std::vector<int64_t> shape);
} // namespace contrib
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_DNNL_DNNL_KERNEL_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/dnnl/dnnl_tensor_requisite.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
 * \file src/runtime/contrib/dnnl/dnnl_tensor_requisite.h
* \brief Helper TR wrapper to simplify tensors processing
*/
#ifndef TVM_RUNTIME_CONTRIB_DNNL_DNNL_TENSOR_REQUISITE_H_
#define TVM_RUNTIME_CONTRIB_DNNL_DNNL_TENSOR_REQUISITE_H_
#include <dlpack/dlpack.h>
#include <algorithm>
#include <cstdint>
#include <limits>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>
// TODO(@apeskov): Have to mute warning from dnnl headers.
// -Wzero-as-null-pointer-constant and -Wdocumentation-unknown-command
#include <dnnl.hpp>
#include "dnnl_utils.h"
namespace tvm {
namespace runtime {
namespace contrib {
using namespace utils;
/*!
* \brief Helper object to simplify tensor transformation description.
*
 * Allows specifying an original source tensor and the future actions which should be applied
 * to it. Can be treated as a sequence of reorderings or reinterpretations of the original
 * source tensor. Finally, a TR can be resolved as a proper interpretation of the source memory
 * buffer, or as a sequence of dnnl::reorder operators which will provide the desired data.
 *
 * \note An empty TR object allows any manipulation; an empty TR will be returned.
*
* \sa TensorRegistry
*
* Example:
* \code
* dnnl::memory src_mem = ...; // 5D tensor, shape {5, 2, 128, 128, 8}
*
* // Construct TR
* auto tr = TensorRequisite.AsIs(src_mem, eid); // 5D
*
* // describe sequence of layout transformation
* tr = tr.TreatAs("ABCD8b"); // 4D
* tr = tr.Permute({0, 2, 3, 1}); // Permute axes NCHW -> NHWC
* tr = tr.Crop({1, 128, 128, 16}, {0, 0, 0}); // extract first batch element
* tr = tr.Squeeze(); // 1D
*
* // register TR
* TensorRegistry t_reg;
* auto t_id = t_reg.register(tr);
*
* // Get final dnnl::memory object
* auto solver = t_reg.MakeSolver(ext_tensor_provider);
* auto mem = solver(t_id);
* \endcode
*
*/
class TensorRequisite {
public:
using Tid = uint32_t;
static constexpr Tid kUndefinedTid = std::numeric_limits<uint32_t>::max() - 1;
/*! \brief Empty constructor */
TensorRequisite() {}
/*! \brief Construct TR on top of existing memory object */
static TensorRequisite AsIs(const dnnl::memory& mem, Tid id = kUndefinedTid) {
auto res = AsIs(mem.get_desc(), id);
if (mem.get_data_handle() != nullptr) res.mem_ = mem;
return res;
}
/*! \brief Construct TR on top of existing memory descriptor object */
static TensorRequisite AsIs(const dnnl::memory::desc& desc, Tid id = kUndefinedTid) {
return {desc, {}, false, {}, id, false};
}
/*! \brief return logical shape of tensor */
dnnl::memory::dims dims() const { return t_desc_.dims(); }
/*! \brief return data type of tensor */
dnnl::memory::data_type data_type() const { return t_desc_.data_type(); }
/*! \brief return tensor desc */
dnnl::memory::desc desc() const { return t_desc_; }
Tid eid() const {
auto res = kUndefinedTid;
if (!defined()) {
res = kUndefinedTid;
} else if (eid_ == kUndefinedTid) {
if (orig_) {
res = orig_->eid();
} else {
res = kUndefinedTid;
}
} else {
res = eid_;
}
return res;
}
/*! \brief Make TR with backward dataflow */
TensorRequisite Backward() const {
if (!defined()) return *this;
ICHECK(orig_ == nullptr);
return {t_desc_, orig_, reinterpret_, mem_, eid_, true};
}
/*! \brief Produce TR with permuted axes */
TensorRequisite Permute(const std::vector<int>& permutation) const {
if (!defined()) return *this; // nothing for empty TR
auto orig = std::make_shared<TensorRequisite>(*this);
// reinterpret memory buffer with new strides
auto desc = t_desc_.permute_axes(permutation);
return {desc, orig, true, {}, kUndefinedTid, reverse_data_flow_};
}
/*! \brief Produce TR with reinterpret data of original tr */
TensorRequisite Reshape(const dnnl::memory::dims& shape) const {
if (!defined()) return *this; // nothing for empty TR
if (t_desc_.dims() == shape) return *this;
auto orig = std::make_shared<TensorRequisite>(*this);
// reinterpret memory buffer with new strides
auto desc = t_desc_.reshape(shape);
return {desc, orig, true, {}, kUndefinedTid, reverse_data_flow_};
}
/*! \brief Produce TR with broadcasted values */
TensorRequisite Broadcast(const dnnl::memory::dims& shape) const {
if (!defined()) return *this; // nothing for empty TR
if (t_desc_.dims() == shape) return *this;
ICHECK(!reverse_data_flow_);
auto orig = std::make_shared<TensorRequisite>(*this);
// numpy like broadcast
auto extended_dims = t_desc_.dims();
auto one_filled = dnnl::memory::dims(shape.size() - extended_dims.size(), 1);
extended_dims.insert(extended_dims.begin(), one_filled.begin(), one_filled.end());
auto desc = t_desc_.reshape(extended_dims);
for (size_t i = 0; i < extended_dims.size(); i++) {
if (extended_dims[i] == shape[i]) continue;
ICHECK(extended_dims[i] == 1);
ICHECK(desc.data.dims[i] == desc.data.padded_dims[i]);
desc.data.dims[i] = shape[i];
desc.data.padded_dims[i] = shape[i];
desc.data.format_desc.blocking.strides[i] = 0;
}
// reinterpret memory buffer with new strides
return {desc, orig, true, {}, kUndefinedTid, reverse_data_flow_};
}
/*! \brief Produce TR with sub memory view (ROI) */
TensorRequisite Crop(const dnnl::memory::dims& shape, const dnnl::memory::dims& offset) const {
if (!defined()) return *this; // nothing for empty TR
ICHECK_EQ(shape.size(), t_desc_.dims().size());
ICHECK_EQ(offset.size(), t_desc_.dims().size());
auto orig = std::make_shared<TensorRequisite>(*this);
// reinterpret memory buffer with new strides
auto desc = t_desc_.submemory_desc(shape, offset, /*allow_empty=*/true);
// Originally DNNL implementation is very limited. Let's slightly enhance it.
if (!desc && t_desc_.data.format_kind == dnnl_blocked) {
bool offset_is_zero =
std::all_of(offset.begin(), offset.end(), [](auto el) { return el == 0; });
dnnl::memory::dims block_sizes(t_desc_.dims().size(), 1);
for (int i = 0; i < t_desc_.data.format_desc.blocking.inner_nblks; i++)
block_sizes[t_desc_.data.format_desc.blocking.inner_idxs[i]] *=
t_desc_.data.format_desc.blocking.inner_blks[i];
bool shape_reduction_less_than_block = true;
for (int i = 0; i < t_desc_.data.ndims; i++) {
shape_reduction_less_than_block &= t_desc_.data.dims[i] - shape[i] < block_sizes[i];
}
// This is auto padded case. Just update dims value.
if (offset_is_zero && shape_reduction_less_than_block) {
desc = t_desc_;
std::copy(shape.begin(), shape.end(), desc.data.dims);
}
}
ICHECK(desc);
return {desc, orig, true, {}, kUndefinedTid, reverse_data_flow_};
}
/*! \brief Produce TR with squeeze shape */
TensorRequisite Squeeze(const dnnl::memory::dims& dims_to_squeeze = {}) const {
if (!defined()) return *this; // nothing for empty TR
dnnl::memory::dims squeezed_dims;
if (dims_to_squeeze.empty()) {
for (auto d : t_desc_.dims())
if (d != 1) squeezed_dims.push_back(d);
} else {
for (size_t i = 0; i < t_desc_.dims().size(); i++)
if (std::find(dims_to_squeeze.begin(), dims_to_squeeze.end(), i) == dims_to_squeeze.end())
squeezed_dims.push_back(t_desc_.dims()[i]);
}
if (squeezed_dims.empty()) squeezed_dims = {1};
auto orig = std::make_shared<TensorRequisite>(*this);
// reinterpret memory buffer with new strides
auto desc = t_desc_.reshape(squeezed_dims);
return {desc, orig, true, {}, kUndefinedTid, reverse_data_flow_};
}
/*! \brief Produce TR with specified layout descriptor */
TensorRequisite RequestLayout(dnnl::memory::desc desc) const {
if (!defined()) return *this; // nothing for empty TR
// If it's the same desc just return self
if (desc == t_desc_) return *this;
ICHECK(t_desc_.dims() == desc.dims()) << "Requested layout is not compatible with "
"presented shape";
auto orig = std::make_shared<TensorRequisite>(*this);
return {desc, orig, false, {}, kUndefinedTid, reverse_data_flow_};
}
/*! \brief Define which logical dims ordering is default for particular layout string. */
static std::string DefaultLogicLayoutFor(const std::string& layout) {
// Rank is all non digit marked dims
auto it = layout.begin();
while (it != layout.end() && !std::isdigit(*it)) it++;
int rank = std::distance(layout.begin(), it);
static const std::vector<std::string> sparse_dims = {"W", "HW", "DHW"};
if (layout.find("N") != std::string::npos) return "NC" + sparse_dims[rank - 3];
if (layout.find("G") != std::string::npos) return "GOI" + sparse_dims[rank - 4];
if (layout.find("O") != std::string::npos) return "OI" + sparse_dims[rank - 3];
LOG(FATAL) << "Unknown layout " << layout << "There is no default scheme to handle it";
return {};
}
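  // Worked example: for layout "NCHW8c" the leading non-digit-marked dims give rank 4 and the
  // string contains "N", so the deduced logical layout is "NC" + "HW" == "NCHW".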
/*!
* \brief Treat TR shape as described in layout string.
*
   * Blocked dimensions will be concatenated and put into the proper shape position
   * corresponding to the desired_logic_layout argument. If the desired logic layout is not
   * provided, it will be deduced automatically based on some internal heuristics.
*
* Limitation 1. Blocking dims should be dense. Dims marked with digits use natural strides.
* Limitation 2. Blocking dims are innermost. Dims marked like 8c, 4o goes after regular
* dimensions. NC8cHW4h4cD is not valid tensor in terms of DNNL. And cannot be
* achieved with memory reinterpretation, so data copy is required. Proper layout
* looks like NCHWD_8c4h4c, first part is outer dims, second digits marked part is
* innermost.
*/
TensorRequisite TreatAs(const std::string& layout, std::string desired_logic_layout = "") const {
if (!defined()) return *this;
if (desired_logic_layout.empty()) desired_logic_layout = DefaultLogicLayoutFor(layout);
const auto origin_dims = dims();
// split layout string to tokens {size, tag} like {16, 'C'}, {4, 'O'}
std::vector<std::pair<int, char>> layout_tokens;
for (auto it = layout.begin(); it != layout.end();) {
auto start = it;
while (std::isdigit(*it)) it++;
int blk_size = start == it ? -1 : std::stoi(std::string{start, it});
layout_tokens.push_back({blk_size, std::toupper(*it)});
it++;
}
// check applicability of layout
auto it = layout_tokens.begin();
while (it != layout_tokens.end() && it->first == -1) it++;
int rank = std::distance(layout_tokens.begin(), it);
while (it != layout_tokens.end()) {
ICHECK_NE(it->first, -1) << "DNNL limitation. Blocking dims should be innermost. "
<< "But received layout is " << layout;
it++;
}
ICHECK_EQ(layout_tokens.size(), origin_dims.size());
ICHECK_EQ(rank, desired_logic_layout.size()) << layout;
std::vector<std::pair<int, char>> outermost_tokens(layout_tokens.begin(),
layout_tokens.begin() + rank);
std::vector<std::pair<int, char>> innermost_tokens(layout_tokens.begin() + rank,
layout_tokens.end());
// define dim resulting dim positions
std::map<char, int> dim_position_by_tag;
for (size_t i = 0; i < desired_logic_layout.size(); i++)
dim_position_by_tag[std::toupper(desired_logic_layout[i])] = i;
// Construct resulting desc by modifying original one
dnnl::memory::desc res_desc = t_desc_;
memset(&res_desc.data.format_desc.blocking, 0, sizeof(res_desc.data.format_desc.blocking));
std::fill(res_desc.data.dims, res_desc.data.dims + DNNL_MAX_NDIMS, 0);
std::fill(res_desc.data.padded_dims, res_desc.data.padded_dims + DNNL_MAX_NDIMS, 0);
res_desc.data.ndims = rank;
res_desc.data.format_desc.blocking.inner_nblks = innermost_tokens.size();
auto res_dims = res_desc.data.dims;
auto res_strides = res_desc.data.format_desc.blocking.strides;
auto res_inner_blks = res_desc.data.format_desc.blocking.inner_blks;
auto res_inner_idxs = res_desc.data.format_desc.blocking.inner_idxs;
std::fill(res_dims, res_dims + rank, 1);
int orig_dim_idx = 0;
for (const auto& p : outermost_tokens) {
auto tag = p.second;
auto dim_size = origin_dims[orig_dim_idx];
auto result_dim_position = dim_position_by_tag[tag];
res_dims[result_dim_position] *= dim_size;
res_strides[result_dim_position] = t_desc_.data.format_desc.blocking.strides[orig_dim_idx];
orig_dim_idx++;
}
for (const auto& p : innermost_tokens) {
auto tag = p.second;
auto dim_size = origin_dims[orig_dim_idx];
auto result_dim_position = dim_position_by_tag[tag];
ICHECK_EQ(p.first, dim_size)
<< "Blocking layout is not applicable to tensor with shape: " << origin_dims
<< ". Requested layout is " << layout;
res_dims[result_dim_position] *= dim_size;
*res_inner_blks++ = dim_size;
*res_inner_idxs++ = result_dim_position;
orig_dim_idx++;
}
// Assume tensor is dense. There is no additional padding.
std::copy(res_desc.data.dims, res_desc.data.dims + rank, res_desc.data.padded_dims);
if (t_desc_ == res_desc) return *this;
auto orig = std::make_shared<TensorRequisite>(*this);
return {res_desc, orig, true, {}, kUndefinedTid, reverse_data_flow_};
}
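  // A minimal illustration of TreatAs(), assuming a TR built with AsIs() over a hypothetical 5D
  // blocked descriptor (shape values chosen only for this sketch):
  //
  //   TensorRequisite tr = TensorRequisite::AsIs(desc_5d);  // dims {8, 16, 14, 14, 8}
  //   auto blocked = tr.TreatAs("NCHW8c");
  //   // blocked.dims() == {8, 128, 14, 14}, with an inner 8c block on the channel dim.
  //   // Only the descriptor is changed, so no data copy is required (reinterpretation only).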
/*!
* \brief Produce TR with unspecified layout.
*
* Cannot be registered in TensorRegistry. Only for querying DNNL for preferred layouts.
*/
TensorRequisite LayoutAny() const {
auto orig = std::make_shared<TensorRequisite>(*this);
// Recreate tensor desc with layout 'any'
dnnl::memory::desc any_desc{t_desc_.dims(), t_desc_.data_type(), dnnl::memory::format_tag::any};
return {any_desc, orig, false, {}, kUndefinedTid, reverse_data_flow_};
}
  /*! \brief Check if the TR is constant. */
bool IsConstant() const {
if (orig_) return orig_->IsConstant();
return mem_.operator bool();
}
  /*! \brief Check if the tensor is a scalar. */
bool IsScalar() const { return t_desc_.dims().size() == 1 && t_desc_.dims()[0] == 1; }
/*! \brief Return const data memory if available. */
dnnl::memory GetConstData() const {
if (mem_) return mem_;
if (!orig_) return {};
if (auto orig_const_data = orig_->GetConstData()) {
if (reinterpret_) {
return {t_desc_, orig_const_data.get_engine(), orig_const_data.get_data_handle()};
} else {
auto eng = orig_const_data.get_engine();
auto res = dnnl::memory{t_desc_, eng};
dnnl::reorder(orig_const_data, res).execute(dnnl::stream(eng), orig_const_data, res);
return res;
}
}
return {};
}
/*!
* \brief Return const data memory in form of vector.
*
   * Same as GetConstData but uses std::vector instead of dnnl::memory. Works only for 1D tensor
   * and scalar TRs. Useful for specifying 1D DNNL attributes like zero_point or
   * per_channel_scale.
*/
template <typename T>
std::vector<T> GetConstDataLikeVec() const {
auto const_data = GetConstData();
auto desc = const_data.get_desc();
ICHECK(desc.data_type() == utils::DnnlDType<T>());
ICHECK(desc.dims().size() == 1);
auto size = desc.get_size() / sizeof(T);
auto ptr = static_cast<T*>(const_data.get_data_handle());
return std::vector<T>(ptr, ptr + size);
}
/*! \brief Get value of constant scalar tensor if possible. */
template <typename T>
T GetConstScalarData() const {
ICHECK(IsConstant());
ICHECK(IsScalar());
auto const_data = GetConstData();
auto desc = const_data.get_desc();
ICHECK(desc.data_type() == utils::DnnlDType<T>());
auto ptr = static_cast<T*>(const_data.get_data_handle());
return *ptr;
}
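  // A short usage sketch (scale_tr and zp_tr are hypothetical constant TRs built elsewhere):
  //
  //   std::vector<float> scales = scale_tr.GetConstDataLikeVec<float>();  // 1D per-channel scales
  //   int zp = zp_tr.GetConstScalarData<int>();                           // scalar zero point
  //
  // Both helpers require the TR to be constant (IsConstant() == true) and of a matching dtype;
  // GetConstScalarData() additionally requires IsScalar() == true.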
/*! \brief Check if tensor is not empty. */
bool defined() const { return !t_desc_.is_zero(); }
  /*! \brief Same as defined(). */
operator bool() const { return defined(); }
  /*!
   * \brief Check if the tensor represents a reversed data flow.
   * Useful for describing output processing.
   */
bool IsReversed() const { return reverse_data_flow_; }
private:
TensorRequisite(const dnnl::memory::desc& t_desc, const std::shared_ptr<TensorRequisite>& orig,
bool reinterpret, const dnnl::memory& const_mem, uint32_t eid,
bool reverse_data_flow)
: t_desc_(t_desc),
orig_(orig),
reinterpret_(reinterpret),
mem_(const_mem),
eid_(eid),
reverse_data_flow_(reverse_data_flow) {
if (mem_) ICHECK(!orig_ && !reverse_data_flow_ && eid_ == kUndefinedTid);
if (eid_ != kUndefinedTid) ICHECK(!orig_);
}
/* Descriptor of particular tensor */
dnnl::memory::desc t_desc_ = {};
/* Parent TR object which is referred from this TR */
std::shared_ptr<TensorRequisite> orig_ = {};
/* Flag to specify which action should be done with orig TR, reordering or reinterpretation */
bool reinterpret_ = false;
/* Const memory object if available */
dnnl::memory mem_ = {};
/* Entry ID of tensor if available */
uint32_t eid_ = kUndefinedTid;
/*
   * Flag to describe the reverse data flow case.
   * All operations on the queue will be executed in reverse order. Relevant for dst tensor
   * descriptions.
*/
bool reverse_data_flow_ = false;
friend class TensorRegistry;
};
/*!
 * \brief The registry of tensors. Implements matching of provided TRs to real memory buffers.
 *
 * Registration of a TR is performed by calling the Register() method, which returns an ArgId
 * object. An ArgId can be mapped to real memory via the memory solver created by MakeSolver().
 */
class TensorRegistry {
private:
enum ArgReqFlag {
    CONST,        ///< Constant tensor. ExecutionCTX independent.
    TMP_STORAGE,  ///< Intermediate tensors. Stored inside TensorRegistry. Inaccessible outside.
    EXT_EID,      ///< External data. Input or Output.
};
public:
struct ArgId {
TensorRegistry::ArgReqFlag flag_;
uint32_t idx_;
};
using Action = std::tuple<dnnl::primitive, std::unordered_map<int, ArgId>>;
using ActionQue = std::vector<Action>;
using DLTensorProvider = std::function<const DLTensor*(uint32_t)>;
using MemSolver = std::function<const dnnl::memory(ArgId)>;
TensorRegistry() = default;
TensorRegistry(const dnnl::engine& eng, const std::set<uint32_t>& ext_io_eid)
: tmp_mem_collection_(1), ext_io_eid_(ext_io_eid), eng_(eng), stream_(eng) {}
  /*!
   * \brief Register a TR in the registry.
   *
   * Resolution of a TR may lead to the introduction of intermediate memory buffers and additional
   * transformation actions which should be performed before or after usage of the corresponding
   * memory buffer. Additional actions will be appended to the provided action queue. Depending on
   * the tr.IsReversed() value, the actions should be executed before or after usage of the
   * resulting ArgId.
   *
   * \param tr tensor requisite sequence to register
   * \param action resulting action queue. If TR resolution requires the execution of some
   *               transformation actions, they will be put here
   * \return associated ArgId. Should be used as an argument for MemSolver.
   */
ArgId Register(const TensorRequisite& tr, ActionQue* action) {
// 1) Constant tensor. Direct reference
if (auto const_data = tr.GetConstData()) {
auto idx = const_mem_collection_.size();
const_mem_collection_.push_back(const_data);
return MakeArgReq(ArgReqFlag::CONST, static_cast<uint32_t>(idx));
}
// 2) EID mapped tensor. Direct reference
if (tr.eid_ != TensorRequisite::kUndefinedTid) {
if (ext_io_eid_.count(tr.eid_) == 0) { // Not IO tensor, means it's intermediate
if (eid2idx_tmp_.count(tr.eid_)) {
auto idx = eid2idx_tmp_.at(tr.eid_);
return MakeArgReq(ArgReqFlag::TMP_STORAGE, idx);
} else {
        // register itself
auto idx = tmp_mem_collection_.size();
tmp_mem_collection_.push_back(tr.t_desc_);
eid2idx_tmp_[tr.eid_] = idx;
return MakeArgReq(ArgReqFlag::TMP_STORAGE, static_cast<uint32_t>(idx));
}
} else {
auto idx = ext_mem_collection_.size();
ext_mem_collection_.push_back({tr.eid_, tr.t_desc_});
return MakeArgReq(ArgReqFlag::EXT_EID, static_cast<uint32_t>(idx));
}
}
// 3) Tensors with transform actions
if (tr.orig_) {
// recursive register of orig TR
auto orig_arg_req = Register(*tr.orig_, action);
if (tr.reinterpret_) {
return RegisterReinterpret(orig_arg_req, tr.t_desc_);
} else {
return RegisterReorder(orig_arg_req, tr.t_desc_, tr.reverse_data_flow_, action);
}
}
// 4) Scratchpad
ICHECK(!tr.orig_ && !tr.mem_ && tr.eid_ == TensorRequisite::kUndefinedTid);
auto idx = tmp_mem_collection_.size();
tmp_mem_collection_.push_back(tr.t_desc_);
tmp_mem_mapping_[idx] = 0; // zero position tmp mem object is reserved for scratchpads
auto scratchpad_size = tr.t_desc_.get_size();
auto glob_scratchpad_size = tmp_mem_collection_[0].get_size();
if (scratchpad_size > glob_scratchpad_size) {
tmp_mem_collection_[0] =
dnnl::memory::desc({static_cast<dnnl::memory::dim>(scratchpad_size)},
dnnl::memory::data_type::u8, dnnl::memory::format_tag::a);
}
return MakeArgReq(TMP_STORAGE, static_cast<uint32_t>(idx));
}
/*!
* \brief Construct memory solver for all registered TRs.
* \param ext_provider callback to resolve external IO buffers
* \return memory solver object to match ArgId to dnnl::memory objects
*/
MemSolver MakeSolver(const DLTensorProvider& ext_provider) const {
return MemSolverImpl(eng_, ext_provider, const_mem_collection_, ext_mem_collection_,
tmp_mem_collection_, tmp_mem_mapping_);
}
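  // A minimal workflow sketch, assuming TRs and an external-data callback built elsewhere:
  //
  //   TensorRegistry::ActionQue pre_actions;
  //   auto arg_id = registry.Register(some_tr, &pre_actions);  // may append reorder actions
  //   auto solver = registry.MakeSolver(ext_provider);         // ext_provider: eid -> DLTensor*
  //   dnnl::memory mem = solver(arg_id);                       // real memory for this ArgId
  //   // pre_actions (or post-actions, for reversed TRs) must be executed around primitive runs.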
void MarkInplace(const TensorRequisite& tr, const TensorRequisite& shared) {
const auto tr_id = tr.eid();
ICHECK(tr_id != TensorRequisite::kUndefinedTid);
const auto shared_id = shared.eid();
ICHECK(shared_id != TensorRequisite::kUndefinedTid);
eid2idx_tmp_[tr_id] = eid2idx_tmp_[shared_id];
}
private:
ArgId RegisterReinterpret(ArgId src_ar, const dnnl::memory::desc& desc) {
switch (src_ar.flag_) {
case TMP_STORAGE: {
auto idx = tmp_mem_collection_.size();
tmp_mem_collection_.push_back(desc);
tmp_mem_mapping_[idx] = src_ar.idx_;
return MakeArgReq(TMP_STORAGE, idx);
}
case EXT_EID: {
auto ext_req = ext_mem_collection_[src_ar.idx_];
auto idx = ext_mem_collection_.size();
ext_mem_collection_.push_back({ext_req.first, desc});
return MakeArgReq(EXT_EID, idx);
}
default:
LOG(FATAL) << "Unknown case";
}
return {};
}
ArgId RegisterReorder(ArgId src_ar, const dnnl::memory::desc& desc, bool reverse_data_flow,
ActionQue* action) {
ICHECK(src_ar.flag_ == TMP_STORAGE || src_ar.flag_ == EXT_EID);
auto src_desc = src_ar.flag_ == TMP_STORAGE ? tmp_mem_collection_[src_ar.idx_]
: ext_mem_collection_[src_ar.idx_].second;
auto idx = tmp_mem_collection_.size();
tmp_mem_collection_.push_back(desc);
auto dst_ar = MakeArgReq(TMP_STORAGE, idx);
// reorder action submit
if (reverse_data_flow) {
auto reorder_pd = dnnl::reorder::primitive_desc(eng_, desc, eng_, src_desc);
action->insert(action->begin(),
{dnnl::reorder(reorder_pd), {{DNNL_ARG_FROM, dst_ar}, {DNNL_ARG_TO, src_ar}}});
} else {
auto reorder_pd = dnnl::reorder::primitive_desc(eng_, src_desc, eng_, desc);
action->push_back(
{dnnl::reorder(reorder_pd), {{DNNL_ARG_FROM, src_ar}, {DNNL_ARG_TO, dst_ar}}});
}
return dst_ar;
}
/*! \brief Implementation of memory solver */
class MemSolverImpl {
public:
MemSolverImpl(const dnnl::engine& eng, const DLTensorProvider& ext_data_provider,
const std::vector<dnnl::memory>& const_mems,
const std::vector<std::pair<uint32_t, dnnl::memory::desc>>& ext_mems,
const std::vector<dnnl::memory::desc>& tmp_mem_descs,
const std::map<size_t, size_t>& tmp_mem_mapping)
: eng_(eng),
ext_data_provider_(ext_data_provider),
const_mems_(const_mems),
ext_mems_(ext_mems) {
      // Construct temp memory objects on the fly while we have no scratchpad
      // support at the VM/GraphExecutor level.
tmp_mems_.resize(tmp_mem_descs.size());
for (size_t i = 0; i < tmp_mem_descs.size(); i++) {
auto found = tmp_mem_mapping.find(i);
if (found != tmp_mem_mapping.end()) {
auto reuse_hdl = tmp_mems_[found->second].get_data_handle();
tmp_mems_[i] = dnnl::memory(tmp_mem_descs[i], eng_, reuse_hdl);
} else {
tmp_mems_[i] = dnnl::memory(tmp_mem_descs[i], eng_);
}
}
}
/*! \brief Find memory object associated with provided ArgId */
dnnl::memory operator()(const ArgId& ar) const {
switch (ar.flag_) {
case CONST:
return const_mems_.at(ar.idx_);
case TMP_STORAGE:
return tmp_mems_.at(ar.idx_);
case EXT_EID: {
auto eid_and_desc = ext_mems_.at(ar.idx_);
auto eid = eid_and_desc.first;
auto desc = eid_and_desc.second;
auto ext_dl_tensor = ext_data_provider_(eid);
ICHECK(ext_dl_tensor->data);
return dnnl::memory{desc, eng_, ext_dl_tensor->data};
}
}
return {};
}
private:
const dnnl::engine& eng_;
const DLTensorProvider& ext_data_provider_;
const std::vector<dnnl::memory>& const_mems_;
const std::vector<std::pair<uint32_t, dnnl::memory::desc>>& ext_mems_;
std::vector<dnnl::memory> tmp_mems_;
};
ArgId MakeArgReq(ArgReqFlag flag, uint32_t idx) { return {flag, idx}; }
/* Collection of const memory objects. */
std::vector<dnnl::memory> const_mem_collection_;
/* Collection of intermediate memory descriptors. Zero position is reserved for scratchpads. */
std::vector<dnnl::memory::desc> tmp_mem_collection_;
/* Mapping of some temp buffer on previously registered. */
std::map<size_t, size_t> tmp_mem_mapping_;
  /* Collection of external/intermediate memory objects.
   * first - eid of the external buffer to ask for
   * second - t_desc describing how to treat the external buffer */
std::vector<std::pair<uint32_t, dnnl::memory::desc>> ext_mem_collection_;
/* Map of eid to index of temp buffer in tmp_mem_collection_ */
std::unordered_map<uint32_t, size_t> eid2idx_tmp_;
/* List of external eid */
std::set<uint32_t> ext_io_eid_;
/* Engine of all tensors existing in this registry */
dnnl::engine eng_;
  /* Execution stream used to reorder const data */
dnnl::stream stream_;
};
} // namespace contrib
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_DNNL_DNNL_TENSOR_REQUISITE_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/dnnl/dnnl_utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
 * \file src/runtime/contrib/dnnl/dnnl_utils.h
* \brief Some DNNL specific utility functions
*/
#ifndef TVM_RUNTIME_CONTRIB_DNNL_DNNL_UTILS_H_
#define TVM_RUNTIME_CONTRIB_DNNL_DNNL_UTILS_H_
#include <cstdint>
#include <ostream>
#include <string>
#include <vector>
// TODO(@apeskov): Have to mute warning from dnnl headers.
// -Wzero-as-null-pointer-constant and -Wdocumentation-unknown-command
#include <dnnl.hpp>
#include "tvm/runtime/data_type.h"
namespace tvm {
namespace runtime {
namespace contrib {
/*!
* \brief Convert a DLPack data type to a DNNL data type.
* \param dltype The DLPack data type.
* \return The corresponding DNNL data type.
*/
dnnl::memory::data_type dtype_dl2dnnl(DLDataType dltype);
/*!
 * \brief Convert a TVM shape to DNNL dims
 * \param shape The TVM shape
 * \return The dims in terms of DNNL
*/
dnnl::memory::dims shape_dl2dnnl(const std::vector<int64_t>& shape);
/*!
* \brief Construct plain tensor descriptor
* \param shape provided shape
* \param dltype provided data type
* \return resulting plain tensor desc
*/
dnnl::memory::desc MakePlainDesc(const std::vector<int64_t>& shape, DLDataType dltype);
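// A small usage sketch of the converters above (shape and dtype values are illustrative only):
//
//   std::vector<int64_t> shape = {1, 3, 224, 224};
//   DLDataType f32{kDLFloat, 32, 1};
//   dnnl::memory::desc desc = MakePlainDesc(shape, f32);   // plain (row-major) f32 descriptor
//   dnnl::memory::dims dims = shape_dl2dnnl(shape);        // {1, 3, 224, 224}
//   dnnl::memory::data_type dt = dtype_dl2dnnl(f32);       // dnnl::memory::data_type::f32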
namespace utils {
/*! \brief Pretty printer util for shape */
inline std::ostream& operator<<(std::ostream& o, const dnnl::memory::dims& dims) {
o << "[";
auto d = dims.begin();
if (d != dims.end()) o << *d++;
while (d != dims.end()) o << "," << *d++;
o << "]";
return o;
}
/*! \brief Pretty printer util for data type */
inline std::ostream& operator<<(std::ostream& o, const dnnl::memory::data_type& type) {
std::string name = "undef";
switch (type) {
case dnnl::memory::data_type::undef:
name = "undef";
break;
case dnnl::memory::data_type::f32:
name = "fp32";
break;
case dnnl::memory::data_type::f16:
name = "fp16";
break;
case dnnl::memory::data_type::bf16:
name = "bf16";
break;
case dnnl::memory::data_type::s32:
name = "i32";
break;
case dnnl::memory::data_type::s8:
name = "i8";
break;
case dnnl::memory::data_type::u8:
name = "u8";
break;
}
o << name;
return o;
}
/*! \brief Convert a data type template argument to a runtime object */
template <typename T>
inline dnnl::memory::data_type DnnlDType();
template <>
inline dnnl::memory::data_type DnnlDType<int>() {
return dnnl::memory::data_type::s32;
}
template <>
inline dnnl::memory::data_type DnnlDType<float>() {
return dnnl::memory::data_type::f32;
}
template <>
inline dnnl::memory::data_type DnnlDType<uint8_t>() {
return dnnl::memory::data_type::u8;
}
template <>
inline dnnl::memory::data_type DnnlDType<int8_t>() {
return dnnl::memory::data_type::s8;
}
} // namespace utils
} // namespace contrib
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_DNNL_DNNL_UTILS_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/edgetpu/edgetpu_runtime.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \brief EdgeTPU runtime that can run tflite model compiled
* for EdgeTPU containing only tvm PackedFunc.
* \file edgetpu_runtime.h
*/
#ifndef TVM_RUNTIME_CONTRIB_EDGETPU_EDGETPU_RUNTIME_H_
#define TVM_RUNTIME_CONTRIB_EDGETPU_EDGETPU_RUNTIME_H_
#include <edgetpu.h>
#include <memory>
#include <string>
#include "../tflite/tflite_runtime.h"
#include "edgetpu.h"
namespace tvm {
namespace runtime {
/*!
* \brief EdgeTPU runtime.
*
* This runtime can be accessed in various languages via
* the TVM runtime PackedFunc API.
*/
class EdgeTPURuntime : public TFLiteRuntime {
public:
/*!
* \brief Destructor of EdgeTPURuntime.
*
   * NOTE: The tflite::Interpreter member should be destructed before the EdgeTpuContext member.
   * If the order is reversed, a SEGV occurs in the destructor of tflite::Interpreter.
*/
~EdgeTPURuntime() { interpreter_.reset(); }
/*!
* \return The type key of the executor.
*/
const char* type_key() const final { return "EdgeTPURuntime"; }
/*!
* \brief Initialize the edge TPU tflite runtime with tflite model and device.
* \param tflite_model_bytes The tflite model.
* \param dev The device where the tflite model will be executed on.
*/
void Init(const std::string& tflite_model_bytes, Device dev);
private:
std::shared_ptr<edgetpu::EdgeTpuContext> edgetpu_context_;
};
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_EDGETPU_EDGETPU_RUNTIME_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/ethosn/ethosn_device.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file ethosn_device.h
* \brief Arm(R) Ethos(TM)-N NPU device integration.
*/
#ifndef TVM_RUNTIME_CONTRIB_ETHOSN_ETHOSN_DEVICE_H_
#define TVM_RUNTIME_CONTRIB_ETHOSN_ETHOSN_DEVICE_H_
#include <tvm/runtime/registry.h>
#include <vector>
#include "ethosn_runtime.h"
namespace tvm {
namespace runtime {
namespace ethosn {
namespace dl = ::ethosn::driver_library;
using tvm::runtime::TVMArgs;
bool Inference(tvm::runtime::TVMArgs args, dl::Network* npu,
const std::vector<uint32_t>& input_order, const std::vector<uint32_t>& output_order,
const std::vector<uint32_t>& input_sizes, const std::vector<uint32_t>& output_sizes);
} // namespace ethosn
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_ETHOSN_ETHOSN_DEVICE_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/ethosn/ethosn_runtime.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file ethosn_runtime.h
* \brief Execution handling of Ethos-N command streams.
*/
#ifndef TVM_RUNTIME_CONTRIB_ETHOSN_ETHOSN_RUNTIME_H_
#define TVM_RUNTIME_CONTRIB_ETHOSN_ETHOSN_RUNTIME_H_
#include <tvm/runtime/packed_func.h>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>
#include "ethosn_driver_library/Network.hpp"
#include "ethosn_support_library/Support.hpp"
namespace tvm {
namespace runtime {
namespace ethosn {
namespace sl = ::ethosn::support_library;
namespace dl = ::ethosn::driver_library;
struct OrderedCompiledNetwork {
std::unique_ptr<sl::CompiledNetwork> compiled_cmm;
std::unique_ptr<dl::Network> runtime_cmm;
std::string name;
std::vector<uint32_t> inputs;
std::vector<uint32_t> outputs;
std::vector<uint32_t> input_sizes;
std::vector<uint32_t> output_sizes;
};
class EthosnModule : public ModuleNode {
public:
/*!
* \brief The Ethos-N runtime module.
* \param cmms A vector of compiled networks with input/output orders.
*/
explicit EthosnModule(std::vector<OrderedCompiledNetwork>* cmms);
/*!
* \brief Get a PackedFunc from the Ethos-N module.
* \param name The name of the function.
* \param sptr_to_self The ObjectPtr that points to this module node.
* \return The function pointer when it is found, otherwise, PackedFunc(nullptr).
*/
PackedFunc GetFunction(const std::string& name, const ObjectPtr<Object>& sptr_to_self) final;
/*!
* \brief Save a compiled network to a binary stream, which can then be
* serialized to disk.
* \param stream The stream to save the binary.
* \note See EthosnModule::LoadFromBinary for the serialization format.
*/
void SaveToBinary(dmlc::Stream* stream) final;
/*!
* \brief Load a compiled network from stream.
* \param strm The binary stream to load.
* \return The created Ethos-N module.
* \note The serialization format is:
*
* size_t : number of functions
* [
* std::string : name of function (symbol)
* std::string : serialized command stream
* size_t : number of inputs
* std::vector : order of inputs
* std::vector : buffer sizes for inputs
* size_t : number of outputs
* std::vector : order of outputs
* std::vector : buffer sizes for outputs
* ] * number of functions
*/
static Module LoadFromBinary(void* strm);
/*!
* \brief Save a module to a specified path.
* \param path Where to save the serialized module.
*/
void SaveToFile(const std::string& path, const std::string& format) override;
const char* type_key() const override { return "ethos-n"; }
private:
/*! \brief A map between ext_symbols (function names) and ordered compiled networks. */
std::map<std::string, OrderedCompiledNetwork> network_map_;
};
/*!
* \brief Error codes for evaluating the result of inference on the NPU.
*/
enum class InferenceWaitErrorCode { kSuccess = 0, kTimeout = 1, kError = 2 };
/*!
* \brief A helper class holding the status of inference on the NPU and
* associated error message(s) if any occurred.
*
* Similar to the implementation of 'WaitStatus' in the driver stack:
* https://github.com/ARM-software/ethos-n-driver-stack/blob/22.08/armnn-ethos-n-backend/workloads/EthosNPreCompiledWorkload.cpp#L48
*/
class InferenceWaitStatus {
public:
InferenceWaitStatus() : error_code_(InferenceWaitErrorCode::kSuccess), error_description_("") {}
explicit InferenceWaitStatus(InferenceWaitErrorCode errorCode, std::string errorDescription = "")
: error_code_(errorCode), error_description_(errorDescription) {}
InferenceWaitStatus(const InferenceWaitStatus&) = default;
InferenceWaitStatus(InferenceWaitStatus&&) = default;
InferenceWaitStatus& operator=(const InferenceWaitStatus&) = default;
InferenceWaitStatus& operator=(InferenceWaitStatus&&) = default;
explicit operator bool() const { return error_code_ == InferenceWaitErrorCode::kSuccess; }
InferenceWaitErrorCode GetErrorCode() const { return error_code_; }
std::string GetErrorDescription() const { return error_description_; }
private:
InferenceWaitErrorCode error_code_;
std::string error_description_;
};
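// A brief usage sketch; WaitForInference is a hypothetical helper used only for illustration:
//
//   InferenceWaitStatus status = WaitForInference(/* inference, timeout */);
//   if (!status) {
//     LOG(ERROR) << "NPU inference failed: " << status.GetErrorDescription();
//   }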
} // namespace ethosn
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_ETHOSN_ETHOSN_RUNTIME_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/ethosu/bare_metal/tvm_ethosu_runtime.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include "tvm_ethosu_runtime.h"
#include <ethosu_driver.h>
int32_t TVMEthosULaunch(tvm_device_ethos_u_t* context, void* cms_data, size_t cms_data_size,
uint64_t* base_addrs, size_t* base_addrs_size, int num_tensors) {
struct ethosu_driver* driver = (struct ethosu_driver*)context;
int32_t result =
ethosu_invoke(driver, cms_data, cms_data_size, base_addrs, base_addrs_size, num_tensors);
// Map errors in invoke to TVM errors
if (result != 0) {
return -1;
}
return 0;
}
int32_t TVMDeviceEthosUActivate(tvm_device_ethos_u_t* context) { return 0; }
int32_t TVMDeviceEthosUOpen(tvm_device_ethos_u_t* context) { return 0; }
int32_t TVMDeviceEthosUClose(tvm_device_ethos_u_t* context) { return 0; }
int32_t TVMDeviceEthosUDeactivate(tvm_device_ethos_u_t* context) { return 0; }
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/ethosu/bare_metal/tvm_ethosu_runtime.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef TVM_RUNTIME_CONTRIB_ETHOSU_BARE_METAL_TVM_ETHOSU_RUNTIME_H_
#define TVM_RUNTIME_CONTRIB_ETHOSU_BARE_METAL_TVM_ETHOSU_RUNTIME_H_
#include <ethosu_driver.h>
#include <stddef.h>
#include <stdint.h>
typedef void tvm_device_ethos_u_t;
int32_t TVMEthosULaunch(tvm_device_ethos_u_t* resource_handle, void* cms_data, size_t cms_data_size,
uint64_t* base_addrs, size_t* base_addrs_size, int num_tensors);
int32_t TVMDeviceEthosUActivate(tvm_device_ethos_u_t* context);
int32_t TVMDeviceEthosUOpen(tvm_device_ethos_u_t* context);
int32_t TVMDeviceEthosUClose(tvm_device_ethos_u_t* context);
int32_t TVMDeviceEthosUDeactivate(tvm_device_ethos_u_t* context);
#endif // TVM_RUNTIME_CONTRIB_ETHOSU_BARE_METAL_TVM_ETHOSU_RUNTIME_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/json/json_node.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/runtime/json/json_node.h
* \brief The graph nodes used by JSON runtime.
*/
#ifndef TVM_RUNTIME_CONTRIB_JSON_JSON_NODE_H_
#define TVM_RUNTIME_CONTRIB_JSON_JSON_NODE_H_
#include <dlpack/dlpack.h>
#include <dmlc/json.h>
#include <dmlc/memory_io.h>
#include <tvm/runtime/data_type.h>
#include <cstdint>
#include <cstdio>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
namespace tvm {
namespace runtime {
namespace json {
using namespace tvm::runtime;
using JSONGraphAttrs = std::unordered_map<std::string, dmlc::any>;
/*!
* \brief The node entry in the serialized json graph.
*/
class JSONGraphNodeEntry {
public:
// Constructors.
JSONGraphNodeEntry() = default;
JSONGraphNodeEntry(int id, int index, int version = 0)
: id_(id), index_(index), version_(version) {}
/*!
* \brief Serialize a node entry.
* \param writer The json writer.
*/
void Save(dmlc::JSONWriter* writer) const {
writer->BeginArray();
writer->WriteArrayItem(id_);
writer->WriteArrayItem(index_);
writer->WriteArrayItem(version_);
writer->EndArray();
}
/*!
* \brief Deserialize the json string into a node entry.
* \param reader The json reader.
*/
void Load(dmlc::JSONReader* reader) {
reader->BeginArray();
ICHECK(reader->NextArrayItem()) << "invalid json format";
reader->Read(&id_);
ICHECK(reader->NextArrayItem()) << "invalid json format";
reader->Read(&index_);
if (reader->NextArrayItem()) {
reader->Read(&version_);
ICHECK(!reader->NextArrayItem()) << "invalid json format";
} else {
version_ = 0;
}
}
/*! \brief The json graph node ID. */
uint32_t id_;
/*! \brief The entry index. */
uint32_t index_;
uint32_t version_;
};
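/*
 * For reference, a JSONGraphNodeEntry with id_ = 1, index_ = 0, version_ = 0 serializes to the
 * JSON array [1, 0, 0]. On load, a two-element array [1, 0] is also accepted and version_
 * defaults to 0.
 */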
/*!
* \brief The node of the serialized json graph. It includes an array of
* entries.
*/
class JSONGraphNode {
public:
// Constructors.
JSONGraphNode() = default;
JSONGraphNode(const std::string& name, const std::string& op_type,
const std::vector<JSONGraphNodeEntry>& inputs = {}, size_t num_outputs = 1) {
name_ = name;
op_type_ = op_type;
num_inputs_ = inputs.size();
inputs_ = inputs;
num_outputs_ = num_outputs;
}
/*!
* \brief Serialize a node so that it can be saved to disk.
* \param writer The json writer.
*/
void Save(dmlc::JSONWriter* writer) {
writer->BeginObject();
writer->WriteObjectKeyValue("op", op_type_);
writer->WriteObjectKeyValue("name", name_);
if (!inputs_.empty()) {
SetAttr("num_inputs", std::to_string(inputs_.size()));
SetAttr("num_outputs", std::to_string(num_outputs_));
writer->WriteObjectKeyValue("inputs", this->inputs_);
}
if (!attrs_.empty()) {
writer->WriteObjectKeyValue("attrs", attrs_);
}
writer->EndObject();
}
/*!
* \brief Load the attribute of a node in the json string.
* \param reader The json reader.
*/
void LoadAttrs(dmlc::JSONReader* reader) {
std::string key, value;
reader->BeginObject();
while (reader->NextObjectItem(&key)) {
if (key == "num_inputs") {
reader->Read(&value);
num_inputs_ = strtoul(value.c_str(), nullptr, 10);
} else if (key == "num_outputs") {
reader->Read(&value);
num_outputs_ = strtoul(value.c_str(), nullptr, 10);
} else if (key == "dtype") {
std::vector<std::string> tmp;
reader->BeginArray();
ICHECK(reader->NextArrayItem());
reader->Read(&tmp);
ICHECK(!reader->NextArrayItem());
for (const auto& it : tmp) {
dtype_.push_back(tvm::runtime::String2DLDataType(it));
}
} else if (key == "shape") {
reader->BeginArray();
ICHECK(reader->NextArrayItem());
reader->Read(&shape_);
ICHECK(!reader->NextArrayItem());
} else {
reader->BeginArray();
ICHECK(reader->NextArrayItem());
std::vector<std::string> tmp;
reader->Read(&tmp);
attrs_[key] = tmp;
ICHECK(!reader->NextArrayItem());
}
}
ICHECK_EQ(shape_.size(), dtype_.size());
}
/*!
* \brief Load a node in the json string.
* \param reader The json reader.
*/
void Load(dmlc::JSONReader* reader) {
reader->BeginObject();
std::string key;
while (reader->NextObjectItem(&key)) {
if (key == "op") {
reader->Read(&op_type_);
} else if (key == "name") {
reader->Read(&name_);
} else if (key == "inputs") {
reader->Read(&inputs_);
} else if (key == "attr" || key == "attrs") {
this->LoadAttrs(reader);
} else {
LOG(FATAL) << "Unknown key: " << key;
}
}
}
/*!
* \brief Check if a node is a leaf node, i.e. input to the graph.
*
* \return True if the node has no input, otherwise, false.
*/
bool IsLeaf() const { return inputs_.empty(); }
/*!
* \brief Return the number of outputs of the node.
*
* \return The number of the output.
*/
uint32_t GetNumOutput() const { return num_outputs_; }
/*!
* \brief Return the input entries.
*
* \return The input entries.
*/
std::vector<JSONGraphNodeEntry> GetInputs() const { return inputs_; }
/*!
* \brief Return the op type.
*
* \return The op type.
*/
std::string GetOpType() const { return op_type_; }
/*!
* \brief Return the op name.
*
* \return The op name.
*/
std::string GetOpName() const { return name_; }
/*!
* \brief Return the op output shapes.
*
* \return The shapes.
*/
std::vector<std::vector<int64_t>> GetOpShape() const { return shape_; }
/*!
* \brief Return the op types.
*
* \return The types.
*/
std::vector<DLDataType> GetOpDataType() const { return dtype_; }
/*!
* \brief Set the number of outputs of the node.
*
* \param num_outputs The number of output.
*/
void SetNumOutput(uint32_t num_outputs) { num_outputs_ = num_outputs; }
/*!
* \brief Get the value of an attribute in the node.
*
* \tparam T The return type.
* \param key The key for lookup.
*
* \return The value.
*/
template <typename T>
T GetAttr(const std::string& key) const {
    ICHECK_GT(attrs_.count(key), 0U) << "Key: " << key << " is not found";
return dmlc::get<T>(attrs_.at(key));
}
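  // Typical usage in a downstream codegen runtime (a sketch; attribute names depend on the op):
  //
  //   auto strides = node.GetAttr<std::vector<std::string>>("strides");
  //
  // Attributes are stored as string lists (see attrs_ below), so numeric values must be parsed
  // by the caller, e.g. with std::stoi.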
/*!
* \brief Set an attribute for the node.
*
* \tparam ValueT The type of the value being stored.
* \param key The key of the attribute.
* \param value The value of the attribute.
*/
template <typename ValueT>
void SetAttr(const std::string& key, const ValueT& value) {
attrs_[key] = value;
}
/*!
* \brief Check if node has attribute.
*
* \param key The key of the attribute.
*
* \return True if attribute exists, false otherwise.
*/
bool HasAttr(const std::string& key) const { return attrs_.find(key) != attrs_.end(); }
void CaptureAttrs(const JSONGraphNode& that) {
for (const auto& kv : that.attrs_) {
attrs_[kv.first] = kv.second;
}
}
virtual ~JSONGraphNode() {}
private:
/*! \brief The number of input. */
uint32_t num_inputs_{0};
/*! \brief The number of output. */
uint32_t num_outputs_{1};
/*! \brief The name of the op. It is the symbol that used for runtime lookup. */
std::string name_;
/*! \brief The operator type, i.e. input is "null". */
std::string op_type_;
/*! \brief The shape of the node. */
std::vector<std::vector<int64_t>> shape_;
/*! \brief The type of the node. */
std::vector<DLDataType> dtype_;
/*! \brief The inputs of the node. */
std::vector<JSONGraphNodeEntry> inputs_;
/*!
   * \brief Attributes of the node. For simplicity, we store all attributes as
   * lists of std::string. It's the developer's responsibility to check the
   * required attributes of a certain op and convert them into the needed types.
*
* For example, for conv2d, this map could contain:
* attrs_["strides"] = ["1", "1"]
* attrs_["padding"] = ["0", "0", "0", "0"]
* attrs_["data_layout"] = ["NCHW"]
*
   * When creating an execution engine, developers may need to use these
   * attributes and convert them into the needed types, e.g. padding to
   * int.
*/
JSONGraphAttrs attrs_;
friend class JSONRuntimeBase;
};
} // namespace json
} // namespace runtime
} // namespace tvm
namespace dmlc {
namespace json {
template <typename T>
inline bool SameType(const dmlc::any& data) {
return std::type_index(data.type()) == std::type_index(typeid(T));
}
template <>
struct Handler<std::unordered_map<std::string, dmlc::any>> {
inline static void Write(dmlc::JSONWriter* writer,
const std::unordered_map<std::string, dmlc::any>& data) {
for (const auto& kv : data) {
auto k = kv.first;
const dmlc::any& v = kv.second;
if (SameType<std::vector<dmlc::any>>(v)) {
writer->WriteObjectKeyValue(k, dmlc::get<std::vector<dmlc::any>>(v));
} else {
LOG(FATAL) << "Not supported";
}
}
writer->EndObject();
}
inline static void Read(dmlc::JSONReader* reader,
std::unordered_map<std::string, dmlc::any>* data) {
LOG(FATAL) << "Not implemented";
}
};
template <>
struct Handler<std::shared_ptr<tvm::runtime::json::JSONGraphNode>> {
inline static void Write(dmlc::JSONWriter* writer,
const std::shared_ptr<tvm::runtime::json::JSONGraphNode>& data) {
data->Save(writer);
}
inline static void Read(dmlc::JSONReader* reader,
std::shared_ptr<tvm::runtime::json::JSONGraphNode>* data) {
(*data)->Load(reader);
}
};
} // namespace json
} // namespace dmlc
#endif // TVM_RUNTIME_CONTRIB_JSON_JSON_NODE_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/json/json_runtime.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/runtime/contrib/json/json_runtime.h
* \brief Utilities for json runtime.
*/
#ifndef TVM_RUNTIME_CONTRIB_JSON_JSON_RUNTIME_H_
#define TVM_RUNTIME_CONTRIB_JSON_JSON_RUNTIME_H_
#include <tvm/runtime/module.h>
#include <tvm/runtime/ndarray.h>
#include <algorithm>
#include <cstddef>
#include <mutex>
#include <sstream>
#include <string>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
#include "json_node.h"
namespace tvm {
namespace runtime {
namespace json {
/*!
* \brief A json runtime that executes the serialized JSON format. This runtime
* can be extended by user defined runtime for execution.
*/
class JSONRuntimeBase : public ModuleNode {
public:
JSONRuntimeBase(const std::string& symbol_name, const std::string& graph_json,
const Array<String> const_names)
: symbol_name_(symbol_name), graph_json_(graph_json), const_names_(const_names) {
LoadGraph(graph_json_);
}
~JSONRuntimeBase() override = default;
const char* type_key() const override { return "json"; } // May be overridden
/*! \brief Initialize a specific json runtime. */
virtual void Init(const Array<NDArray>& consts) = 0;
  /*! \brief Invoke the execution engine to interpret a specific json runtime. */
virtual void Run() = 0;
/*!
* \brief Get a packed function.
* \param name The name/symbol of the function.
* \param sptr_to_self The pointer to the module node.
* \return The packed function.
*/
PackedFunc GetFunction(const std::string& name, const ObjectPtr<Object>& sptr_to_self) override {
if (name == "get_symbol") {
return PackedFunc(
[sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { *rv = this->symbol_name_; });
} else if (name == "get_const_vars") {
return PackedFunc(
[sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { *rv = this->const_names_; });
} else if (this->symbol_name_ == name) {
return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
ICHECK(this->initialized_) << "The module has not been initialized";
// Bind argument tensors to data entries.
this->SetInputOutputBuffers(args);
// Execute the subgraph.
this->Run();
});
} else if ("__init_" + this->symbol_name_ == name) {
// The function to initialize constant tensors.
return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
ICHECK_EQ(args.size(), 1U);
std::lock_guard<std::mutex> guard(this->initialize_mutex_);
if (!this->initialized_) {
this->Init(args[0]);
this->initialized_ = true;
}
*rv = 0;
});
} else {
return PackedFunc(nullptr);
}
}
void SaveToBinary(dmlc::Stream* stream) override {
// Save the symbol
stream->Write(symbol_name_);
// Save the graph
stream->Write(graph_json_);
// Save the required const names
std::vector<std::string> consts;
for (const auto& it : const_names_) {
consts.push_back(it);
}
stream->Write(consts);
}
template <typename T,
typename = typename std::enable_if<std::is_base_of<JSONRuntimeBase, T>::value>::type>
static Module LoadFromBinary(void* strm) {
dmlc::Stream* stream = static_cast<dmlc::Stream*>(strm);
std::string symbol;
std::string graph_json;
std::vector<std::string> consts;
// Load the symbol
ICHECK(stream->Read(&symbol)) << "Loading symbol name failed";
ICHECK(stream->Read(&graph_json)) << "Loading graph json failed";
ICHECK(stream->Read(&consts)) << "Loading the const name list failed";
Array<String> const_names;
for (const auto& it : consts) {
const_names.push_back(it);
}
auto n = make_object<T>(symbol, graph_json, const_names);
return Module(n);
}
/*!
* \brief Get the JSON generated by codegen.
*
* \param format the format to return.
* \return A string of JSON.
*/
std::string GetSource(const std::string& format = "json") override { return graph_json_; }
protected:
/*!
* \brief Set up the input and output buffers by binding their DLTensor pointers to the
* corresponding data entry.
*
* \param args The packed args.
*/
void SetInputOutputBuffers(const TVMArgs& args) {
ICHECK_EQ(args.size(), input_var_eid_.size() + outputs_.size())
<< "Found mismatch in the number of provided data entryies and required.";
for (size_t i = 0; i < static_cast<size_t>(args.size()); i++) {
auto eid = i < input_var_eid_.size() ? input_var_eid_[i]
: EntryID(outputs_[i - input_var_eid_.size()]);
ICHECK(args[i].type_code() == kTVMNDArrayHandle || args[i].type_code() == kTVMDLTensorHandle)
<< "Expect NDArray or DLTensor as inputs";
const DLTensor* arg;
if (args[i].IsObjectRef<NDArray>()) {
NDArray arr = args[i];
arg = arr.operator->();
} else {
arg = args[i].operator DLTensor*();
}
// Assign input/output the NDArray pointers to data entry so that we can directly
// read/write host buffers.
data_entry_[eid] = arg;
}
}
/*!
* \brief Load the graph and record the entries for inputs and constants.
*
* \param graph_json The graph in the json format.
*/
void LoadGraph(const std::string& graph_json) {
std::istringstream is(graph_json);
dmlc::JSONReader reader(&is);
this->Load(&reader);
std::vector<std::string> consts;
for (size_t i = 0; i < input_nodes_.size(); i++) {
uint32_t nid = input_nodes_[i];
std::string name = nodes_[nid].name_;
if (nodes_[nid].op_type_ == "input") {
ICHECK_EQ(nodes_[nid].GetOpShape().size(), nodes_[nid].GetOpDataType().size());
for (size_t j = 0; j < nodes_[nid].GetOpShape().size(); ++j) {
input_var_eid_.push_back(EntryID(nid, j));
}
nodes_[nid].SetNumOutput(nodes_[nid].GetOpShape().size());
} else {
ICHECK_EQ(nodes_[nid].op_type_, "const");
auto pos = std::find(std::begin(const_names_), std::end(const_names_), name);
ICHECK(pos != std::end(const_names_)) << "Found non-existent constant: " << name;
const_idx_.push_back(nid);
consts.push_back(name);
}
}
ICHECK_EQ(consts.size(), const_names_.size())
<< "Found mismatch for the number of constants in the graph and required.";
for (size_t i = 0; i < consts.size(); i++) {
ICHECK_EQ(consts[i], const_names_[i])
<< "The position of constant in the graph must be the same as the required.";
}
// Reserve data entries.
data_entry_.resize(NumEntries());
}
/*!
* \brief Set up the constants/weights for inference by binding their DLTensor pointer to
* the corresponding data entry.
*
* \param consts A list of constant NDArray to be used.
*/
void SetupConstants(const Array<NDArray>& consts) {
for (size_t i = 0; i < consts.size(); ++i) {
data_entry_[EntryID(const_idx_[i], 0)] = consts[i].operator->();
}
}
// Load the graph.
void Load(dmlc::JSONReader* reader) {
reader->BeginObject();
std::string key;
while (reader->NextObjectItem(&key)) {
if (key == "nodes") {
reader->Read(&nodes_);
} else if (key == "arg_nodes") {
reader->Read(&input_nodes_);
} else if (key == "node_row_ptr") {
reader->Read(&node_row_ptr_);
} else if (key == "heads") {
reader->Read(&outputs_);
} else {
LOG(FATAL) << "Unknown key: " << key;
}
}
}
// Get the node entry index.
uint32_t EntryID(uint32_t nid, uint32_t index) const { return node_row_ptr_[nid] + index; }
// Get the node entry index.
uint32_t EntryID(const JSONGraphNodeEntry& e) const { return EntryID(e.id_, e.index_); }
// Number of node entries.
uint32_t NumEntries() const { return node_row_ptr_.back(); }
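  // Worked example: with node_row_ptr_ = {0, 1, 3, 4}, node 1 owns entries 1 and 2, so
  // EntryID(1, 0) == 1, EntryID(1, 1) == 2, and NumEntries() == 4.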
protected:
/*! \brief The only subgraph name for this module. */
std::string symbol_name_;
/*! \brief The graph. */
std::string graph_json_;
/*! \brief The required constant names. */
Array<String> const_names_;
/*! \brief The json graph nodes. */
std::vector<JSONGraphNode> nodes_;
/*! \brief The input nodes, including variables and constants. */
std::vector<uint32_t> input_nodes_;
/*! \brief Used for quick entry indexing. */
std::vector<uint32_t> node_row_ptr_;
/*! \brief Output entries. */
std::vector<JSONGraphNodeEntry> outputs_;
/*! \brief Data of that entry. */
std::vector<const DLTensor*> data_entry_;
/*! \brief Map the input name to entry id. */
std::vector<uint32_t> input_var_eid_;
/*! \brief input const node index. */
std::vector<uint32_t> const_idx_;
/*! \brief Indicate if the engine has been initialized. */
bool initialized_{false};
/*! \brief Initializer mutex*/
std::mutex initialize_mutex_;
};
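/*
 * A minimal sketch of how an external runtime typically extends JSONRuntimeBase (the class and
 * engine details below are placeholders, not part of this header):
 *
 *   class MyJSONRuntime : public JSONRuntimeBase {
 *    public:
 *     MyJSONRuntime(const std::string& symbol_name, const std::string& graph_json,
 *                   const Array<String> const_names)
 *         : JSONRuntimeBase(symbol_name, graph_json, const_names) {}
 *     const char* type_key() const override { return "my_json"; }
 *     void Init(const Array<NDArray>& consts) override {
 *       ICHECK_EQ(consts.size(), const_idx_.size());
 *       SetupConstants(consts);  // bind constant buffers to data entries
 *       // ... build the engine-specific executable from nodes_ ...
 *     }
 *     void Run() override {
 *       // ... read inputs/outputs via data_entry_ and execute the engine ...
 *     }
 *   };
 */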
} // namespace json
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_JSON_JSON_RUNTIME_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/miopen/miopen_utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file Use external miopen utils function
*/
#ifndef TVM_RUNTIME_CONTRIB_MIOPEN_MIOPEN_UTILS_H_
#define TVM_RUNTIME_CONTRIB_MIOPEN_MIOPEN_UTILS_H_
#include <miopen/miopen.h>
#include <tvm/runtime/device_api.h>
#include <tvm/runtime/logging.h>
#include <string>
#include "../../rocm/rocm_common.h"
namespace tvm {
namespace contrib {
namespace miopen {
std::string miopenGetErrorString(int error_code);
#define MIOPEN_CALL(func) \
{ \
miopenStatus_t e = (func); \
ICHECK_EQ(e, miopenStatusSuccess) << "miopen error: " << miopenGetErrorString(e); \
}
struct ConvEntry {
miopenConvolutionDescriptor_t conv_desc;
miopenConvolutionMode_t mode{miopenConvolution};
miopenTensorDescriptor_t filter_desc;
miopenDataType_t data_type{miopenFloat};
miopenTensorDescriptor_t input_desc;
miopenTensorDescriptor_t output_desc;
miopenConvFwdAlgorithm_t fwd_algo;
Device device;
runtime::DeviceAPI* rocm_api;
void* workspace{nullptr};
size_t workspace_size{0};
ConvEntry();
~ConvEntry();
void UpdateWorkspace(const size_t wsize);
void CleanWorkspace();
}; // ConvThreadEntry
struct SoftmaxEntry {
miopenTensorDescriptor_t shape_desc;
SoftmaxEntry();
~SoftmaxEntry();
}; // SoftmaxEntry
struct MIOpenThreadEntry {
MIOpenThreadEntry();
~MIOpenThreadEntry();
miopenHandle_t handle{nullptr};
ConvEntry conv_entry;
SoftmaxEntry softmax_entry;
runtime::DeviceAPI* rocm_api{nullptr};
static MIOpenThreadEntry* ThreadLocal();
}; // MIOpenThreadEntry
} // namespace miopen
} // namespace contrib
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_MIOPEN_MIOPEN_UTILS_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/mps/mps_utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file Use external mps utils function
*/
#ifndef TVM_RUNTIME_CONTRIB_MPS_MPS_UTILS_H_
#define TVM_RUNTIME_CONTRIB_MPS_MPS_UTILS_H_
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>
#include <dmlc/thread_local.h>
#include <tvm/runtime/data_type.h>
#include <tvm/runtime/device_api.h>
#include <tvm/runtime/logging.h>
#include <tvm/runtime/registry.h>
#include <vector>
#include "../../metal/metal_common.h"
namespace tvm {
namespace contrib {
/*! \brief Convert DLTensor type to MPS type */
struct MPSType {
static MPSDataType DLTypeToMPSType(const DLDataType& dtype);
}; // struct MPSType
struct MetalThreadEntry {
MetalThreadEntry();
~MetalThreadEntry();
MPSImage* AllocMPSImage(id<MTLDevice> dev, MPSImageDescriptor* desc);
MPSTemporaryImage* AllocTempImage(id<MTLCommandBuffer> cb, MPSImageDescriptor* desc);
runtime::metal::MetalWorkspace* metal_api{nullptr};
static MetalThreadEntry* ThreadLocal();
std::vector<MPSImage*> img_table;
}; // MetalThreadEntry
} // namespace contrib
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_MPS_MPS_UTILS_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/nnpack/nnpack_utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file Use external nnpack library call.
*/
#ifndef TVM_RUNTIME_CONTRIB_NNPACK_NNPACK_UTILS_H_
#define TVM_RUNTIME_CONTRIB_NNPACK_NNPACK_UTILS_H_
#include <dmlc/thread_local.h>
#include <nnpack.h>
#include <tvm/runtime/data_type.h>
#include <tvm/runtime/logging.h>
#include <tvm/runtime/registry.h>
namespace tvm {
namespace contrib {
using namespace runtime;
struct NNPackThreadLocalEntry {
pthreadpool_t threadpool{nullptr};
static NNPackThreadLocalEntry* ThreadLocal();
};
bool NNPackConfig(uint64_t nthreads);
} // namespace contrib
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_NNPACK_NNPACK_UTILS_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/tachikoma/tachikoma_kernel.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/runtime/contrib/tachikoma/tachikoma_kernel.h
* \brief Use external tachikoma library kernels.
*/
#ifndef TVM_RUNTIME_CONTRIB_TACHIKOMA_TACHIKOMA_KERNEL_H_
#define TVM_RUNTIME_CONTRIB_TACHIKOMA_TACHIKOMA_KERNEL_H_
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/logging.h>
#include <tvm/runtime/registry.h>
#include <vector>
#include "dnnl.hpp"
namespace tachikoma = dnnl;
namespace tvm {
namespace runtime {
namespace contrib {
using namespace tachikoma;
extern "C" TVM_DLL void tachikoma_conv2d(float* data, float* weights, float* out, int p_N_, int p_C_,
int p_H_, int p_W_, int p_O_, int p_G_, int p_Ph0_, int p_Pw0_,
int p_Ph1_, int p_Pw1_, int p_Kh_, int p_Kw_, int p_Sh_,
int p_Sw_);
extern "C" TVM_DLL void tachikoma_fused_conv2d_relu(float* data, float* weights, float* out, int p_N_,
int p_C_, int p_H_, int p_W_, int p_O_, int p_G_,
int p_Ph0_, int p_Pw0_, int p_Ph1_, int p_Pw1_,
int p_Kh_, int p_Kw_, int p_Sh_, int p_Sw_);
extern "C" TVM_DLL void tachikoma_fused_conv2d_bias_relu(float* data, float* weights, float* bias,
float* out, int p_N_, int p_C_, int p_H_,
int p_W_, int p_O_, int p_G_, int p_Ph0_,
int p_Pw0_, int p_Ph1_, int p_Pw1_, int p_Kh_,
int p_Kw_, int p_Sh_, int p_Sw_);
extern "C" TVM_DLL void tachikoma_dense(float* data, float* weight, float* out, int p_B_, int p_I_,
int p_O_);
extern "C" TVM_DLL void tachikoma_relu(float* data, float* out, std::vector<int64_t> shape);
extern "C" TVM_DLL void tachikoma_bn(float* data, float* gamma, float* beta, float* mean,
float* variance, float* out, float* new_mean, float* new_variance,
int p_n_, int p_c_, int p_h_, int p_w_, int p_e_);
extern "C" TVM_DLL void tachikoma_binary_op(float* data, float* weight, float* out, int binary_algo,
std::vector<int64_t> shape);
} // namespace contrib
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_TACHIKOMA_TACHIKOMA_KERNEL_H_ | https://github.com/zk-ml/tachikoma |
src/runtime/contrib/tachikoma/tachikoma_tensor_requisite.h |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
 * \file src/runtime/contrib/tachikoma/tachikoma_tensor_requisite.h
 * \brief Helper TR wrapper to simplify tensor processing
*/
#ifndef TVM_RUNTIME_CONTRIB_TACHIKOMA_TACHIKOMA_TENSOR_REQUISITE_H_
#define TVM_RUNTIME_CONTRIB_TACHIKOMA_TACHIKOMA_TENSOR_REQUISITE_H_
#include <dlpack/dlpack.h>
#include <algorithm>
#include <cstdint>
#include <limits>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>
// TODO(@liaopeiyuan): Have to mute warning from tachikoma headers.
// -Wzero-as-null-pointer-constant and -Wdocumentation-unknown-command
#include <dnnl.hpp>
namespace tachikoma = dnnl;
#include "tachikoma_utils.h"
namespace tvm {
namespace runtime {
namespace contrib {
using namespace utils;
/*!
* \brief Helper object to simplify tensor transformation description.
*
 * Allows specifying the original source tensor and the future actions which should be applied to
 * it. Can be treated as a sequence of reorderings or reinterpretations of the original source
 * tensor. Finally, a TR can be solved as a proper interpretation of the source memory buffer, or
 * as a sequence of tachikoma::reorder operators which will provide the desired data.
 *
 * \note An empty TR object allows any manipulation; an empty TR will be returned.
*
* \sa TensorRegistry
*
* Example:
* \code
* tachikoma::memory src_mem = ...; // 5D tensor, shape {5, 2, 128, 128, 8}
*
* // Construct TR
 * auto tr = TensorRequisite::AsIs(src_mem, eid); // 5D
*
* // describe sequence of layout transformation
* tr = tr.TreatAs("ABCD8b"); // 4D
* tr = tr.Permute({0, 2, 3, 1}); // Permute axes NCHW -> NHWC
 * tr = tr.Crop({1, 128, 128, 16}, {0, 0, 0, 0}); // extract first batch element
 * tr = tr.Squeeze(); // 3D
*
* // register TR
 * TensorRegistry t_reg;
 * TensorRegistry::ActionQue action_queue;
 * auto t_id = t_reg.Register(tr, &action_queue);
*
* // Get final tachikoma::memory object
* auto solver = t_reg.MakeSolver(ext_tensor_provider);
* auto mem = solver(t_id);
* \endcode
*
*/
class TensorRequisite {
public:
using Tid = uint32_t;
static constexpr Tid kUndefinedTid = std::numeric_limits<uint32_t>::max() - 1;
/*! \brief Empty constructor */
TensorRequisite() {}
/*! \brief Construct TR on top of existing memory object */
static TensorRequisite AsIs(const tachikoma::memory& mem, Tid id = kUndefinedTid) {
auto res = AsIs(mem.get_desc(), id);
if (mem.get_data_handle() != nullptr) res.mem_ = mem;
return res;
}
/*! \brief Construct TR on top of existing memory descriptor object */
static TensorRequisite AsIs(const tachikoma::memory::desc& desc, Tid id = kUndefinedTid) {
return {desc, {}, false, {}, id, false};
}
/*! \brief return logical shape of tensor */
tachikoma::memory::dims dims() const { return t_desc_.dims(); }
/*! \brief return data type of tensor */
tachikoma::memory::data_type data_type() const { return t_desc_.data_type(); }
/*! \brief return tensor desc */
tachikoma::memory::desc desc() const { return t_desc_; }
/*! \brief Make TR with backward dataflow */
TensorRequisite Backward() const {
if (!defined()) return *this;
ICHECK(orig_ == nullptr);
return {t_desc_, orig_, reinterpret_, mem_, eid_, true};
}
/*! \brief Produce TR with permuted axes */
TensorRequisite Permute(const std::vector<int>& permutation) const {
if (!defined()) return *this; // nothing for empty TR
auto orig = std::make_shared<TensorRequisite>(*this);
// reinterpret memory buffer with new strides
auto desc = t_desc_.permute_axes(permutation);
return {desc, orig, true, {}, kUndefinedTid, reverse_data_flow_};
}
/*! \brief Produce TR with reinterpret data of original tr */
TensorRequisite Reshape(const tachikoma::memory::dims& shape) const {
if (!defined()) return *this; // nothing for empty TR
if (t_desc_.dims() == shape) return *this;
auto orig = std::make_shared<TensorRequisite>(*this);
// reinterpret memory buffer with new strides
auto desc = t_desc_.reshape(shape);
return {desc, orig, true, {}, kUndefinedTid, reverse_data_flow_};
}
/*! \brief Produce TR with broadcasted values */
TensorRequisite Broadcast(const tachikoma::memory::dims& shape) const {
if (!defined()) return *this; // nothing for empty TR
if (t_desc_.dims() == shape) return *this;
ICHECK(!reverse_data_flow_);
auto orig = std::make_shared<TensorRequisite>(*this);
// numpy like broadcast
auto extended_dims = t_desc_.dims();
auto one_filled = tachikoma::memory::dims(shape.size() - extended_dims.size(), 1);
extended_dims.insert(extended_dims.begin(), one_filled.begin(), one_filled.end());
auto desc = t_desc_.reshape(extended_dims);
for (size_t i = 0; i < extended_dims.size(); i++) {
if (extended_dims[i] == shape[i]) continue;
ICHECK(extended_dims[i] == 1);
ICHECK(desc.data.dims[i] == desc.data.padded_dims[i]);
desc.data.dims[i] = shape[i];
desc.data.padded_dims[i] = shape[i];
desc.data.format_desc.blocking.strides[i] = 0;
}
// reinterpret memory buffer with new strides
return {desc, orig, true, {}, kUndefinedTid, reverse_data_flow_};
}
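  // Example (hypothetical usage sketch): numpy-style broadcast of a {8} bias to a {16, 8} view.
  // The broadcasted axis gets a zero stride, so no data copy is required. bias_mem is assumed.
  //
  //   auto bias_tr = TensorRequisite::AsIs(bias_mem);  // shape {8}
  //   auto bcast_tr = bias_tr.Broadcast({16, 8});      // logical shape {16, 8}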
/*! \brief Produce TR with sub memory view (ROI) */
TensorRequisite Crop(const tachikoma::memory::dims& shape, const tachikoma::memory::dims& offset) const {
if (!defined()) return *this; // nothing for empty TR
ICHECK_EQ(shape.size(), t_desc_.dims().size());
ICHECK_EQ(offset.size(), t_desc_.dims().size());
auto orig = std::make_shared<TensorRequisite>(*this);
// reinterpret memory buffer with new strides
auto desc = t_desc_.submemory_desc(shape, offset, /*allow_empty=*/true);
// Originally Tachikoma implementation is very limited. Let's slightly enhance it.
if (!desc && t_desc_.data.format_kind == dnnl_blocked) {
bool offset_is_zero =
std::all_of(offset.begin(), offset.end(), [](auto el) { return el == 0; });
tachikoma::memory::dims block_sizes(t_desc_.dims().size(), 1);
for (int i = 0; i < t_desc_.data.format_desc.blocking.inner_nblks; i++)
block_sizes[t_desc_.data.format_desc.blocking.inner_idxs[i]] *=
t_desc_.data.format_desc.blocking.inner_blks[i];
bool shape_reduction_less_than_block = true;
for (int i = 0; i < t_desc_.data.ndims; i++) {
shape_reduction_less_than_block &= t_desc_.data.dims[i] - shape[i] < block_sizes[i];
}
// This is auto padded case. Just update dims value.
if (offset_is_zero && shape_reduction_less_than_block) {
desc = t_desc_;
std::copy(shape.begin(), shape.end(), desc.data.dims);
}
}
ICHECK(desc);
return {desc, orig, true, {}, kUndefinedTid, reverse_data_flow_};
}
/*! \brief Produce TR with squeeze shape */
TensorRequisite Squeeze(const tachikoma::memory::dims& dims_to_squeeze = {}) const {
if (!defined()) return *this; // nothing for empty TR
tachikoma::memory::dims squeezed_dims;
if (dims_to_squeeze.empty()) {
for (auto d : t_desc_.dims())
if (d != 1) squeezed_dims.push_back(d);
} else {
for (size_t i = 0; i < t_desc_.dims().size(); i++)
if (std::find(dims_to_squeeze.begin(), dims_to_squeeze.end(), i) == dims_to_squeeze.end())
squeezed_dims.push_back(t_desc_.dims()[i]);
}
if (squeezed_dims.empty()) squeezed_dims = {1};
auto orig = std::make_shared<TensorRequisite>(*this);
// reinterpret memory buffer with new strides
auto desc = t_desc_.reshape(squeezed_dims);
return {desc, orig, true, {}, kUndefinedTid, reverse_data_flow_};
}
/*! \brief Produce TR with specified layout descriptor */
TensorRequisite RequestLayout(tachikoma::memory::desc desc) const {
if (!defined()) return *this; // nothing for empty TR
// If it's the same desc just return self
if (desc == t_desc_) return *this;
ICHECK(t_desc_.dims() == desc.dims()) << "Requested layout is not compatible with "
"presented shape";
auto orig = std::make_shared<TensorRequisite>(*this);
return {desc, orig, false, {}, kUndefinedTid, reverse_data_flow_};
}
/*! \brief Define which logical dims ordering is default for particular layout string. */
static std::string DefaultLogicLayoutFor(const std::string& layout) {
// Rank is all non digit marked dims
auto it = layout.begin();
while (it != layout.end() && !std::isdigit(*it)) it++;
int rank = std::distance(layout.begin(), it);
static const std::vector<std::string> sparse_dims = {"W", "HW", "DHW"};
if (layout.find("N") != std::string::npos) return "NC" + sparse_dims[rank - 3];
if (layout.find("G") != std::string::npos) return "GOI" + sparse_dims[rank - 4];
if (layout.find("O") != std::string::npos) return "OI" + sparse_dims[rank - 3];
LOG(FATAL) << "Unknown layout " << layout << "There is no default scheme to handle it";
return {};
}
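  // Examples (hypothetical, illustrating the deduction heuristic above):
  //
  //   DefaultLogicLayoutFor("NCHW8c")     == "NCHW"
  //   DefaultLogicLayoutFor("OIHW16i16o") == "OIHW"
  //   DefaultLogicLayoutFor("GOIW4o")     == "GOIW"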
/*!
* \brief Treat TR shape as described in layout string.
*
 * Blocked dimensions will be concatenated and put into the proper shape position corresponding
 * to the desired_logic_layout argument. If the desired logical layout was not provided it will
 * be deduced automatically based on some internal heuristics.
*
 * Limitation 1. Blocking dims should be dense. Dims marked with digits use natural strides.
 * Limitation 2. Blocking dims are innermost. Dims marked like 8c, 4o go after regular
 *               dimensions. NC8cHW4h4cD is not a valid tensor in terms of Tachikoma and cannot be
 *               achieved with memory reinterpretation, so a data copy is required. A proper layout
 *               looks like NCHWD_8c4h4c: the first part is the outer dims, the second
 *               digit-marked part is the innermost.
*/
TensorRequisite TreatAs(const std::string& layout, std::string desired_logic_layout = "") const {
if (!defined()) return *this;
if (desired_logic_layout.empty()) desired_logic_layout = DefaultLogicLayoutFor(layout);
const auto origin_dims = dims();
// split layout string to tokens {size, tag} like {16, 'C'}, {4, 'O'}
std::vector<std::pair<int, char>> layout_tokens;
for (auto it = layout.begin(); it != layout.end();) {
auto start = it;
while (std::isdigit(*it)) it++;
int blk_size = start == it ? -1 : std::stoi(std::string{start, it});
layout_tokens.push_back({blk_size, std::toupper(*it)});
it++;
}
// check applicability of layout
auto it = layout_tokens.begin();
while (it != layout_tokens.end() && it->first == -1) it++;
int rank = std::distance(layout_tokens.begin(), it);
while (it != layout_tokens.end()) {
ICHECK_NE(it->first, -1) << "Tachikoma limitation. Blocking dims should be innermost. "
<< "But received layout is " << layout;
it++;
}
ICHECK_EQ(layout_tokens.size(), origin_dims.size());
ICHECK_EQ(rank, desired_logic_layout.size()) << layout;
std::vector<std::pair<int, char>> outermost_tokens(layout_tokens.begin(),
layout_tokens.begin() + rank);
std::vector<std::pair<int, char>> innermost_tokens(layout_tokens.begin() + rank,
layout_tokens.end());
// define dim resulting dim positions
std::map<char, int> dim_position_by_tag;
for (size_t i = 0; i < desired_logic_layout.size(); i++)
dim_position_by_tag[std::toupper(desired_logic_layout[i])] = i;
// Construct resulting desc by modifying original one
tachikoma::memory::desc res_desc = t_desc_;
memset(&res_desc.data.format_desc.blocking, 0, sizeof(res_desc.data.format_desc.blocking));
std::fill(res_desc.data.dims, res_desc.data.dims + DNNL_MAX_NDIMS, 0);
std::fill(res_desc.data.padded_dims, res_desc.data.padded_dims + DNNL_MAX_NDIMS, 0);
res_desc.data.ndims = rank;
res_desc.data.format_desc.blocking.inner_nblks = innermost_tokens.size();
auto res_dims = res_desc.data.dims;
auto res_strides = res_desc.data.format_desc.blocking.strides;
auto res_inner_blks = res_desc.data.format_desc.blocking.inner_blks;
auto res_inner_idxs = res_desc.data.format_desc.blocking.inner_idxs;
std::fill(res_dims, res_dims + rank, 1);
int orig_dim_idx = 0;
for (const auto& p : outermost_tokens) {
auto tag = p.second;
auto dim_size = origin_dims[orig_dim_idx];
auto result_dim_position = dim_position_by_tag[tag];
res_dims[result_dim_position] *= dim_size;
res_strides[result_dim_position] = t_desc_.data.format_desc.blocking.strides[orig_dim_idx];
orig_dim_idx++;
}
for (const auto& p : innermost_tokens) {
auto tag = p.second;
auto dim_size = origin_dims[orig_dim_idx];
auto result_dim_position = dim_position_by_tag[tag];
ICHECK_EQ(p.first, dim_size)
<< "Blocking layout is not applicable to tensor with shape: " << origin_dims
<< ". Requested layout is " << layout;
res_dims[result_dim_position] *= dim_size;
*res_inner_blks++ = dim_size;
*res_inner_idxs++ = result_dim_position;
orig_dim_idx++;
}
// Assume tensor is dense. There is no additional padding.
std::copy(res_desc.data.dims, res_desc.data.dims + rank, res_desc.data.padded_dims);
if (t_desc_ == res_desc) return *this;
auto orig = std::make_shared<TensorRequisite>(*this);
return {res_desc, orig, true, {}, kUndefinedTid, reverse_data_flow_};
}
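  // Example (hypothetical usage sketch): treat a blocked 5-D buffer as a logical 4-D NCHW tensor.
  // src_mem is assumed to have tachikoma shape {1, 8, 14, 14, 16}, i.e. the channel dim is split
  // into blocks of 16.
  //
  //   auto tr = TensorRequisite::AsIs(src_mem).TreatAs("NCHW16c");  // logical shape {1, 128, 14, 14}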
/*!
* \brief Produce TR with unspecified layout.
*
* Cannot be registered in TensorRegistry. Only for querying Tachikoma for preferred layouts.
*/
TensorRequisite LayoutAny() const {
auto orig = std::make_shared<TensorRequisite>(*this);
// Recreate tensor desc with layout 'any'
tachikoma::memory::desc any_desc{t_desc_.dims(), t_desc_.data_type(), tachikoma::memory::format_tag::any};
return {any_desc, orig, false, {}, kUndefinedTid, reverse_data_flow_};
}
  /*! \brief Check if the TR is constant. */
bool IsConstant() const {
if (orig_) return orig_->IsConstant();
return mem_.operator bool();
}
  /*! \brief Check if the tensor is a scalar. */
bool IsScalar() const { return t_desc_.dims().size() == 1 && t_desc_.dims()[0] == 1; }
/*! \brief Return const data memory if available. */
tachikoma::memory GetConstData() const {
if (mem_) return mem_;
if (!orig_) return {};
if (auto orig_const_data = orig_->GetConstData()) {
if (reinterpret_) {
return {t_desc_, orig_const_data.get_engine(), orig_const_data.get_data_handle()};
} else {
auto eng = orig_const_data.get_engine();
auto res = tachikoma::memory{t_desc_, eng};
tachikoma::reorder(orig_const_data, res).execute(tachikoma::stream(eng), orig_const_data, res);
return res;
}
}
return {};
}
/*!
* \brief Return const data memory in form of vector.
*
 * Same as GetConstData but uses std::vector instead of tachikoma::memory. Works only for 1D
 * tensors and scalar TRs. Useful for specifying 1D Tachikoma attributes like zero_point or
 * per_channel_scale.
*/
template <typename T>
std::vector<T> GetConstDataLikeVec() const {
auto const_data = GetConstData();
auto desc = const_data.get_desc();
ICHECK(desc.data_type() == utils::TachikomaDType<T>());
ICHECK(desc.dims().size() == 1 || desc.dims().size() == 3);
auto size = desc.get_size() / sizeof(T);
auto ptr = static_cast<T*>(const_data.get_data_handle());
return std::vector<T>(ptr, ptr + size);
}
/*! \brief Get value of constant scalar tensor if possible. */
template <typename T>
T GetConstScalarData() const {
ICHECK(IsConstant());
ICHECK(IsScalar());
auto const_data = GetConstData();
auto desc = const_data.get_desc();
ICHECK(desc.data_type() == utils::TachikomaDType<T>());
auto ptr = static_cast<T*>(const_data.get_data_handle());
return *ptr;
}
/*! \brief Check if tensor is not empty. */
bool defined() const { return !t_desc_.is_zero(); }
/*! \brief Same as defined */
operator bool() const { return defined(); }
/*!
* \brief Check if tensor represent a reversed data flow.
* Useful for describing output processing
*/
bool IsReversed() const { return reverse_data_flow_; }
private:
TensorRequisite(const tachikoma::memory::desc& t_desc, const std::shared_ptr<TensorRequisite>& orig,
bool reinterpret, const tachikoma::memory& const_mem, uint32_t eid,
bool reverse_data_flow)
: t_desc_(t_desc),
orig_(orig),
reinterpret_(reinterpret),
mem_(const_mem),
eid_(eid),
reverse_data_flow_(reverse_data_flow) {
if (mem_) ICHECK(!orig_ && !reverse_data_flow_ && eid_ == kUndefinedTid);
if (eid_ != kUndefinedTid) ICHECK(!orig_);
}
/* Descriptor of particular tensor */
tachikoma::memory::desc t_desc_ = {};
/* Parent TR object which is referred from this TR */
std::shared_ptr<TensorRequisite> orig_ = {};
/* Flag to specify which action should be done with orig TR, reordering or reinterpretation */
bool reinterpret_ = false;
/* Const memory object if available */
tachikoma::memory mem_ = {};
/* Entry ID of tensor if available */
uint32_t eid_ = kUndefinedTid;
/*
   * Flag to describe the reverse data flow case.
   * All operations in the queue will be executed in reverse order. Relevant for dst tensor description.
*/
bool reverse_data_flow_ = false;
friend class TensorRegistry;
};
/*!
 * \brief The registry of tensors. Implements matching of provided TRs to real memory buffers.
 *
 * Registration of a TR is performed by calling the Register() method, which returns an ArgId
 * object. An ArgId can be mapped to real memory via the memory solver created by MakeSolver().
*/
class TensorRegistry {
private:
enum ArgReqFlag {
CONST, /// < Constant tensor. ExecutionCTX independent
TMP_STORAGE, /// < Intermediate tensors. Stored inside TensorRegistry. Inaccessible outside
EXT_EID, /// < External data. Input or Output.
};
public:
struct ArgId {
TensorRegistry::ArgReqFlag flag_;
uint32_t idx_;
};
using Action = std::tuple<tachikoma::primitive, std::unordered_map<int, ArgId>>;
using ActionQue = std::vector<Action>;
using DLTensorProvider = std::function<const DLTensor*(uint32_t)>;
using MemSolver = std::function<const tachikoma::memory(ArgId)>;
TensorRegistry() = default;
TensorRegistry(const tachikoma::engine& eng, const std::set<uint32_t>& ext_io_eid)
: tmp_mem_collection_(1), ext_io_eid_(ext_io_eid), eng_(eng), stream_(eng) {}
/*!
* \brief Register TR to registry
*
 * Resolution of a TR may lead to the introduction of intermediate memory buffers and additional
 * transformation actions which should be performed before or after usage of the corresponding
 * memory buffer. Additional actions will be appended to the provided action queue. Depending on
 * the tr.IsReversed() value, the actions should be executed before or after usage of the
 * resulting ArgId.
 *
 * \param tr tensor requisite sequence to register
 * \param action resulting action queue. If TR resolution requires execution of some
 *        transformation actions they will be put here
 * \return associated ArgId. Should be used as an argument for MemSolver.
*/
ArgId Register(const TensorRequisite& tr, ActionQue* action) {
// 1) Constant tensor. Direct reference
if (auto const_data = tr.GetConstData()) {
auto idx = const_mem_collection_.size();
const_mem_collection_.push_back(const_data);
return MakeArgReq(ArgReqFlag::CONST, static_cast<uint32_t>(idx));
}
// 2) EID mapped tensor. Direct reference
if (tr.eid_ != TensorRequisite::kUndefinedTid) {
if (ext_io_eid_.count(tr.eid_) == 0) { // Not IO tensor, means it's intermediate
if (eid2idx_tmp_.count(tr.eid_)) {
auto idx = eid2idx_tmp_.at(tr.eid_);
return MakeArgReq(ArgReqFlag::TMP_STORAGE, idx);
} else {
        // register itself
auto idx = tmp_mem_collection_.size();
tmp_mem_collection_.push_back(tr.t_desc_);
eid2idx_tmp_[tr.eid_] = idx;
return MakeArgReq(ArgReqFlag::TMP_STORAGE, static_cast<uint32_t>(idx));
}
} else {
auto idx = ext_mem_collection_.size();
ext_mem_collection_.push_back({tr.eid_, tr.t_desc_});
return MakeArgReq(ArgReqFlag::EXT_EID, static_cast<uint32_t>(idx));
}
}
// 3) Tensors with transform actions
if (tr.orig_) {
// recursive register of orig TR
auto orig_arg_req = Register(*tr.orig_, action);
if (tr.reinterpret_) {
return RegisterReinterpret(orig_arg_req, tr.t_desc_);
} else {
return RegisterReorder(orig_arg_req, tr.t_desc_, tr.reverse_data_flow_, action);
}
}
// 4) Scratchpad
ICHECK(!tr.orig_ && !tr.mem_ && tr.eid_ == TensorRequisite::kUndefinedTid);
auto idx = tmp_mem_collection_.size();
tmp_mem_collection_.push_back(tr.t_desc_);
tmp_mem_mapping_[idx] = 0; // zero position tmp mem object is reserved for scratchpads
auto scratchpad_size = tr.t_desc_.get_size();
auto glob_scratchpad_size = tmp_mem_collection_[0].get_size();
if (scratchpad_size > glob_scratchpad_size) {
tmp_mem_collection_[0] =
tachikoma::memory::desc({static_cast<tachikoma::memory::dim>(scratchpad_size)},
tachikoma::memory::data_type::u8, tachikoma::memory::format_tag::a);
}
return MakeArgReq(TMP_STORAGE, static_cast<uint32_t>(idx));
}
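  // Example (hypothetical usage sketch; eng, ext_io_eid, ext_provider and the TRs are assumed to
  // come from the surrounding runtime):
  //
  //   TensorRegistry registry(eng, ext_io_eid);
  //   TensorRegistry::ActionQue pre_actions, post_actions;
  //   auto in_id = registry.Register(src_tr, &pre_actions);               // actions run before use
  //   auto out_id = registry.Register(dst_tr.Backward(), &post_actions);  // actions run after use
  //   auto solver = registry.MakeSolver(ext_provider);
  //   tachikoma::memory in_mem = solver(in_id);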
/*!
* \brief Construct memory solver for all registered TRs.
* \param ext_provider callback to resolve external IO buffers
* \return memory solver object to match ArgId to tachikoma::memory objects
*/
MemSolver MakeSolver(const DLTensorProvider& ext_provider) const {
return MemSolverImpl(eng_, ext_provider, const_mem_collection_, ext_mem_collection_,
tmp_mem_collection_, tmp_mem_mapping_);
}
private:
ArgId RegisterReinterpret(ArgId src_ar, const tachikoma::memory::desc& desc) {
switch (src_ar.flag_) {
case TMP_STORAGE: {
auto idx = tmp_mem_collection_.size();
tmp_mem_collection_.push_back(desc);
tmp_mem_mapping_[idx] = src_ar.idx_;
return MakeArgReq(TMP_STORAGE, idx);
}
case EXT_EID: {
auto ext_req = ext_mem_collection_[src_ar.idx_];
auto idx = ext_mem_collection_.size();
ext_mem_collection_.push_back({ext_req.first, desc});
return MakeArgReq(EXT_EID, idx);
}
default:
LOG(FATAL) << "Unknown case";
}
return {};
}
ArgId RegisterReorder(ArgId src_ar, const tachikoma::memory::desc& desc, bool reverse_data_flow,
ActionQue* action) {
ICHECK(src_ar.flag_ == TMP_STORAGE || src_ar.flag_ == EXT_EID);
auto src_desc = src_ar.flag_ == TMP_STORAGE ? tmp_mem_collection_[src_ar.idx_]
: ext_mem_collection_[src_ar.idx_].second;
auto idx = tmp_mem_collection_.size();
tmp_mem_collection_.push_back(desc);
auto dst_ar = MakeArgReq(TMP_STORAGE, idx);
// reorder action submit
if (reverse_data_flow) {
auto reorder_pd = tachikoma::reorder::primitive_desc(eng_, desc, eng_, src_desc);
action->insert(action->begin(),
{tachikoma::reorder(reorder_pd), {{DNNL_ARG_FROM, dst_ar}, {DNNL_ARG_TO, src_ar}}});
} else {
auto reorder_pd = tachikoma::reorder::primitive_desc(eng_, src_desc, eng_, desc);
action->push_back(
{tachikoma::reorder(reorder_pd), {{DNNL_ARG_FROM, src_ar}, {DNNL_ARG_TO, dst_ar}}});
}
return dst_ar;
}
/*! \brief Implementation of memory solver */
class MemSolverImpl {
public:
MemSolverImpl(const tachikoma::engine& eng, const DLTensorProvider& ext_data_provider,
const std::vector<tachikoma::memory>& const_mems,
const std::vector<std::pair<uint32_t, tachikoma::memory::desc>>& ext_mems,
const std::vector<tachikoma::memory::desc>& tmp_mem_descs,
const std::map<size_t, size_t>& tmp_mem_mapping)
: eng_(eng),
ext_data_provider_(ext_data_provider),
const_mems_(const_mems),
ext_mems_(ext_mems) {
      // Construct temp memory objects on the fly, while we have no scratchpad
      // support at the VM/GraphExecutor level.
tmp_mems_.resize(tmp_mem_descs.size());
for (size_t i = 0; i < tmp_mem_descs.size(); i++) {
auto found = tmp_mem_mapping.find(i);
if (found != tmp_mem_mapping.end()) {
auto reuse_hdl = tmp_mems_[found->second].get_data_handle();
tmp_mems_[i] = tachikoma::memory(tmp_mem_descs[i], eng_, reuse_hdl);
} else {
tmp_mems_[i] = tachikoma::memory(tmp_mem_descs[i], eng_);
}
}
}
/*! \brief Find memory object associated with provided ArgId */
tachikoma::memory operator()(const ArgId& ar) const {
switch (ar.flag_) {
case CONST:
return const_mems_.at(ar.idx_);
case TMP_STORAGE:
return tmp_mems_.at(ar.idx_);
case EXT_EID: {
auto eid_and_desc = ext_mems_.at(ar.idx_);
auto eid = eid_and_desc.first;
auto desc = eid_and_desc.second;
auto ext_dl_tensor = ext_data_provider_(eid);
ICHECK(ext_dl_tensor->data);
return tachikoma::memory{desc, eng_, ext_dl_tensor->data};
}
}
return {};
}
private:
const tachikoma::engine& eng_;
const DLTensorProvider& ext_data_provider_;
const std::vector<tachikoma::memory>& const_mems_;
const std::vector<std::pair<uint32_t, tachikoma::memory::desc>>& ext_mems_;
std::vector<tachikoma::memory> tmp_mems_;
};
ArgId MakeArgReq(ArgReqFlag flag, uint32_t idx) { return {flag, idx}; }
/* Collection of const memory objects. */
std::vector<tachikoma::memory> const_mem_collection_;
/* Collection of intermediate memory descriptors. Zero position is reserved for scratchpads. */
std::vector<tachikoma::memory::desc> tmp_mem_collection_;
  /* Mapping of some temp buffers onto previously registered ones. */
std::map<size_t, size_t> tmp_mem_mapping_;
/* Collection of external_intermediate memory objects.
* first - eid of external buffer to ask
* second - t_desc describes how to treat external buffer */
std::vector<std::pair<uint32_t, tachikoma::memory::desc>> ext_mem_collection_;
/* Map of eid to index of temp buffer in tmp_mem_collection_ */
std::unordered_map<uint32_t, size_t> eid2idx_tmp_;
/* List of external eid */
std::set<uint32_t> ext_io_eid_;
/* Engine of all tensors existing in this registry */
tachikoma::engine eng_;
  /* Execution stream used to reorder const data */
tachikoma::stream stream_;
};
} // namespace contrib
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_TACHIKOMA_TACHIKOMA_TENSOR_REQUISITE_H_ | https://github.com/zk-ml/tachikoma |
src/runtime/contrib/tachikoma/tachikoma_utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
 * \file src/runtime/contrib/tachikoma/tachikoma_utils.h
* \brief Some Tachikoma specific utility functions
*/
#ifndef TVM_RUNTIME_CONTRIB_TACHIKOMA_TACHIKOMA_UTILS_H_
#define TVM_RUNTIME_CONTRIB_TACHIKOMA_TACHIKOMA_UTILS_H_
#include <cstdint>
#include <ostream>
#include <string>
#include <vector>
// TODO(@liaopeiyuan): Have to mute warning from tachikoma headers.
// -Wzero-as-null-pointer-constant and -Wdocumentation-unknown-command
#include <dnnl.hpp>
namespace tachikoma = dnnl;
#include "tvm/runtime/data_type.h"
namespace tvm {
namespace runtime {
namespace contrib {
/*!
* \brief Convert a DLPack data type to a Tachikoma data type.
* \param dltype The DLPack data type.
* \return The corresponding Tachikoma data type.
*/
tachikoma::memory::data_type dtype_dl2tachikoma(DLDataType dltype);
/*!
 * \brief Convert a TVM shape to Tachikoma dims
* \param shape tvm shape
* \return dims in terms of tachikoma
*/
tachikoma::memory::dims shape_dl2tachikoma(const std::vector<int64_t>& shape);
/*!
* \brief Construct plain tensor descriptor
* \param shape provided shape
* \param dltype provided data type
* \return resulting plain tensor desc
*/
tachikoma::memory::desc MakePlainDesc(const std::vector<int64_t>& shape, DLDataType dltype);
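// Example (hypothetical usage sketch; the commented values are what the plain-layout helpers are
// expected to produce for a float32 NCHW tensor):
//
//   DLDataType f32{kDLFloat, 32, 1};
//   auto dt = dtype_dl2tachikoma(f32);                 // tachikoma::memory::data_type::f32
//   auto dims = shape_dl2tachikoma({1, 3, 224, 224});  // {1, 3, 224, 224}
//   auto desc = MakePlainDesc({1, 3, 224, 224}, f32);  // dense row-major f32 descriptor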
namespace utils {
/*! \brief Pretty printer util for shape */
inline std::ostream& operator<<(std::ostream& o, const tachikoma::memory::dims& dims) {
o << "[";
auto d = dims.begin();
if (d != dims.end()) o << *d++;
while (d != dims.end()) o << "," << *d++;
o << "]";
return o;
}
/*! \brief Pretty printer util for data type */
inline std::ostream& operator<<(std::ostream& o, const tachikoma::memory::data_type& type) {
std::string name = "undef";
switch (type) {
case tachikoma::memory::data_type::undef:
name = "undef";
break;
case tachikoma::memory::data_type::f32:
name = "fp32";
break;
case tachikoma::memory::data_type::f16:
name = "fp16";
break;
case tachikoma::memory::data_type::bf16:
name = "bf16";
break;
case tachikoma::memory::data_type::s32:
name = "i32";
break;
case tachikoma::memory::data_type::s8:
name = "i8";
break;
case tachikoma::memory::data_type::u8:
name = "u8";
break;
}
o << name;
return o;
}
/*! \brief Convert a data type template argument to the corresponding runtime data type */
template <typename T>
inline tachikoma::memory::data_type TachikomaDType();
template <>
inline tachikoma::memory::data_type TachikomaDType<int>() {
return tachikoma::memory::data_type::s32;
}
template <>
inline tachikoma::memory::data_type TachikomaDType<float>() {
return tachikoma::memory::data_type::f32;
}
template <>
inline tachikoma::memory::data_type TachikomaDType<uint8_t>() {
return tachikoma::memory::data_type::u8;
}
template <>
inline tachikoma::memory::data_type TachikomaDType<int8_t>() {
return tachikoma::memory::data_type::s8;
}
} // namespace utils
} // namespace contrib
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_TACHIKOMA_TACHIKOMA_UTILS_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/tensorrt/tensorrt_builder.h | /*
 * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file runtime/contrib/tensorrt/tensorrt_builder.h
* \brief The TensorRTBuilder class can be used to convert a JSONRuntime graph into a TRT engine
* which can be used for inference.
*/
#ifndef TVM_RUNTIME_CONTRIB_TENSORRT_TENSORRT_BUILDER_H_
#define TVM_RUNTIME_CONTRIB_TENSORRT_TENSORRT_BUILDER_H_
#include <tvm/runtime/ndarray.h>
#include <string>
#include <unordered_map>
#include <vector>
#include "../json/json_node.h"
#include "NvInfer.h"
#include "tensorrt_logger.h"
#include "tensorrt_ops.h"
namespace tvm {
namespace runtime {
namespace contrib {
using JSONGraphNode = tvm::runtime::json::JSONGraphNode;
using JSONGraphNodeEntry = tvm::runtime::json::JSONGraphNodeEntry;
/*!
* \brief The product of TensorRTBuilder which provides everything needed to
* perform inference.
*/
struct TensorRTEngineAndContext {
nvinfer1::ICudaEngine* engine = nullptr;
nvinfer1::IExecutionContext* context = nullptr;
std::vector<std::string> inputs;
std::vector<std::string> outputs;
};
/*!
* \brief Converts a JSONRuntime graph into a TensorRT engine and execution context. Inputs,
* constants, layers, and outputs can be added to construct the TensorRT network definition.
* BuildEngine() will then use the network definition to build the TensorRT engine and context which
* can be used to run inference - this phase can take a long time because TensorRT will query the
* performance of all available kernels and fusions to optimize the engine.
*/
class TensorRTBuilder {
public:
/*!
* \brief Create TensorRT builder.
* \param logger TensorRT logger to use for errors and warnings.
 * \param data_entry The input and output tensors from TVM.
 * \param max_workspace_size Workspace size parameter for TensorRT engine build phase.
 * \param use_implicit_batch Whether to use implicit batch mode (default).
 * \param use_fp16 Whether to automatically convert the model to fp16.
 * \param batch_size If use_implicit_batch, the batch size to build the engine for.
 * \param calibrator Optional int8 calibrator; if provided, the engine is built with int8 precision.
*/
TensorRTBuilder(TensorRTLogger* logger, const std::vector<const DLTensor*>& data_entry,
size_t max_workspace_size, bool use_implicit_batch, bool use_fp16, int batch_size,
nvinfer1::IInt8Calibrator* calibrator = nullptr);
/*!
* \brief Add TensorRT input(s) for input node in network definition.
* \param nid The input node id.
* \param entry_id The index into data_entry_ for first entry in node.
* \param node The input node.
*/
void AddInput(int nid, uint32_t entry_id, const JSONGraphNode& node);
/*!
* \brief Add TensorRT weight for input constant in network definition.
* \param nid The input node id.
 * \param data The data tensor on CPU.
*/
void AddConstant(int nid, const DLTensor* data);
/*!
* \brief Add TensorRT layer for op node in network definition.
* \param nid The input node id.
* \param node The op node.
*/
void AddLayer(int nid, const JSONGraphNode& node);
/*!
* \brief Mark TensorRT output in network definition.
* \param entry The output node entry.
* \param entry_id The output node entry id.
*/
void AddOutput(const JSONGraphNodeEntry& entry, uint32_t entry_id);
/*!
 * \brief Takes the network definition and "compiles" a TensorRT engine which can be used for
 * inference. This step is time consuming.
* \return TRT engine, context, and input/output information.
*/
TensorRTEngineAndContext BuildEngine();
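  // Example (hypothetical build flow sketch; logger, data_entry, nodes and entry ids come from
  // the surrounding JSONRuntime and are assumed here):
  //
  //   TensorRTBuilder builder(&logger, data_entry, /*max_workspace_size=*/1 << 30,
  //                           /*use_implicit_batch=*/true, /*use_fp16=*/false, /*batch_size=*/1);
  //   builder.AddInput(input_nid, input_entry_id, input_node);
  //   builder.AddConstant(const_nid, const_dltensor);
  //   builder.AddLayer(op_nid, op_node);
  //   builder.AddOutput(output_entry, output_entry_id);
  //   TensorRTEngineAndContext engine_and_context = builder.BuildEngine();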
private:
/*! \brief Convert a DLTensor to a TensorRT weight. */
nvinfer1::Weights GetDLTensorAsWeights(const DLTensor* dptr, DLDeviceType src_device);
/*! \brief Convert an input to a Tensor if it is a Weight */
nvinfer1::ITensor* GetInputAsTensor(const TensorRTOpInput& input);
/*! \brief Clean up resources used to create engine. */
void CleanUp();
/*! \brief Maps a node to its outputs. */
std::unordered_map<int, std::vector<TensorRTOpInput>> node_output_map_;
/*! \brief TensorRT builder. */
nvinfer1::IBuilder* builder_ = nullptr;
#if TRT_VERSION_GE(6, 0, 1)
/*! \brief TensorRT builder config. */
nvinfer1::IBuilderConfig* config_ = nullptr;
#endif
/*! \brief TensorRT network definition. */
nvinfer1::INetworkDefinition* network_ = nullptr;
/*! \brief List of all weights held in memory. */
std::vector<nvinfer1::Weights> trt_weights_;
/*! \brief Input and output tensors from TVM. */
const std::vector<const DLTensor*>& data_entry_;
/*! \brief Map TensorRT binding name to index in data_entry_. */
std::unordered_map<std::string, uint32_t> entry_id_map_;
/*! \brief Max workspace size in bytes for TRT. */
size_t max_workspace_size_;
/*! \brief Whether to use implicit batch mode. */
bool use_implicit_batch_;
/*! \brief Whether to automatically convert model to 16-bit floating point precision. */
bool use_fp16_;
/*! \brief whether to automatically convert model to int8 precision */
bool use_int8_;
/*! \brief Batch size to optimize for. */
int batch_size_;
/*! \brief Input names. */
std::vector<std::string> network_input_names_;
/*! \brief Output names. */
std::vector<std::string> network_output_names_;
  /*! \brief Calibrator pointer used to add batch data when using int8 mode.
   *  Will be nullptr when fp16 or fp32 precision is used. */
nvinfer1::IInt8Calibrator* calibrator_;
};
} // namespace contrib
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_TENSORRT_TENSORRT_BUILDER_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/tensorrt/tensorrt_calibrator.h | /*
 * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
 */
/*!
 * \file runtime/contrib/tensorrt/tensorrt_calibrator.h
 * \brief Contains the TensorRTCalibrator class, which supplies int8 calibration batches to
 * TensorRT during engine building.
 */
#ifndef TVM_RUNTIME_CONTRIB_TENSORRT_TENSORRT_CALIBRATOR_H_
#define TVM_RUNTIME_CONTRIB_TENSORRT_TENSORRT_CALIBRATOR_H_
#include <string>
#include <vector>
#include "../../cuda/cuda_common.h"
#include "NvInfer.h"
namespace tvm {
namespace runtime {
class TensorRTCalibrator : public nvinfer1::IInt8EntropyCalibrator2 {
public:
TensorRTCalibrator(int batch_size, const std::vector<std::string>& input_names)
: batch_size_(batch_size), num_batches_calibrated_(0), input_names_(input_names) {}
~TensorRTCalibrator() {
// Free calibration data
for (auto& inputs : data_) {
for (size_t i = 0; i < inputs.size(); ++i) {
delete[] inputs[i];
}
}
// Free buffers
for (size_t i = 0; i < buffers_.size(); ++i) {
CUDA_CALL(cudaFree(buffers_[i]));
}
}
void AddBatchData(const std::vector<void*>& bindings, const std::vector<size_t>& binding_sizes) {
// Copy data from GPU
std::vector<float*> data_host(bindings.size(), nullptr);
for (size_t i = 0; i < bindings.size(); ++i) {
data_host[i] = new float[batch_size_ * binding_sizes[i]];
CUDA_CALL(cudaMemcpy(static_cast<void*>(data_host[i]), bindings[i],
batch_size_ * binding_sizes[i] * sizeof(float), cudaMemcpyDeviceToHost));
}
data_.push_back(data_host);
data_sizes_.push_back(binding_sizes);
}
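  // Example (hypothetical usage sketch; device pointers and per-binding element counts come from
  // the TensorRT runtime that owns this calibrator):
  //
  //   TensorRTCalibrator calibrator(/*batch_size=*/8, {"input0"});
  //   calibrator.AddBatchData({input0_device_ptr}, {3 * 224 * 224});  // one calibration batch
  //   // TensorRT later pulls these batches back through getBatch() during int8 calibration.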
int getBatchSize() const noexcept override { return batch_size_; }
/*!
* \brief TensorRT will call this method to get next batch of data to
* calibrate with.
*/
bool getBatch(void* bindings[], const char* names[], int nbBindings) noexcept override {
AllocateBuffersIfNotAllocated();
CHECK_EQ(input_names_.size(), nbBindings);
for (size_t i = 0; i < input_names_.size(); ++i) {
CHECK_EQ(input_names_[i], names[i]);
CUDA_CALL(cudaMemcpy(buffers_[i], data_[num_batches_calibrated_][i],
batch_size_ * data_sizes_[num_batches_calibrated_][i] * sizeof(float),
cudaMemcpyHostToDevice));
bindings[i] = buffers_[i];
}
num_batches_calibrated_++;
// TODO(trevmorr): Free data from previous batch?
return (num_batches_calibrated_ < static_cast<int>(data_.size()));
}
const void* readCalibrationCache(size_t& length) noexcept override {
if (calibration_cache_.empty()) return nullptr;
length = calibration_cache_.size();
return calibration_cache_.data();
}
void writeCalibrationCache(const void* cache, size_t length) noexcept override {
calibration_cache_.assign(static_cast<const char*>(cache), length);
}
private:
/*! \brief Batch size. */
int batch_size_;
/*! \brief Number of batches already fed to calibrator. */
int num_batches_calibrated_;
/*! \brief Storage for calibration cache. */
std::string calibration_cache_;
/*! \brief Data to be used for calibration. */
std::vector<std::vector<float*>> data_;
/*! \brief Number of elements for data to be used for calibration. */
std::vector<std::vector<size_t>> data_sizes_;
/*! \brief Device buffers to be used for calibration. */
std::vector<void*> buffers_;
/*! \brief Names of inputs */
const std::vector<std::string> input_names_;
/*! \brief Allocate device memory buffers. data_sizes_ must already have one
* entry. */
void AllocateBuffersIfNotAllocated() {
if (!buffers_.empty()) return;
CHECK_GE(data_sizes_.size(), 1);
const int num_inputs = data_sizes_[0].size();
buffers_.assign(num_inputs, nullptr);
for (int i = 0; i < num_inputs; ++i) {
CUDA_CALL(cudaMalloc(&buffers_[i], data_sizes_[0][i] * sizeof(float)));
}
}
};
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_TENSORRT_TENSORRT_CALIBRATOR_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/tensorrt/tensorrt_logger.h | /*
 * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file runtime/contrib/tensorrt/tensorrt_logger.h
* \brief Contains TensorRTLogger class which is required by TRT and used to
* print info, warnings, and errors.
*/
#ifndef TVM_RUNTIME_CONTRIB_TENSORRT_TENSORRT_LOGGER_H_
#define TVM_RUNTIME_CONTRIB_TENSORRT_TENSORRT_LOGGER_H_
#include <tvm/runtime/logging.h>
#include "NvInfer.h"
#include "tensorrt_utils.h"
namespace tvm {
namespace runtime {
namespace contrib {
/*! \brief Logger for TensorRT info/warning/errors. */
class TensorRTLogger : public nvinfer1::ILogger {
public:
TensorRTLogger() : TensorRTLogger(Severity::kWARNING) {}
explicit TensorRTLogger(Severity severity) : reportable_severity(severity) {}
void log(Severity severity, const char* msg) noexcept override {
// suppress messages with severity enum value greater than the reportable
if (severity > reportable_severity) return;
switch (severity) {
case Severity::kINTERNAL_ERROR:
LOG(ERROR) << "INTERNAL_ERROR: " << msg;
break;
case Severity::kERROR:
LOG(ERROR) << "ERROR: " << msg;
break;
case Severity::kWARNING:
LOG(WARNING) << "WARNING: " << msg;
break;
case Severity::kINFO:
LOG(INFO) << "INFO: " << msg;
break;
#if TRT_VERSION_GE(5, 1, 5)
case Severity::kVERBOSE:
DLOG(INFO) << "VERBOSE: " << msg;
break;
#endif
default:
LOG(INFO) << "UNKNOWN: " << msg;
break;
}
}
private:
Severity reportable_severity{Severity::kWARNING};
};
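// Example (hypothetical usage sketch): route TensorRT messages at INFO severity and above into
// TVM logging while building an engine.
//
//   TensorRTLogger logger(nvinfer1::ILogger::Severity::kINFO);
//   nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(logger);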
} // namespace contrib
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_TENSORRT_TENSORRT_LOGGER_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/tensorrt/tensorrt_ops.h | /*
 * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file runtime/contrib/tensorrt/tensorrt_ops.h
* \brief Converters from Relay ops into TensorRT layers. Converters should
* inherit from TensorRTOpConverter and implement the Convert() method.
*/
#ifndef TVM_RUNTIME_CONTRIB_TENSORRT_TENSORRT_OPS_H_
#define TVM_RUNTIME_CONTRIB_TENSORRT_TENSORRT_OPS_H_
#include <algorithm>
#include <cmath>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "../json/json_node.h"
#include "NvInfer.h"
#include "tensorrt_utils.h"
#if TRT_VERSION_GE(6, 0, 1)
#define TRT_HAS_IMPLICIT_BATCH(params) (params->network->hasImplicitBatchDimension())
#else
#define TRT_HAS_IMPLICIT_BATCH(params) (true)
#endif
namespace tvm {
namespace runtime {
namespace contrib {
using JSONGraphNode = tvm::runtime::json::JSONGraphNode;
/*!
 * \brief An input to an op may be either kTensor in the case of nvinfer1::ITensor*,
 * kWeight for nvinfer1::Weights, or ignored (e.g. for the nn.pad value).
*/
enum TensorRTInputType { kTensor, kWeight, kIgnored };
/*!
* \brief An input to a TensorRTOpConverter. The type of the input is either kTensor
* or kWeight. For kTensor, "tensor" contains the input tensor. For kWeight,
* "weight" contains the input weight and "weight_shape" contains the shape.
*/
struct TensorRTOpInput {
/*! \brief If type is kTensor, will store input tensor. */
nvinfer1::ITensor* tensor;
/*! \brief If type is kWeight, will store input weight. */
nvinfer1::Weights weight;
  /*! \brief Whether the input is a tensor or a weight. */
TensorRTInputType type;
/*! \brief If type is kWeight, will store weight shape. */
std::vector<int> weight_shape;
explicit TensorRTOpInput(nvinfer1::ITensor* tensor)
: tensor(tensor), weight({tensor->getType(), nullptr, 0}), type(kTensor) {}
TensorRTOpInput(nvinfer1::Weights weight, const std::vector<int>& shape)
: tensor(nullptr), weight(weight), type(kWeight), weight_shape(shape) {}
};
/*! \brief Parameters to convert an Op from Relay to TensorRT. */
struct TensorRTOpConverterParams {
/*! \brief The TRT network that the new layer should be added to. */
nvinfer1::INetworkDefinition* network;
/*! \brief Index of JSON node. */
int nid;
/*! \brief The corresponding JSON node. */
const JSONGraphNode& node;
/*! \brief The type of op. */
std::string op_name;
/*! \brief Inputs to the op. */
std::vector<TensorRTOpInput> inputs;
/*! \brief Outputs of the op should be populated here during Convert(). */
std::vector<nvinfer1::ITensor*> outputs;
/*! \brief Any newly allocated weights should be stored here also. */
std::vector<nvinfer1::Weights>* trt_weights;
TensorRTOpConverterParams(nvinfer1::INetworkDefinition* network, int nid,
const JSONGraphNode& node, std::vector<nvinfer1::Weights>* trt_weights)
: network(network), nid(nid), node(node), trt_weights(trt_weights) {
op_name = node.GetOpName();
}
std::string LayerName() const { return op_name + "(" + std::to_string(nid) + ")"; }
};
/*! \brief Base class for an op converter from Relay to TRT. */
class TensorRTOpConverter {
public:
virtual ~TensorRTOpConverter() = default;
/*! \brief Operator name. */
std::string op_name;
/*! \brief Used to specify whether each input is tensor or weight. */
const std::vector<TensorRTInputType> input_types;
/*! \brief If set to true, any number of tensor inputs can be used for the op. */
const bool variable_input_count;
/*!
* \brief Converter subclasses should call this constructor to set
* input_types or variable_input_count.
* \param input_types For each input to the op, there should be a
* corresponding entry in input_types to determine whether that input should
* be a tensor or a weight. TensorRTBuilder will prepare inputs in
* TensorRTOpConverter according to this.
* \param variable_input_count If the op can have multiple inputs, set this to
* true. input_types vector will be ignored and any number of input tensors
* can be used for this op. All inputs will be tensors and not weights.
*/
TensorRTOpConverter(std::string op_name, const std::vector<TensorRTInputType>& input_types,
bool variable_input_count = false);
/*!
* \brief Convert to TRT. Implementation should use inputs and attributes
* from the CallNode to add the corresponding TRT layers to network. Outputs
* should be pushed to outputs vector.
* \param params Parameters for this op.
*/
virtual void Convert(TensorRTOpConverterParams* params) const = 0;
/*!
* \brief Helper function to reshape a tensor.
* \param params Parameters for this op.
* \param input Tensor to reshape.
* \param new_shape New shape, does not include batch dim.
* \return Reshaped tensor
*/
nvinfer1::ITensor* Reshape(TensorRTOpConverterParams* params, nvinfer1::ITensor* input,
const std::vector<int>& new_shape) const;
/*!
* \brief Helper function to transpose a tensor.
* \param params Parameters for this op.
* \param input Tensor to transpose.
* \param order New order of axes, does include batch dim.
* \return Transposed tensor
*/
nvinfer1::ITensor* Transpose(TensorRTOpConverterParams* params, nvinfer1::ITensor* input,
const std::vector<int>& order) const;
/*!
* \brief Helper function to convert an axis to TRT format.
* \param axis Axis from TVM.
* \param input_rank Rank of input, does not include batch dim.
* \return Axis in TRT format.
*/
int ConvertAxis(TensorRTOpConverterParams* params, int axis, int input_rank) const;
/*!
* \brief Create constant that is broadcastable.
* \param params Parameters for this op.
* \param value Value of scalar.
* \param broadcast_to_dims Dims that scalar should be broadcastable against.
* \return Constant tensor.
*/
nvinfer1::ITensor* CreateScalar(TensorRTOpConverterParams* params, float value,
const nvinfer1::Dims& broadcast_to_dims) const;
/*!
* \brief Get pre/post padding values from padding attributes array.
* \param padding Serialized padding from op attributes.
 * \param use_asymmetric_padding True if both pre and post are needed for asymmetric padding.
 * \param prepadding Prepadding value or symmetric padding values if !use_asymmetric_padding.
 * \param postpadding Postpadding value if use_asymmetric_padding.
*/
void GetPadding(const std::vector<std::string>& padding, bool* use_asymmetric_padding,
nvinfer1::DimsHW* prepadding, nvinfer1::DimsHW* postpadding) const;
/*!
* \brief Get pre/post padding values from padding attributes array for volumetric ops.
* \param padding Serialized padding from op attributes.
 * \param use_asymmetric_padding True if both pre and post are needed for asymmetric padding.
 * \param prepadding Prepadding value or symmetric padding values if !use_asymmetric_padding.
 * \param postpadding Postpadding value if use_asymmetric_padding.
*/
void GetPadding3D(const std::vector<std::string>& padding, bool* use_asymmetric_padding,
nvinfer1::Dims* prepadding, nvinfer1::Dims* postpadding) const;
};
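// Example (hypothetical converter sketch; a real converter is looked up through GetOpConverters()
// below and must also handle the op's attributes, which are omitted here):
//
//   class ReluOpConverter : public TensorRTOpConverter {
//    public:
//     ReluOpConverter() : TensorRTOpConverter("nn.relu", {kTensor}) {}
//     void Convert(TensorRTOpConverterParams* params) const override {
//       auto* layer = params->network->addActivation(*params->inputs.at(0).tensor,
//                                                    nvinfer1::ActivationType::kRELU);
//       layer->setName(params->LayerName().c_str());
//       params->outputs.push_back(layer->getOutput(0));
//     }
//   };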
/*!
* \brief Get the map of available TensorRTOpConverters, where the key is the name of the relay op.
* \return Map of TensorRTOpConverters.
*/
const std::unordered_map<std::string, std::unique_ptr<TensorRTOpConverter>>& GetOpConverters();
} // namespace contrib
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_TENSORRT_TENSORRT_OPS_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/tensorrt/tensorrt_utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
 * \file runtime/contrib/tensorrt/tensorrt_utils.h
* \brief Helper functions used by TensorRTBuilder or TensorRTOpConverters.
*/
#ifndef TVM_RUNTIME_CONTRIB_TENSORRT_TENSORRT_UTILS_H_
#define TVM_RUNTIME_CONTRIB_TENSORRT_TENSORRT_UTILS_H_
#include <string>
#include <vector>
#include "NvInfer.h"
// There is a conflict between cpplint and clang-format-10.
// clang-format off
#define TRT_VERSION_GE(major, minor, patch) \
((NV_TENSORRT_MAJOR > major) || (NV_TENSORRT_MAJOR == major && NV_TENSORRT_MINOR > minor) || \
(NV_TENSORRT_MAJOR == major && NV_TENSORRT_MINOR == minor && NV_TENSORRT_PATCH >= patch))
// clang-format on
namespace tvm {
namespace runtime {
namespace contrib {
/*!
 * \brief Helper function to convert a vector to TRT Dims.
* \param vec Vector.
* \return TRT Dims.
*/
template <typename T>
inline nvinfer1::Dims VectorToTrtDims(const std::vector<T>& vec) {
nvinfer1::Dims dims;
// Dims(nbDims=0, d[0]=1) is used to represent a scalar in TRT.
dims.d[0] = 1;
dims.nbDims = vec.size();
for (size_t i = 0; i < vec.size(); ++i) {
dims.d[i] = vec[i];
}
return dims;
}
/*!
* \brief Helper function to convert TRT Dims to vector.
 * \param dims TRT Dims.
* \return Vector.
*/
inline std::vector<int> TrtDimsToVector(const nvinfer1::Dims& dims) {
return std::vector<int>(dims.d, dims.d + dims.nbDims);
}
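// Example (hypothetical usage sketch):
//
//   nvinfer1::Dims dims = VectorToTrtDims(std::vector<int>{1, 3, 224, 224});  // dims.nbDims == 4
//   std::vector<int> shape = TrtDimsToVector(dims);                           // {1, 3, 224, 224}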
} // namespace contrib
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_TENSORRT_TENSORRT_UTILS_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/tflite/tflite_runtime.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
 * \brief TFLite runtime that can run a TFLite model,
 * exposed through the TVM PackedFunc API.
* \file tflite_runtime.h
*/
#ifndef TVM_RUNTIME_CONTRIB_TFLITE_TFLITE_RUNTIME_H_
#define TVM_RUNTIME_CONTRIB_TFLITE_TFLITE_RUNTIME_H_
#include <dlpack/dlpack.h>
#include <tensorflow/lite/interpreter.h>
#include <tvm/runtime/ndarray.h>
#include <tvm/runtime/packed_func.h>
#include <memory>
#include <string>
#include <vector>
namespace tvm {
namespace runtime {
#define CHECK_TFLITE_STATUS(ret) ICHECK_EQ(ret, kTfLiteOk)
/*!
* \brief Tflite runtime.
*
 * This runtime can be accessed in various languages via
 * the TVM runtime PackedFunc API.
*/
class TFLiteRuntime : public ModuleNode {
public:
/*!
* \brief Get member function to front-end.
* \param name The name of the function.
* \param sptr_to_self The pointer to the module node.
* \return The corresponding member function.
*/
virtual PackedFunc GetFunction(const std::string& name, const ObjectPtr<Object>& sptr_to_self);
/*!
* \return The type key of the executor.
*/
const char* type_key() const { return "TFLiteRuntime"; }
/*!
* \brief Invoke the internal tflite interpreter and run the whole model in
* dependency order.
*/
void Invoke();
/*!
* \brief Initialize the tflite runtime with tflite model and device.
* \param tflite_model_bytes The tflite model.
* \param dev The device where the tflite model will be executed on.
*/
void Init(const std::string& tflite_model_bytes, Device dev);
/*!
* \brief set index-th input to the model.
* \param index The input index.
* \param data_in The input data.
*/
void SetInput(int index, DLTensor* data_in);
/*!
* \brief Return NDArray for given input index.
* \param index The input index.
*
* \return NDArray corresponding to given input node index.
*/
NDArray GetInput(int index) const;
/*!
* \brief Return NDArray for given output index.
* \param index The output index.
*
* \return NDArray corresponding to given output node index.
*/
NDArray GetOutput(int index) const;
/*!
* \brief Set the number of threads available to the interpreter.
* \param num_threads The number of threads to be set.
*/
void SetNumThreads(int num_threads);
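  // Example (hypothetical usage sketch; model_bytes holds a serialized .tflite flatbuffer and
  // input_dltensor is a DLTensor* prepared by the caller):
  //
  //   TFLiteRuntime runtime;
  //   runtime.Init(model_bytes, Device{kDLCPU, 0});
  //   runtime.SetNumThreads(4);
  //   runtime.SetInput(0, input_dltensor);
  //   runtime.Invoke();
  //   NDArray output = runtime.GetOutput(0);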
// Buffer backing the interpreter's model
std::unique_ptr<char[]> flatBuffersBuffer_;
// TFLite interpreter
std::unique_ptr<tflite::Interpreter> interpreter_;
// TVM device
Device device_;
};
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_TFLITE_TFLITE_RUNTIME_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/verilator/verilator_device.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/runtime/contrib/verilator/verilator_device.h
* \brief Use external verilator device.
*/
#ifndef TVM_RUNTIME_CONTRIB_VERILATOR_VERILATOR_DEVICE_H_
#define TVM_RUNTIME_CONTRIB_VERILATOR_VERILATOR_DEVICE_H_
#include <tvm/runtime/c_runtime_api.h>
namespace tvm {
namespace runtime {
namespace contrib {
/*! \brief Verilator device resource context */
typedef void* VerilatorHandle;
/*!
* \brief Allocate a verilator device resource handle
* \return The verilator device handle.
*/
extern "C" TVM_DLL VerilatorHandle VerilatorAlloc();
/*!
* \brief Free a verilator device handle
* \param handle The verilator device handle to be freed.
*/
extern "C" TVM_DLL void VerilatorDealloc(VerilatorHandle handle);
/*!
* \brief Read verilator register or memory
* \param handle The verilator device handle.
* \param id The register or memory identifier.
* \param addr The register or memory address (word-level).
* \return The value of register or memory.
*/
extern "C" TVM_DLL int VerilatorRead(VerilatorHandle handle, int id, int addr);
/*!
* \brief Write verilator register or memory
* \param handle The verilator device handle.
* \param id The register or memory identifier.
* \param addr The register or memory address (word-level).
* \param value The value of register or memory.
*/
extern "C" TVM_DLL void VerilatorWrite(VerilatorHandle handle, int id, int addr, int value);
/*!
* \brief Reset Verilator for n clock cycles
* \param handle The verilator device handle.
* \param n The number of reset cycles.
*/
extern "C" TVM_DLL void VerilatorReset(VerilatorHandle handle, int n);
/*!
* \brief Run Verilator for n clock cycles
* \param handle The verilator device handle.
* \param n The number of run cycles.
*/
extern "C" TVM_DLL void VerilatorRun(VerilatorHandle handle, int n);
} // namespace contrib
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_VERILATOR_VERILATOR_DEVICE_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/verilator/verilator_kernel.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/runtime/contrib/verilator/verilator_kernel.h
* \brief Use external verilator library kernels.
*/
#ifndef TVM_RUNTIME_CONTRIB_VERILATOR_VERILATOR_KERNEL_H_
#define TVM_RUNTIME_CONTRIB_VERILATOR_VERILATOR_KERNEL_H_
#include <tvm/runtime/c_runtime_api.h>
#include "verilator_device.h"
namespace tvm {
namespace runtime {
namespace contrib {
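/*!
 * \brief Offloaded kernel entry points exported by the generated Verilator library.
 *
 * A sketch of the expected semantics, inferred from the signatures rather than a
 * normative description: verilator_add performs an element-wise addition of two
 * p_h_ x p_w_ int buffers into `out`, and verilator_bias_add adds a per-channel
 * bias to an NCHW tensor of shape (p_n_, p_c_, p_h_, p_w_). Both run on the
 * Verilator model referenced by `handle`.
 */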
extern "C" TVM_DLL void verilator_add(VerilatorHandle handle, int* left, int* right, int* out,
int p_h_, int p_w_);
extern "C" TVM_DLL void verilator_bias_add(VerilatorHandle handle, int* data, int* bias, int* out,
int p_n_, int p_c_, int p_h_, int p_w_);
} // namespace contrib
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_VERILATOR_VERILATOR_KERNEL_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/verilator/verilator_runtime.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/runtime/contrib/verilator/verilator_runtime.h
* \brief A runtime for Verilator.
*/
#ifndef TVM_RUNTIME_CONTRIB_VERILATOR_VERILATOR_RUNTIME_H_
#define TVM_RUNTIME_CONTRIB_VERILATOR_VERILATOR_RUNTIME_H_
#include <dlfcn.h>
#include <tvm/runtime/ndarray.h>
#include <tvm/runtime/registry.h>
#include <cstddef>
#include <string>
#include <vector>
#include "../../library_module.h"
#include "../json/json_node.h"
#include "../json/json_runtime.h"
#include "verilator_device.h"
#include "verilator_kernel.h"
namespace tvm {
namespace runtime {
namespace contrib {
using namespace tvm::runtime;
using namespace tvm::runtime::contrib;
using namespace tvm::runtime::json;
typedef VerilatorHandle (*VerilatorAllocFunc)();
typedef void (*VerilatorDeallocFunc)(VerilatorHandle);
typedef void (*VerilatorResetFunc)(VerilatorHandle, int);
typedef int (*VerilatorReadFunc)(VerilatorHandle, int, int);
typedef void (*VerilatorAddFunc)(VerilatorHandle, int*, int*, int*, int, int);
typedef void (*VerilatorBiasAddFunc)(VerilatorHandle, int*, int*, int*, int, int, int, int);
class VerilatorLibrary : public Library {
public:
~VerilatorLibrary();
/*! \brief load library */
void Load(const std::string& name);
  /*! \brief get symbol from library */
void* GetSymbol(const char* name) final;
private:
/*! \brief the library handle */
void* lib_handle_{nullptr};
};
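/*
 * A minimal sketch of how the runtime below might resolve the extern "C" entry
 * points from a loaded VerilatorLibrary and use them through the typedefs above
 * (error handling omitted; the library path and the `left`, `right`, `out`, `h`,
 * and `w` values are hypothetical):
 *
 *   VerilatorLibrary* lib = new VerilatorLibrary();
 *   lib->Load("libverilator.so");
 *   auto alloc = reinterpret_cast<VerilatorAllocFunc>(lib->GetSymbol("VerilatorAlloc"));
 *   auto add = reinterpret_cast<VerilatorAddFunc>(lib->GetSymbol("verilator_add"));
 *   VerilatorHandle dev = alloc();
 *   add(dev, left, right, out, h, w);
 */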
class VerilatorProfiler {
public:
  /*! \brief the cycle counter value */
uint32_t cycle_counter{0};
/*! \brief clear the profiler */
void Clear();
/*! \brief get profiler data */
std::string AsJSON();
  /*! \brief get the thread-local profiler instance */
static VerilatorProfiler* ThreadLocal();
};
class VerilatorRuntime : public JSONRuntimeBase {
public:
VerilatorRuntime(const std::string& symbol_name, const std::string& graph_json,
const Array<String> const_names)
: JSONRuntimeBase(symbol_name, graph_json, const_names) {
VLOG(0) << "creating verilator runtime";
}
~VerilatorRuntime();
const char* type_key() const final { return "verilator"; }
/*! \brief set verilator library */
void SetLibrary(const std::string& lib_name);
/*! \brief set the number of reset cycles */
void SetResetCycles(const int cycles);
/*! \brief enable profiler */
void EnableProfiler();
/*! \brief set cycle counter register id */
void SetProfilerCycleCounterId(const int id);
/*! \brief init verilator runtime */
void Init(const Array<NDArray>& consts) override;
/*! \brief run verilator runtime */
void Run() override;
private:
/*! \brief the verilator library path */
String lib_path_;
/*! \brief the verilator device */
VerilatorHandle device_{nullptr};
/*! \brief the verilator library */
VerilatorLibrary* lib_{nullptr};
/*! \brief the verilator profiler */
VerilatorProfiler* prof_{nullptr};
/*! \brief the verilator read function */
VerilatorReadFunc read_{nullptr};
/*! \brief the verilator reset cycles */
int reset_cycles_{1};
/*! \brief the verilator profiler status */
bool prof_enable_{false};
/*! \brief the verilator profiler cycle counter id */
int prof_cycle_counter_id_{0};
};
} // namespace contrib
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_VERILATOR_VERILATOR_RUNTIME_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/contrib/vitis_ai/vitis_ai_runtime.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
 * \brief Vitis-AI runtime that can run models
* containing only tvm PackedFunc.
* \file vitis_ai_runtime.h
*/
#ifndef TVM_RUNTIME_CONTRIB_VITIS_AI_VITIS_AI_RUNTIME_H_
#define TVM_RUNTIME_CONTRIB_VITIS_AI_VITIS_AI_RUNTIME_H_
#include <dlpack/dlpack.h>
#include <tvm/runtime/ndarray.h>
#include <tvm/runtime/packed_func.h>
// clang-format off
#include <memory>
#include <string>
#include <vector>
// clang-format on
#include <pyxir/pyxir.hpp>
#include <pyxir/runtime/run_options.hpp>
namespace tvm {
namespace runtime {
/*!
* \brief VAI runtime.
*
* This runtime can be accessed in various language via
* TVM runtime PackedFunc API.
*/
class VitisAIRuntime : public ModuleNode {
public:
/*!
* \brief Create VitisAI runtime from serialized XGraph
* \param symbol_name The name of the function.
* \param const_names The names of each constant in the sub-graph.
* \param serialized_rt_mod The serialized runtime module.
   * \param export_rt_mod The path to the file to be used for exporting the
* PyXIR runtime module.
*/
VitisAIRuntime(const std::string& symbol_name, const Array<String> const_names,
const std::string& serialized_rt_mod, const std::string& export_rt_mod);
/*!
* \brief Create VitisAI runtime from serialized XGraph
* \param symbol_name The name of the function.
* \param xgraph_str serialized XGraph representation
* \param const_names The names of each constant in the sub-graph.
* \param dpu_target The Vitis-AI DPU target identifier (e.g. DPUCADX8G, DPUCZDX8G-zcu104).
* \param build_dir The directory to be used for Vitis-AI build files.
* \param work_dir The directory to be used for Vitis-AI work files.
   * \param export_runtime_module_path The path to the file to be used for exporting the
* PyXIR runtime module.
*/
VitisAIRuntime(const std::string& symbol_name, const std::string& xgraph_str,
const Array<String> const_names, const std::string& dpu_target,
const std::string& build_dir, const std::string& work_dir,
const std::string& export_runtime_module_path);
/*!
* \brief Get member function to front-end.
* \param name The name of the function.
* \param sptr_to_self The pointer to the module node.
* \return The corresponding member function.
*/
virtual PackedFunc GetFunction(const std::string& name, const ObjectPtr<Object>& sptr_to_self);
/*!
* \return The type key of the executor.
*/
const char* type_key() const { return "VitisAIRuntime"; }
/*!
* \brief Serialize the content of the pyxir directory and save it to
* binary stream.
* \param stream The binary stream to save to.
*/
void SaveToBinary(dmlc::Stream* stream) final;
private:
/*! \brief The only subgraph name for this module */
std::string symbol_name_;
/*! \brief The required constant names */
Array<String> const_names_;
/*! \brief The runtime module */
pyxir::RtModHolder rt_mod_;
/*! \brief The XGraph input tensor names in the order as provided by TVM */
std::vector<std::string> in_tensor_names_;
/*! \brief The XGraph output tensor names in the order as provided by TVM */
std::vector<std::string> out_tensor_names_;
/*! \brief The file path for exporting the runtime module if set */
std::string export_rt_mod_path_;
/*! \brief Whether constant tensors have been initialized */
bool initialized_{false};
};
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CONTRIB_VITIS_AI_VITIS_AI_RUNTIME_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/aot_executor/aot_executor.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// LINT_C_FILE
/*!
* \file aot_executor.c
* \brief implement AoT executor in C
*/
#include <inttypes.h>
#include <string.h>
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/crt/aot_executor.h>
#include <tvm/runtime/crt/logging.h>
#include <tvm/runtime/crt/module.h>
#include <tvm/runtime/crt/packed_func.h>
#include <tvm/runtime/crt/page_allocator.h>
static void DumpMetadata(const TVMMetadata* md) {
LOG_DEBUG("%s:\n", __FUNCTION__);
LOG_DEBUG("\tmod_name=%s\n", md->mod_name);
LOG_DEBUG("\tversion=%" PRId64 "\n", md->version);
LOG_DEBUG("\tnum_inputs=%" PRId64 "\n", md->num_inputs);
LOG_DEBUG("\tnum_outputs=%" PRId64 "\n", md->num_outputs);
LOG_DEBUG("\tnum_workspace_pools=%" PRId64 "\n", md->num_workspace_pools);
LOG_DEBUG("\tnum_constant_pools=%" PRId64 "\n", md->num_constant_pools);
int i;
for (i = 0; i < md->num_inputs; ++i) {
LOG_DEBUG("\tinput[%d]: %s\n", i, md->inputs[i].name);
}
for (i = 0; i < md->num_outputs; ++i) {
LOG_DEBUG("\toutput[%d]: %s\n", i, md->outputs[i].name);
}
for (i = 0; i < md->num_workspace_pools; ++i) {
LOG_DEBUG("\tworkspace_pools[%d]: %s\n", i, md->workspace_pools[i].name);
}
for (i = 0; i < md->num_constant_pools; ++i) {
LOG_DEBUG("\tconstant_pools[%d]: %s\n", i, md->constant_pools[i].name_hint);
}
}
int TVMAotExecutor_GetNumInputs(TVMAotExecutor* executor) { return executor->metadata->num_inputs; }
int TVMAotExecutor_GetNumOutputs(TVMAotExecutor* executor) {
return executor->metadata->num_outputs;
}
int TVMAotExecutor_GetInputIndex(TVMAotExecutor* executor, const char* name) {
int i;
int rv = -1;
const TVMMetadata* md = executor->metadata;
for (i = 0; i < md->num_inputs; ++i) {
if (!strcmp(md->inputs[i].name, name)) {
rv = i;
break;
}
}
  CHECK_GE(rv, 0, "cannot find '%s' among inputs.", name);
return rv;
}
int TVMAotExecutor_Run(TVMAotExecutor* executor) {
const char* tvm_main_suffix = "___tvm_main__";
char tvm_main_name[TVM_CRT_MAX_STRLEN_FUNCTION_NAME];
{
const size_t max_strlen = TVM_CRT_MAX_STRLEN_FUNCTION_NAME;
size_t len = strnlen(executor->metadata->mod_name, max_strlen);
len += strnlen(tvm_main_suffix, max_strlen);
CHECK_LT(len, max_strlen, "tvm_main name too long %zu\n", len);
}
// create main function name string, e.g. "tvmgen_default___tvm_main__"
snprintf(tvm_main_name, sizeof(tvm_main_name), "%s%s", executor->metadata->mod_name,
tvm_main_suffix);
TVMPackedFunc tvm_main;
TVMArgs temp_args;
CHECK_LE(executor->num_args, TVM_CRT_MAX_ARGS, "too many args %" PRId64 "\n", executor->num_args);
int i;
for (i = 0; i < executor->num_args; ++i) {
temp_args.values[i].v_handle = &executor->args[i].dl_tensor;
temp_args.tcodes[i] = kTVMDLTensorHandle;
}
temp_args.values_count = executor->num_args;
int status =
TVMPackedFunc_InitModuleFunc(&tvm_main, executor->module_handle, tvm_main_name, &temp_args);
if (status != 0) {
return status;
}
CHECK_EQ(tvm_main.Call(&tvm_main), 0, "call to %s failed", tvm_main_name);
return 0;
}
int TVMAotExecutor_Init(TVMAotExecutor* executor, TVMModuleHandle module_handle,
const DLDevice device, const char* module_name) {
executor->module_handle = module_handle;
executor->device = device;
// get a pointer to the PackedFunc get_c_metadata() which gives us access to the top-level
// metadata structure
TVMPackedFunc get_c_metadata;
TVMArgs temp_args;
temp_args.values_count = 0;
const char* tvmgen_prefix = "tvmgen_";
const char* get_c_metdata_suffix = "_get_c_metadata";
char get_c_metdata_name[TVM_CRT_MAX_STRLEN_FUNCTION_NAME];
{
size_t max_strlen = TVM_CRT_MAX_STRLEN_FUNCTION_NAME;
size_t len = strnlen(tvmgen_prefix, max_strlen);
len += strnlen(module_name, max_strlen);
len += strnlen(get_c_metdata_suffix, max_strlen);
CHECK_LT(len, max_strlen, "get_c_metadata name too long %zu\n", len);
}
// create get_c_metadata() function name string, e.g. "tvmgen_default_get_c_metadata()"
snprintf(get_c_metdata_name, sizeof(get_c_metdata_name), "%s%s%s", tvmgen_prefix, module_name,
get_c_metdata_suffix);
int status = TVMPackedFunc_InitModuleFunc(&get_c_metadata, executor->module_handle,
get_c_metdata_name, &temp_args);
if (status != 0) {
return status;
}
CHECK_EQ(get_c_metadata.Call(&get_c_metadata), 0, "get_c_metadata");
// save the returned pointer to the top-level metadata
executor->metadata = (TVMMetadata*)get_c_metadata.ret_value.values[0].v_handle;
const TVMMetadata* md = executor->metadata;
DumpMetadata(md);
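  // The argument list is laid out as [inputs..., outputs..., workspace_pools...],
  // so callers locate output k at args[num_inputs + k].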
executor->num_args = md->num_inputs + md->num_outputs + md->num_workspace_pools;
tvm_crt_error_t err = TVMPlatformMemoryAllocate(executor->num_args * sizeof(*executor->args),
executor->device, (void**)(&executor->args));
if (err != kTvmErrorNoError) {
return -1;
}
int i;
int arg_idx = 0;
for (i = 0; i < md->num_inputs; ++i) {
LOG_DEBUG("input allocate[%d]: %s\n", i, md->inputs[i].name);
TVMNDArray* array = &executor->args[arg_idx++];
status = TVMNDArray_Empty(md->inputs[i].num_shape, md->inputs[i].shape, md->inputs[i].dtype,
executor->device, array);
if (status != 0) {
return status;
}
TVMNDArray_IncrementReference(array);
}
for (i = 0; i < md->num_outputs; ++i) {
LOG_DEBUG("output allocate[%d]: %s\n", i, md->outputs[i].name);
TVMNDArray* array = &executor->args[arg_idx++];
status = TVMNDArray_Empty(md->outputs[i].num_shape, md->outputs[i].shape, md->outputs[i].dtype,
executor->device, array);
if (status != 0) {
return status;
}
TVMNDArray_IncrementReference(array);
}
for (i = 0; i < md->num_workspace_pools; ++i) {
LOG_DEBUG("pools allocate[%d]: %s\n", i, md->workspace_pools[i].name);
status = TVMNDArray_Empty(md->workspace_pools[i].num_shape, md->workspace_pools[i].shape,
md->workspace_pools[i].dtype, executor->device,
&executor->args[arg_idx++]);
if (status != 0) {
return status;
}
}
CHECK_EQ(0, md->num_constant_pools, "Constant pools not supported");
return status;
}
int TVMAotExecutor_Create(TVMModuleHandle module_handle, const DLDevice device,
TVMAotExecutor** executor, const char* module_name) {
tvm_crt_error_t err = TVMPlatformMemoryAllocate(sizeof(**executor), device, (void**)executor);
if (err != kTvmErrorNoError) {
return -1;
}
memset(*executor, 0, sizeof(**executor));
return TVMAotExecutor_Init(*executor, module_handle, device, module_name);
}
int TVMAotExecutor_Release(TVMAotExecutor* executor, const DLDevice device) {
int status;
if (executor->num_args > 0) {
    // free TVMNDArray data memory for each argument
int i;
for (i = 0; i < executor->num_args; ++i) {
status = TVMNDArray_Release(&executor->args[i]);
if (status != 0) {
return status;
}
}
// free TVMNDArray argument list
status = TVMPlatformMemoryFree(executor->args, executor->device);
if (status != 0) {
return status;
}
}
status = TVMPlatformMemoryFree(executor, device);
if (status != 0) {
return status;
}
return 0;
}
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/aot_executor_module/aot_executor_module.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// LINT_C_FILE
/*!
* \file aot_executor_module.c
* \brief wrap aot_executor into a TVMModule for use with RPC.
*/
#include <stdio.h>
#include <tvm/runtime/crt/aot_executor.h>
#include <tvm/runtime/crt/aot_executor_module.h>
#include <tvm/runtime/crt/func_registry.h>
#include <tvm/runtime/crt/module.h>
typedef struct {
TVMModule mod;
TVMAotExecutor* executor;
} AotExecutorModule;
static AotExecutorModule aot_executor;
int32_t TVMAotExecutorModule_Create(TVMValue* args, int* tcodes, int nargs, TVMValue* ret_values,
int* ret_tcodes, void* resource_handle) {
if (aot_executor.executor != NULL) {
return kTvmErrorExecutorModuleAlreadyCreated;
}
if (nargs != 3) {
return kTvmErrorFunctionCallNumArguments;
}
if (tcodes[0] != kTVMModuleHandle || tcodes[1] != kDLDevice || tcodes[2] != kTVMStr) {
return kTvmErrorFunctionCallWrongArgType;
}
DLDevice dev = args[1].v_device;
if (dev.device_type != kDLCPU) {
return kTvmErrorExecutorModuleBadContext;
}
TVMAotExecutor_Create(args[0].v_handle, dev, &aot_executor.executor, args[2].v_str);
TVMModuleHandle out_mod;
int status = TVMModCreateFromCModule(&aot_executor.mod, &out_mod);
if (status != 0) {
ret_tcodes[0] = kTVMNullptr;
TVMAotExecutor_Release(aot_executor.executor, dev);
return status;
}
ret_values[0].v_handle = out_mod;
ret_tcodes[0] = kTVMModuleHandle;
return kTvmErrorNoError;
}
int32_t TVMAotExecutorModule_NotImplemented(TVMValue* args, int* tcodes, int nargs,
TVMValue* ret_values, int* ret_tcodes,
void* resource_handle) {
return kTvmErrorFunctionCallNotImplemented;
}
int32_t TVMAotExecutorModule_GetInput(TVMValue* args, int* tcodes, int nargs, TVMValue* ret_values,
int* ret_tcodes, void* resource_handle) {
int64_t index;
if (tcodes[0] == kTVMArgInt) {
if (args[0].v_int64 > TVMAotExecutor_GetNumInputs(aot_executor.executor)) {
return kTvmErrorFunctionCallInvalidArg;
}
index = args[0].v_int64;
} else {
index = TVMAotExecutor_GetInputIndex(aot_executor.executor, args[0].v_str);
if (index < 0) {
return kTvmErrorExecutorModuleNoSuchInput;
}
}
TVMNDArray* array = &aot_executor.executor->args[index];
TVMNDArray_IncrementReference(array);
ret_values[0].v_handle = (void*)(&array->dl_tensor);
ret_tcodes[0] = kTVMNDArrayHandle;
return 0;
}
int32_t TVMAotExecutorModule_GetOutput(TVMValue* args, int* tcodes, int nargs, TVMValue* ret_values,
int* ret_tcodes, void* resource_handle) {
if (nargs != 1) {
return kTvmErrorFunctionCallNumArguments;
}
if (args[0].v_int64 > TVMAotExecutor_GetNumOutputs(aot_executor.executor)) {
return kTvmErrorFunctionCallInvalidArg;
}
// index past the input entries
int64_t index = args[0].v_int64 + TVMAotExecutor_GetNumInputs(aot_executor.executor);
TVMNDArray* array = &aot_executor.executor->args[index];
TVMNDArray_IncrementReference(array);
ret_values[0].v_handle = (void*)(&array->dl_tensor);
ret_tcodes[0] = kTVMNDArrayHandle;
return 0;
}
int32_t TVMAotExecutorModule_GetInputIndex(TVMValue* args, int* tcodes, int nargs,
TVMValue* ret_values, int* ret_tcodes,
void* resource_handle) {
if (nargs != 1) {
return kTvmErrorFunctionCallNumArguments;
}
int index = TVMAotExecutor_GetInputIndex(aot_executor.executor, args[0].v_str);
if (index < 0) {
return kTvmErrorExecutorModuleNoSuchInput;
}
ret_values[0].v_int64 = index;
ret_tcodes[0] = kTVMArgInt;
return 0;
}
int32_t TVMAotExecutorModule_GetNumInputs(TVMValue* args, int* tcodes, int nargs,
TVMValue* ret_values, int* ret_tcodes,
void* resource_handle) {
if (nargs != 0) {
return kTvmErrorFunctionCallNumArguments;
}
ret_values[0].v_int64 = TVMAotExecutor_GetNumInputs(aot_executor.executor);
ret_tcodes[0] = kTVMArgInt;
return 0;
}
int32_t TVMAotExecutorModule_GetNumOutputs(TVMValue* args, int* tcodes, int nargs,
TVMValue* ret_values, int* ret_tcodes,
void* resource_handle) {
if (nargs != 0) {
return kTvmErrorFunctionCallNumArguments;
}
ret_values[0].v_int64 = TVMAotExecutor_GetNumOutputs(aot_executor.executor);
ret_tcodes[0] = kTVMArgInt;
return 0;
}
int32_t TVMAotExecutorModule_Run(TVMValue* args, int* tcodes, int nargs, TVMValue* ret_values,
int* ret_tcodes, void* resource_handle) {
if (nargs != 0) {
return kTvmErrorFunctionCallNumArguments;
}
return TVMAotExecutor_Run(aot_executor.executor);
}
static const TVMBackendPackedCFunc aot_executor_registry_funcs[] = {
&TVMAotExecutorModule_GetInput, // get_input
&TVMAotExecutorModule_GetInputIndex, // get_input_index
&TVMAotExecutorModule_NotImplemented, // get_input_info (do not implement)
&TVMAotExecutorModule_GetNumInputs, // get_num_inputs
&TVMAotExecutorModule_GetNumOutputs, // get_num_outputs
&TVMAotExecutorModule_GetOutput, // get_output
&TVMAotExecutorModule_NotImplemented, // load_params (do not implement)
&TVMAotExecutorModule_Run, // run
&TVMAotExecutorModule_NotImplemented, // set_input (implemented via python wrapper)
&TVMAotExecutorModule_NotImplemented, // share_params (do not implement)
};
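// Function names for the registry above. The leading "\x0a\0" bytes are read back by
// TVMFuncRegistry_GetNumFuncs as a uint16 function count (10 on little-endian targets),
// matching the number of entries in aot_executor_registry_funcs; each name that follows
// is stored NUL-terminated, in the same order as the function-pointer array.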
static const TVMFuncRegistry aot_executor_registry = {
"\x0a\0get_input\0"
"get_input_index\0"
"get_input_info\0"
"get_num_inputs\0"
"get_num_outputs\0"
"get_output\0"
"load_params\0"
"run\0"
"set_input\0"
"share_params\0",
aot_executor_registry_funcs};
tvm_crt_error_t TVMAotExecutorModule_Register() {
aot_executor.mod.registry = &aot_executor_registry;
aot_executor.executor = NULL;
return TVMFuncRegisterGlobal("tvm.aot_executor.create", &TVMAotExecutorModule_Create, 0);
}
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/common/crt_backend_api.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// LINT_C_FILE
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tvm/runtime/c_backend_api.h>
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/crt/logging.h>
#include <tvm/runtime/crt/page_allocator.h>
#include <tvm/runtime/crt/platform.h>
#include "crt_config.h"
void* TVMBackendAllocWorkspace(int device_type, int device_id, uint64_t nbytes, int dtype_code_hint,
int dtype_bits_hint) {
tvm_crt_error_t err = kTvmErrorNoError;
void* ptr = 0;
DLDevice dev = {device_type, device_id};
assert(nbytes > 0);
err = TVMPlatformMemoryAllocate(nbytes, dev, &ptr);
CHECK_EQ(err, kTvmErrorNoError,
"TVMBackendAllocWorkspace(%d, %d, %" PRIu64 ", %d, %d) -> %" PRId32, device_type,
device_id, nbytes, dtype_code_hint, dtype_bits_hint, err);
return ptr;
}
int TVMBackendFreeWorkspace(int device_type, int device_id, void* ptr) {
tvm_crt_error_t err = kTvmErrorNoError;
DLDevice dev = {device_type, device_id};
err = TVMPlatformMemoryFree(ptr, dev);
return err;
}
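// The CRT runtime is single-threaded: the parallel lambda below is executed once,
// inline, with a single-task environment rather than being dispatched to worker threads.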
int TVMBackendParallelLaunch(FTVMParallelLambda flambda, void* cdata, int num_task) {
TVMParallelGroupEnv env;
env.num_task = 1;
flambda(0, &env, cdata);
return 0;
}
int TVMBackendRegisterSystemLibSymbol(const char* name, void* ptr) {
return TVMFuncRegisterGlobal(name, ptr, 0);
}
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/common/crt_runtime_api.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// LINT_C_FILE
#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/crt/crt.h>
#include <tvm/runtime/crt/func_registry.h>
#include <tvm/runtime/crt/internal/common/ndarray.h>
#include <tvm/runtime/crt/internal/graph_executor/graph_executor.h>
#include <tvm/runtime/crt/platform.h>
#if defined(_WIN32) || defined(WIN32)
#include <windows.h>
#elif __unix__
#include <unistd.h>
#endif
// Handle internal errors
static char g_last_error[1024];
void TVMAPISetLastError(const char* msg) {
strncpy(g_last_error, msg, sizeof(g_last_error) - 1);
g_last_error[sizeof(g_last_error) - 1] = 0;
}
__attribute__((format(printf, 1, 2))) int TVMAPIErrorf(const char* msg, ...) {
va_list args;
int to_return;
va_start(args, msg);
to_return = vsnprintf(g_last_error, sizeof(g_last_error), msg, args);
va_end(args);
return to_return;
}
const char* TVMGetLastError(void) { return g_last_error; }
// Manipulate NDArray on target device
int TVMArrayAlloc(const tvm_index_t* shape, int ndim, int dtype_code, int dtype_bits,
int dtype_lanes, int device_type, int device_id, TVMArrayHandle* out) {
DLDataType dtype;
dtype.code = dtype_code;
dtype.bits = dtype_bits;
dtype.lanes = dtype_lanes;
DLDevice dev;
dev.device_type = (DLDeviceType)device_type;
dev.device_id = device_id;
TVMNDArray arr;
int status = TVMNDArray_Empty(ndim, shape, dtype, dev, &arr);
if (status != 0) {
return status;
}
**out = arr.dl_tensor;
return 0;
}
int TVMArrayFree(TVMArrayHandle handle) {
TVMNDArray* arr = (TVMNDArray*)handle;
return TVMNDArray_Release(arr);
}
int TVMDeviceAllocDataSpace(DLDevice dev, size_t nbytes, size_t alignment, DLDataType type_hint,
void** out_data) {
if (alignment != 1) {
nbytes = (nbytes + alignment - 1) / alignment * alignment;
}
return TVMPlatformMemoryAllocate(nbytes, dev, out_data);
}
int TVMDeviceAllocDataSpaceWithScope(DLDevice dev, int ndim, const int64_t* shape, DLDataType dtype,
const char* mem_scope, void** out_data) {
size_t nbytes = 1;
for (int i = 0; i < ndim; ++i) {
nbytes *= shape[i];
}
nbytes *= (dtype.bits * dtype.lanes + 7) / 8;
int kAllocAlignment = 64;
size_t align = (dtype.bits / 8) * dtype.lanes;
if (align < kAllocAlignment) align = kAllocAlignment;
return TVMDeviceAllocDataSpace(dev, nbytes, align, dtype, out_data);
}
int TVMDeviceFreeDataSpace(DLDevice dev, void* ptr) { return TVMPlatformMemoryFree(ptr, dev); }
static bool IsContiguous(const DLTensor* arr) {
if (arr->strides == NULL) return true;
int64_t expected_stride = 1;
for (int32_t i = arr->ndim; i != 0; --i) {
int32_t k = i - 1;
if (arr->strides[k] != expected_stride) return false;
expected_stride *= arr->shape[k];
}
return true;
}
int TVMDeviceCopyDataFromTo(DLTensor* from, DLTensor* to, TVMStreamHandle stream) {
assert(IsContiguous(from) && IsContiguous(to));
size_t size = 1;
for (int i = 0; i < from->ndim; ++i) {
size *= from->shape[i];
}
size *= (from->dtype.bits * from->dtype.lanes + 7) / 8;
memcpy(((uint8_t*)to->data) + to->byte_offset, ((uint8_t*)from->data) + from->byte_offset, size);
return 0;
}
int TVMStreamCreate(int device_type, int device_id, TVMStreamHandle* out) {
  *out = NULL;
return 0;
}
int TVMStreamFree(int device_type, int device_id, TVMStreamHandle stream) { return 0; }
int TVMSetStream(int device_type, int device_id, TVMStreamHandle stream) { return 0; }
int TVMSynchronize(int device_type, int device_id, TVMStreamHandle stream) { return 0; }
static TVMMutableFuncRegistry global_func_registry;
int TVMFuncRegisterGlobal(const char* name, TVMFunctionHandle f, int override) {
return TVMMutableFuncRegistry_Set(&global_func_registry, name, f, override != 0);
}
static const TVMModule* registered_modules[TVM_CRT_MAX_REGISTERED_MODULES];
/*! \brief Passed as `module_index` to EncodeFunctionHandle. */
static const tvm_module_index_t kGlobalFuncModuleIndex = TVM_CRT_MAX_REGISTERED_MODULES;
/*! \brief Special module handle for return values from RPCTimeEvaluator. */
static const tvm_module_index_t kTimeEvaluatorModuleIndex = 0x7fff;
static int DecodeModuleHandle(TVMModuleHandle handle, tvm_module_index_t* out_module_index) {
tvm_module_index_t module_index;
module_index = ((tvm_module_index_t)((uintptr_t)handle)) & ~0x8000;
if (module_index > TVM_CRT_MAX_REGISTERED_MODULES || registered_modules[module_index] == NULL) {
TVMAPIErrorf("invalid module handle: %08x", module_index);
return -1;
}
*out_module_index = module_index;
return 0;
}
static TVMModuleHandle EncodeModuleHandle(tvm_module_index_t module_index) {
return (TVMModuleHandle)((uintptr_t)(module_index | 0x8000));
}
int TVMModCreateFromCModule(const TVMModule* mod, TVMModuleHandle* out_handle) {
tvm_module_index_t idx;
for (idx = 0; idx < TVM_CRT_MAX_REGISTERED_MODULES; idx++) {
if (registered_modules[idx] == NULL) {
registered_modules[idx] = mod;
*out_handle = EncodeModuleHandle(idx);
return 0;
}
}
return -1;
}
static const TVMModuleHandle kTVMModuleHandleUninitialized = (TVMModuleHandle)(~0UL);
static TVMModuleHandle system_lib_handle;
int TVMModFree(TVMModuleHandle mod) {
  /* Never free system_lib_handle */
if (mod == system_lib_handle && system_lib_handle != kTVMModuleHandleUninitialized) {
return 0;
}
tvm_module_index_t module_index;
if (DecodeModuleHandle(mod, &module_index) != 0) {
return -1;
}
registered_modules[module_index] = NULL;
return 0;
}
static int SystemLibraryCreate(TVMValue* args, int* type_codes, int num_args, TVMValue* ret_val,
int* ret_type_codes) {
const TVMModule* system_lib;
if (system_lib_handle == kTVMModuleHandleUninitialized) {
system_lib = TVMSystemLibEntryPoint();
if (TVMModCreateFromCModule(system_lib, &system_lib_handle) != 0) {
TVMAPIErrorf("error registering system lib");
return -1;
}
}
ret_val[0].v_handle = system_lib_handle;
ret_type_codes[0] = kTVMModuleHandle;
return 0;
}
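// A TVMFunctionHandle packs (module_index | 0x8000) into the high bits and
// (function_index | 0x8000) into the low sizeof(tvm_function_index_t) * 8 bits, so the
// handle alone identifies both the registry and the slot without any allocation. For
// example, with a 16-bit tvm_function_index_t, module 1 / function 3 encodes as
// ((uintptr_t)0x8001 << 16) | 0x8003 == 0x80018003.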
static TVMFunctionHandle EncodeFunctionHandle(tvm_module_index_t module_index,
tvm_function_index_t function_index) {
return (TVMFunctionHandle)(
(((uintptr_t)(module_index | 0x8000) << (sizeof(tvm_function_index_t) * 8)) |
(function_index | 0x8000)));
}
static int DecodeFunctionHandle(TVMFunctionHandle handle, tvm_module_index_t* module_index,
tvm_function_index_t* function_index) {
tvm_module_index_t unvalidated_module_index;
unvalidated_module_index =
(tvm_module_index_t)(((uintptr_t)handle) >> (sizeof(tvm_function_index_t) * 8));
unvalidated_module_index &= ~0x8000;
if (unvalidated_module_index != kTimeEvaluatorModuleIndex) {
if (unvalidated_module_index > kGlobalFuncModuleIndex) {
TVMAPIErrorf("invalid module handle: index=%08x", unvalidated_module_index);
return -1;
} else if (unvalidated_module_index < kGlobalFuncModuleIndex &&
registered_modules[unvalidated_module_index] == NULL) {
TVMAPIErrorf("unregistered module: index=%08x", unvalidated_module_index);
return -1;
}
}
*function_index = ((uint32_t)((uintptr_t)handle)) & ~0x8000;
*module_index = unvalidated_module_index;
return 0;
}
int TVMByteArrayFree(TVMByteArray* arr) {
DLDevice dev = {kDLCPU, 0};
int to_return = TVMPlatformMemoryFree((void*)arr->data, dev);
if (to_return != 0) {
return to_return;
}
return TVMPlatformMemoryFree((void*)arr, dev);
}
tvm_crt_error_t RunTimeEvaluator(tvm_function_index_t function_index, TVMValue* args,
int* type_codes, int num_args, TVMValue* ret_val,
int* ret_type_code);
int TVMFuncCall(TVMFunctionHandle func_handle, TVMValue* arg_values, int* type_codes, int num_args,
TVMValue* ret_val, int* ret_type_code) {
tvm_module_index_t module_index;
tvm_function_index_t function_index;
void* resource_handle;
const TVMFuncRegistry* registry;
TVMBackendPackedCFunc func;
if (DecodeFunctionHandle(func_handle, &module_index, &function_index) != 0) {
return -1;
}
if (module_index == kTimeEvaluatorModuleIndex) {
return RunTimeEvaluator(function_index, arg_values, type_codes, num_args, ret_val,
ret_type_code);
} else if (module_index == kGlobalFuncModuleIndex) {
resource_handle = NULL;
registry = &global_func_registry.registry;
} else {
resource_handle = (void*)registered_modules[module_index]->registry;
registry = registered_modules[module_index]->registry;
}
if (TVMFuncRegistry_GetByIndex(registry, function_index, &func) != 0) {
TVMAPIErrorf("invalid function index: %04" PRIx16, function_index);
return -1;
}
ret_type_code[0] = kTVMNullptr;
ret_val[0].v_handle = NULL;
return func(arg_values, type_codes, num_args, ret_val, ret_type_code, resource_handle);
}
static tvm_crt_error_t FindFunctionOrSetAPIError(tvm_module_index_t module_index,
const TVMFuncRegistry* registry, const char* name,
TVMFunctionHandle* out) {
tvm_function_index_t function_index;
tvm_crt_error_t err = TVMFuncRegistry_Lookup(registry, name, &function_index);
if (err != kTvmErrorNoError) {
return err;
}
*out = EncodeFunctionHandle(module_index, function_index);
return kTvmErrorNoError;
}
int TVMFuncGetGlobal(const char* name, TVMFunctionHandle* out) {
tvm_crt_error_t to_return =
FindFunctionOrSetAPIError(kGlobalFuncModuleIndex, &global_func_registry.registry, name, out);
// For compatibility with the C++ runtime equivalent, in src/runtime/registry.cc.
if (to_return == kTvmErrorFunctionNameNotFound) {
*out = NULL;
to_return = kTvmErrorNoError;
}
return to_return;
}
int TVMModGetFunction(TVMModuleHandle mod, const char* func_name, int query_imports,
TVMFunctionHandle* out) {
tvm_module_index_t module_index;
if (DecodeModuleHandle(mod, &module_index) != 0) {
return -1;
}
return FindFunctionOrSetAPIError(module_index, registered_modules[module_index]->registry,
func_name, out);
}
int ModuleGetFunction(TVMValue* args, int* type_codes, int num_args, TVMValue* ret_value,
int* ret_type_codes) {
TVMModuleHandle mod;
const char* name;
int to_return;
int query_imports;
ret_value[0].v_handle = NULL;
ret_type_codes[0] = kTVMNullptr;
if (num_args != 3 || type_codes[0] != kTVMModuleHandle || type_codes[1] != kTVMStr ||
type_codes[2] != kDLInt) {
return 0;
}
mod = (TVMModuleHandle)args[0].v_handle;
name = args[1].v_str;
query_imports = args[2].v_int64 != 0;
to_return = TVMModGetFunction(mod, name, query_imports, &ret_value->v_handle);
if (to_return == 0) {
ret_type_codes[0] = kTVMPackedFuncHandle;
} else {
ret_value->v_handle = NULL;
}
// NOTE: For compatibility with C++ runtime API, return no error (but NULL function) when the
// function lookup failed.
if (to_return == kTvmErrorFunctionNameNotFound) {
to_return = kTvmErrorNoError;
}
return to_return;
}
typedef struct TVMCReturnValue {
TVMValue* ret_val;
int* ret_type_code;
} TVMCReturnValue;
int TVMCFuncSetReturn(TVMRetValueHandle ret, TVMValue* value, int* type_code, int num_ret) {
TVMCReturnValue* ret_val;
int idx;
ret_val = (TVMCReturnValue*)ret;
for (idx = 0; idx < num_ret; idx++) {
ret_val->ret_val[idx] = value[idx];
ret_val->ret_type_code[idx] = type_code[idx];
}
return 0;
}
int TVMFuncFree(TVMFunctionHandle func) {
// A no-op, since we don't actually allocate anything in GetFunction.
return 0;
}
int RPCTimeEvaluator(TVMValue* args, int* type_codes, int num_args, TVMValue* ret_val,
int* ret_type_code);
// Sends CRT max packet size.
int RPCGetCRTMaxPacketSize(TVMValue* args, int* type_codes, int num_args, TVMValue* ret_value,
int* ret_type_codes) {
// 11 bytes is for microtvm overhead:
// packet start(2), length(4), session header(3), crc(2)
ret_value[0].v_int64 = TVM_CRT_MAX_PACKET_SIZE_BYTES - 11;
ret_type_codes[0] = kTVMArgInt;
return 0;
}
// Fill the tensor in args[0] with random data using TVMPlatformGenerateRandom.
static int RandomFill(TVMValue* args, int* type_codes, int num_args, TVMValue* ret_val,
int* ret_type_code) {
if (num_args != 1) {
return kTvmErrorFunctionCallNumArguments;
}
if (type_codes[0] != kTVMDLTensorHandle) {
return kTvmErrorFunctionCallWrongArgType;
}
DLTensor* tensor = (DLTensor*)args[0].v_handle;
TVMNDArray arr = {*tensor, 0};
return TVMNDArray_RandomFill(&arr);
}
tvm_crt_error_t TVMInitializeRuntime() {
int idx = 0;
tvm_crt_error_t error = kTvmErrorNoError;
DLDevice dev = {kDLCPU, 0};
void* registry_backing_memory;
error = TVMPlatformMemoryAllocate(TVM_CRT_GLOBAL_FUNC_REGISTRY_SIZE_BYTES, dev,
®istry_backing_memory);
if (error != kTvmErrorNoError) {
return error;
}
system_lib_handle = kTVMModuleHandleUninitialized;
error = TVMMutableFuncRegistry_Create(&global_func_registry, registry_backing_memory,
TVM_CRT_GLOBAL_FUNC_REGISTRY_SIZE_BYTES);
for (idx = 0; idx < TVM_CRT_MAX_REGISTERED_MODULES; idx++) {
registered_modules[idx] = NULL;
}
if (error == kTvmErrorNoError) {
error = TVMFuncRegisterGlobal("runtime.SystemLib", &SystemLibraryCreate, 0);
}
if (error == kTvmErrorNoError) {
error = TVMFuncRegisterGlobal("tvm.rpc.server.ModuleGetFunction", &ModuleGetFunction, 0);
}
if (error == kTvmErrorNoError) {
error = TVMFuncRegisterGlobal("runtime.RPCTimeEvaluator", &RPCTimeEvaluator, 0);
}
if (error == kTvmErrorNoError) {
error = TVMFuncRegisterGlobal("tvm.rpc.server.GetCRTMaxPacketSize", &RPCGetCRTMaxPacketSize, 0);
}
if (error == kTvmErrorNoError) {
error = TVMFuncRegisterGlobal("tvm.contrib.random.random_fill", &RandomFill, 0);
}
if (error != kTvmErrorNoError) {
TVMPlatformMemoryFree(registry_backing_memory, dev);
}
return error;
}
typedef struct {
uint16_t function_index;
TVMFunctionHandle func_to_time;
DLDevice device;
int number;
int repeat;
int min_repeat_ms;
int limit_zero_time_iterations;
int cooldown_interval_ms;
int repeats_to_cooldown;
} time_evaluator_state_t;
static time_evaluator_state_t g_time_evaluator_state;
int RPCTimeEvaluator(TVMValue* args, int* type_codes, int num_args, TVMValue* ret_val,
int* ret_type_code) {
ret_val[0].v_handle = NULL;
ret_type_code[0] = kTVMNullptr;
if (num_args < 11) {
TVMAPIErrorf("not enough args");
return kTvmErrorFunctionCallNumArguments;
}
if (type_codes[0] != kTVMModuleHandle || type_codes[1] != kTVMStr ||
type_codes[2] != kTVMArgInt || type_codes[3] != kTVMArgInt || type_codes[4] != kTVMArgInt ||
type_codes[5] != kTVMArgInt || type_codes[6] != kTVMArgInt || type_codes[7] != kTVMArgInt ||
type_codes[8] != kTVMArgInt || type_codes[9] != kTVMArgInt || type_codes[10] != kTVMStr) {
TVMAPIErrorf("one or more invalid arg types");
return kTvmErrorFunctionCallWrongArgType;
}
TVMModuleHandle mod = (TVMModuleHandle)args[0].v_handle;
const char* name = args[1].v_str;
g_time_evaluator_state.device.device_type = args[2].v_int64;
g_time_evaluator_state.device.device_id = args[3].v_int64;
g_time_evaluator_state.number = args[4].v_int64;
g_time_evaluator_state.repeat = args[5].v_int64;
g_time_evaluator_state.min_repeat_ms = args[6].v_int64;
g_time_evaluator_state.limit_zero_time_iterations = args[7].v_int64;
g_time_evaluator_state.cooldown_interval_ms = args[8].v_int64;
g_time_evaluator_state.repeats_to_cooldown = args[9].v_int64;
int ret_code =
TVMModGetFunction(mod, name, /* query_imports */ 0, &g_time_evaluator_state.func_to_time);
if (ret_code != 0) {
return ret_code;
}
g_time_evaluator_state.function_index++;
ret_val[0].v_handle =
EncodeFunctionHandle(kTimeEvaluatorModuleIndex, g_time_evaluator_state.function_index);
ret_type_code[0] = kTVMPackedFuncHandle;
return kTvmErrorNoError;
}
tvm_crt_error_t RunTimeEvaluator(tvm_function_index_t function_index, TVMValue* args,
int* type_codes, int num_args, TVMValue* ret_val,
int* ret_type_code) {
if (function_index != g_time_evaluator_state.function_index) {
return kTvmErrorTimeEvaluatorBadHandle;
}
// TODO(areusch): should *really* rethink needing to return doubles
DLDevice result_byte_dev = {kDLCPU, 0};
TVMByteArray* result_byte_arr = NULL;
tvm_crt_error_t err =
TVMPlatformMemoryAllocate(sizeof(TVMByteArray), result_byte_dev, (void*)&result_byte_arr);
if (err != kTvmErrorNoError) {
goto release_and_return;
}
result_byte_arr->data = NULL;
size_t data_size = sizeof(double) * g_time_evaluator_state.repeat;
err = TVMPlatformMemoryAllocate(data_size, result_byte_dev, (void**)&result_byte_arr->data);
if (err != kTvmErrorNoError) {
goto release_and_return;
}
result_byte_arr->size = data_size;
// skip first time call, to activate lazy compilation components.
err = TVMFuncCall(g_time_evaluator_state.func_to_time, args, type_codes, num_args, ret_val,
ret_type_code);
if (err != kTvmErrorNoError) {
goto release_and_return;
}
double min_repeat_seconds = ((double)g_time_evaluator_state.min_repeat_ms) / 1000;
double* iter = (double*)result_byte_arr->data;
for (int i = 0; i < g_time_evaluator_state.repeat; i++) {
double curr_res_seconds = 0.0;
int absolute_zero_times = 0;
// do-while structure ensures we run even when `min_repeat_ms` isn't set (i.e., is 0).
do {
if (curr_res_seconds > 0.0) {
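        // Grow `number` so the next batch runs for at least min_repeat_seconds: use the
        // larger of the projected iteration count and a golden-ratio (~1.618x) increase
        // over the previous batch size.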
double a = (min_repeat_seconds / (curr_res_seconds / g_time_evaluator_state.number) + 1);
const double golden_ratio = 1.618;
double b = g_time_evaluator_state.number * golden_ratio;
g_time_evaluator_state.number = (int64_t)(a > b ? a : b);
}
err = TVMPlatformBeforeMeasurement();
if (err != kTvmErrorNoError) {
goto release_and_return;
}
err = TVMPlatformTimerStart();
if (err != kTvmErrorNoError) {
goto release_and_return;
}
for (int j = 0; j < g_time_evaluator_state.number; j++) {
err = TVMFuncCall(g_time_evaluator_state.func_to_time, args, type_codes, num_args, ret_val,
ret_type_code);
if (err != kTvmErrorNoError) {
goto release_and_return;
}
}
err = TVMPlatformTimerStop(&curr_res_seconds);
if (err != kTvmErrorNoError) {
goto release_and_return;
}
err = TVMPlatformAfterMeasurement();
if (err != kTvmErrorNoError) {
goto release_and_return;
}
if (fpclassify(curr_res_seconds) == FP_ZERO) absolute_zero_times++;
} while (curr_res_seconds < min_repeat_seconds &&
absolute_zero_times < g_time_evaluator_state.limit_zero_time_iterations);
double mean_exec_seconds = curr_res_seconds / g_time_evaluator_state.number;
*iter = mean_exec_seconds;
iter++;
if (g_time_evaluator_state.cooldown_interval_ms > 0 &&
(i % g_time_evaluator_state.repeats_to_cooldown) == 0) {
#if defined(_WIN32) || defined(WIN32)
Sleep(g_time_evaluator_state.cooldown_interval_ms);
#elif __unix__
usleep(g_time_evaluator_state.cooldown_interval_ms * 1000);
#else
TVMAPIErrorf(
"No support for non-zero cooldown_interval_ms for this platform: Use "
"cooldown_interval_ms = 0");
goto release_and_return;
#endif
}
}
*ret_type_code = kTVMBytes;
ret_val->v_handle = result_byte_arr;
return err;
release_and_return : {
tvm_crt_error_t release_err =
TVMPlatformMemoryFree((void*)result_byte_arr->data, result_byte_dev);
if (release_err != kTvmErrorNoError) {
release_err = TVMPlatformMemoryFree((void*)result_byte_arr, result_byte_dev);
}
if (err == kTvmErrorNoError && release_err != kTvmErrorNoError) {
err = release_err;
}
}
return err;
}
// Default implementation, overridden by the platform runtime.
__attribute__((weak)) tvm_crt_error_t TVMPlatformGenerateRandom(uint8_t* buffer, size_t num_bytes) {
return kTvmErrorFunctionCallNotImplemented;
}
// Default implementation, overridden by the platform runtime.
__attribute__((weak)) tvm_crt_error_t TVMPlatformBeforeMeasurement() { return kTvmErrorNoError; }
// Default implementation, overridden by the platform runtime.
__attribute__((weak)) tvm_crt_error_t TVMPlatformAfterMeasurement() { return kTvmErrorNoError; }
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/common/func_registry.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// LINT_C_FILE
/*!
* \file tvm/runtime/crt/func_registry.c
* \brief Defines implementations of generic string-based function lookup structs
*/
#include <stdio.h>
#include <string.h>
#include <tvm/runtime/crt/func_registry.h>
/*!
* \brief strcmp against the next string in the registry, and return the end.
*
* Regardless of return value, after calling this function, cursor's value will be modified to
* point at the \0 at the end of the string it currently points to.
*
* \param cursor Pointer to cursor to first string to compare.
* \param name Pointer to reference string.
* \return 0 if the string pointed to by cursor == name; non-zero otherwise.
*/
int strcmp_cursor(const char** cursor, const char* name) {
int return_value = 0;
while (return_value == 0) {
char c = **cursor;
char n = *name;
return_value = ((int)c) - ((int)n);
if (n == 0 || c == 0) {
break;
}
name++;
(*cursor)++;
}
while (**cursor != 0) {
(*cursor)++;
}
return return_value;
}
uint16_t TVMFuncRegistry_GetNumFuncs(const TVMFuncRegistry* reg) {
uint16_t num_funcs;
memcpy(&num_funcs, reg->names, sizeof(num_funcs));
return num_funcs;
}
int TVMFuncRegistry_SetNumFuncs(const TVMFuncRegistry* reg, const uint16_t num_funcs) {
memcpy((char*)reg->names, &num_funcs, sizeof(num_funcs));
return 0;
}
const char* TVMFuncRegistry_Get0thFunctionName(const TVMFuncRegistry* reg) {
// NOTE: first function name starts at index 2 to skip num_funcs.
return (reg->names + sizeof(uint16_t));
}
tvm_crt_error_t TVMFuncRegistry_Lookup(const TVMFuncRegistry* reg, const char* name,
tvm_function_index_t* function_index) {
tvm_function_index_t idx;
const char* reg_name_ptr = TVMFuncRegistry_Get0thFunctionName(reg);
idx = 0;
for (; *reg_name_ptr != '\0'; reg_name_ptr++) {
if (!strcmp_cursor(®_name_ptr, name)) {
*function_index = idx;
return kTvmErrorNoError;
}
idx++;
}
return kTvmErrorFunctionNameNotFound;
}
tvm_crt_error_t TVMFuncRegistry_GetByIndex(const TVMFuncRegistry* reg,
tvm_function_index_t function_index,
TVMBackendPackedCFunc* out_func) {
uint16_t num_funcs;
num_funcs = TVMFuncRegistry_GetNumFuncs(reg);
if (function_index >= num_funcs) {
return kTvmErrorFunctionIndexInvalid;
}
*out_func = reg->funcs[function_index];
return kTvmErrorNoError;
}
tvm_crt_error_t TVMMutableFuncRegistry_Create(TVMMutableFuncRegistry* reg, uint8_t* buffer,
size_t buffer_size_bytes) {
if (buffer_size_bytes < kTvmAverageFuncEntrySizeBytes) {
return kTvmErrorBufferTooSmall;
}
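  // Registry layout inside `buffer`:
  //   [uint16 num_funcs][name\0 name\0 ... \0][unused gap][function-pointer table]
  // Names grow forward from the front of the buffer while the function-pointer table
  // occupies its tail, so the registry is full once the two regions would meet.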
reg->registry.names = (const char*)buffer;
buffer[0] = 0; // number of functions present in buffer.
  buffer[1] = 0;  // note that we combine the first two elements to form a 16-bit function count.
buffer[2] = 0; // end of names list marker.
// compute a guess of the average size of one entry:
// - assume average function name is around ~10 bytes
// - 1 byte for \0
// - size of 1 function pointer
reg->max_functions = buffer_size_bytes / kTvmAverageFuncEntrySizeBytes;
reg->registry.funcs =
(TVMBackendPackedCFunc*)(buffer + buffer_size_bytes - reg->max_functions * sizeof(void*));
return kTvmErrorNoError;
}
tvm_crt_error_t TVMMutableFuncRegistry_Set(TVMMutableFuncRegistry* reg, const char* name,
TVMBackendPackedCFunc func, int override) {
size_t idx;
char* reg_name_ptr = (char*)TVMFuncRegistry_Get0thFunctionName(&(reg->registry));
idx = 0;
// NOTE: safe to discard const qualifier here, since reg->registry.names was set from
// TVMMutableFuncRegistry_Create above.
for (; *reg_name_ptr != 0; reg_name_ptr++) {
if (!strcmp_cursor((const char**)®_name_ptr, name)) {
if (override == 0) {
return kTvmErrorFunctionAlreadyDefined;
}
((TVMBackendPackedCFunc*)reg->registry.funcs)[idx] = func;
return kTvmErrorNoError;
}
idx++;
}
if (reg_name_ptr > ((const char*)reg->registry.funcs)) {
return kTvmErrorFunctionRegistryFull;
}
size_t name_len = strlen(name);
size_t names_bytes_remaining = ((const char*)reg->registry.funcs) - reg_name_ptr;
if (idx >= reg->max_functions || name_len + 1 > names_bytes_remaining) {
return kTvmErrorFunctionRegistryFull;
}
memcpy(reg_name_ptr, name, name_len + 1);
reg_name_ptr += name_len + 1;
*reg_name_ptr = 0;
((TVMBackendPackedCFunc*)reg->registry.funcs)[idx] = func;
uint16_t num_funcs;
// increment num_funcs.
num_funcs = TVMFuncRegistry_GetNumFuncs(&(reg->registry)) + 1;
TVMFuncRegistry_SetNumFuncs(&(reg->registry), num_funcs);
return kTvmErrorNoError;
}
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/common/ndarray.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// LINT_C_FILE
/*!
* \file ndarray.c
 * \brief NDArray container infrastructure.
*/
#include <tvm/runtime/crt/internal/common/ndarray.h>
#include <tvm/runtime/crt/page_allocator.h>
#include <tvm/runtime/crt/platform.h>
#include "crt_config.h"
static int Create(int32_t ndim, const tvm_index_t* shape, DLDataType dtype, DLDevice dev,
TVMNDArray* array) {
memset(array, 0, sizeof(TVMNDArray));
array->dl_tensor.ndim = ndim;
tvm_crt_error_t err;
DLDevice dldev = {kDLCPU, 0};
err = TVMPlatformMemoryAllocate(sizeof(int64_t) * ndim, dldev, (void*)&array->dl_tensor.shape);
if (err != kTvmErrorNoError) {
return -1;
}
memcpy(array->dl_tensor.shape, shape, sizeof(int64_t) * ndim);
array->dl_tensor.dtype = dtype;
array->dl_tensor.device = dev;
array->dl_tensor.data = 0;
return 0;
}
int64_t TVMNDArray_DataSizeBytes(TVMNDArray* array) {
int64_t num_elems = 1;
int32_t idx;
for (idx = 0; idx < array->dl_tensor.ndim; ++idx) {
num_elems *= array->dl_tensor.shape[idx];
}
return (num_elems * array->dl_tensor.dtype.bits + 7) / 8;
}
int TVMNDArray_Empty(int32_t ndim, const tvm_index_t* shape, DLDataType dtype, DLDevice dev,
TVMNDArray* array) {
int status = Create(ndim, shape, dtype, dev, array);
if (status != 0) {
return status;
}
int total_elem_bytes = TVMNDArray_DataSizeBytes(array);
array->dl_tensor.data =
TVMBackendAllocWorkspace(kDLCPU, 0, total_elem_bytes, dtype.code, dtype.bits);
memset(array->dl_tensor.data, 0, total_elem_bytes);
return 0;
}
int TVMNDArray_Load(TVMNDArray* ret, const char** strm) {
int32_t status = 0;
uint64_t header, reserved;
memcpy(&header, *strm, sizeof(header));
*strm += sizeof(header);
if (header != kTVMNDArrayMagic) {
fprintf(stderr, "Invalid DLTensor file format\n");
status = -1;
}
memcpy(&reserved, *strm, sizeof(reserved));
*strm += sizeof(reserved);
DLDevice dev;
int ndim; // sizeof ndim should match dlpack
DLDataType dtype;
memcpy(&dev, *strm, sizeof(dev));
*strm += sizeof(dev);
memcpy(&ndim, *strm, sizeof(ndim));
*strm += sizeof(ndim);
memcpy(&dtype, *strm, sizeof(dtype));
*strm += sizeof(dtype);
if ((ndim < 0) || (ndim > TVM_CRT_MAX_NDIM)) {
fprintf(stderr, "Invalid ndim=%d: expected to be 0 ~ %d.\n", ndim, TVM_CRT_MAX_NDIM);
status = -1;
}
if (dev.device_type != kDLCPU) {
fprintf(stderr, "Invalid DLTensor device: can only save as CPU tensor\n");
status = -1;
}
int64_t shape[TVM_CRT_MAX_NDIM] = {0};
int32_t idx;
if (ndim != 0) {
for (idx = 0; idx < ndim; idx++) {
memcpy(&shape[idx], *strm, sizeof(int64_t));
*strm += sizeof(shape[idx]);
}
}
status = TVMNDArray_Empty(ndim, shape, dtype, dev, ret);
if (status != 0) {
return status;
}
int64_t num_elems = 1;
int elem_bytes = (ret->dl_tensor.dtype.bits + 7) / 8;
for (idx = 0; idx < ret->dl_tensor.ndim; ++idx) {
num_elems *= ret->dl_tensor.shape[idx];
}
int64_t data_byte_size;
memcpy(&data_byte_size, *strm, sizeof(data_byte_size));
*strm += sizeof(data_byte_size);
if (!(data_byte_size == num_elems * elem_bytes)) {
fprintf(stderr,
"invalid DLTensor file format: data_byte_size=%d, "
"while num_elems*elem_bytes=%d\n",
(int)data_byte_size, (int)(num_elems * elem_bytes)); // NOLINT(*)
status = -1;
}
memcpy(ret->dl_tensor.data, *strm, data_byte_size);
*strm += data_byte_size;
return status;
}
int TVMNDArray_CreateView(TVMNDArray* arr, const tvm_index_t* shape, int32_t ndim, DLDataType dtype,
TVMNDArray* array_view) {
int status = Create(ndim, shape, dtype, arr->dl_tensor.device, array_view);
if (status != 0) {
return status;
}
array_view->dl_tensor.data = arr->dl_tensor.data;
return 0;
}
int TVMNDArray_RandomFill(TVMNDArray* arr) {
int64_t num_bytes = TVMNDArray_DataSizeBytes(arr);
if (num_bytes < 0 || num_bytes > SIZE_MAX) {
return kTvmErrorFunctionCallInvalidArg;
}
return TVMPlatformGenerateRandom(arr->dl_tensor.data, (size_t)num_bytes);
}
void TVMNDArray_IncrementReference(TVMNDArray* arr) { arr->reference_count++; }
uint32_t TVMNDArray_DecrementReference(TVMNDArray* arr) {
if (arr->reference_count > 0) {
arr->reference_count--;
}
return arr->reference_count;
}
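// Releases the array: the backing data and shape buffers are freed only once the
// reference count (incremented via TVMNDArray_IncrementReference) drops to zero.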
int TVMNDArray_Release(TVMNDArray* arr) {
tvm_crt_error_t err;
DLDevice dev = {kDLCPU, 0};
if (TVMNDArray_DecrementReference(arr) > 0) {
return 0;
}
err = TVMPlatformMemoryFree(arr->dl_tensor.data, dev);
if (err != kTvmErrorNoError) {
return err;
}
arr->dl_tensor.data = NULL;
err = TVMPlatformMemoryFree(arr->dl_tensor.shape, dev);
if (err != kTvmErrorNoError) {
return err;
}
arr->dl_tensor.shape = NULL;
return 0;
}
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/common/packed_func.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// LINT_C_FILE
/*!
* \file src/runtime/crt/common/packed_func.c
* \brief PackedFunc implementation.
*/
#include <stdio.h>
#include <string.h>
#include <tvm/runtime/crt/logging.h>
#include <tvm/runtime/crt/packed_func.h>
DLDataType String2DLDataType(const char* s) {
DLDataType t;
// handle None type
if (strlen(s) == 0) {
t.bits = 0;
t.lanes = 0;
t.code = kTVMOpaqueHandle;
return t;
}
t.bits = 32;
t.lanes = 1;
const char* scan;
if (!strncmp(s, "int", 3)) {
t.code = kDLInt;
scan = s + 3;
} else if (!strncmp(s, "uint", 4)) {
t.code = kDLUInt;
scan = s + 4;
} else if (!strncmp(s, "float", 5)) {
t.code = kDLFloat;
scan = s + 5;
} else if (!strncmp(s, "bfloat", 6)) {
t.code = kDLBfloat;
scan = s + 6;
} else if (!strncmp(s, "handle", 6)) {
t.code = kTVMOpaqueHandle;
t.bits = 64; // handle uses 64 bit by default.
scan = s + 6;
} else if (!strcmp(s, "bool")) {
t.code = kDLUInt;
t.bits = 1;
t.lanes = 1;
return t;
} else {
scan = s;
fprintf(stderr, "unknown type %s\n", s);
}
char* xdelim;
uint8_t bits = (uint8_t)(strtoul(scan, &xdelim, 10));
if (bits != 0) t.bits = bits;
char* endpt = xdelim;
if (*xdelim == 'x') {
t.lanes = (uint16_t)(strtoul(xdelim + 1, &endpt, 10));
}
if (!(endpt == s + strlen(s))) {
fprintf(stderr, "unknown type %s\n", s);
}
return t;
}
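/*
 * Example (illustrative only, values follow from the parser above):
 *
 *   DLDataType t0 = String2DLDataType("float32");  // code=kDLFloat, bits=32, lanes=1
 *   DLDataType t1 = String2DLDataType("int8x4");   // code=kDLInt, bits=8, lanes=4
 *   DLDataType t2 = String2DLDataType("handle");   // code=kTVMOpaqueHandle, bits=64, lanes=1
 */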
int TVMPackedFunc_InitGlobalFunc(TVMPackedFunc* pf, const char* name, const TVMArgs* args) {
int status = 0;
pf->Call = &TVMPackedFunc_Call;
pf->SetArgs = &TVMPackedFunc_SetArgs;
status = TVMFuncGetGlobal(name, &pf->fexec);
if (status != 0) {
return status;
}
snprintf(pf->name, sizeof(pf->name), "%s", name);
TVMPackedFunc_SetArgs(pf, args);
return status;
}
int TVMPackedFunc_InitModuleFunc(TVMPackedFunc* pf, TVMModuleHandle module, const char* name,
const TVMArgs* args) {
int status = 0;
pf->Call = &TVMPackedFunc_Call;
pf->SetArgs = &TVMPackedFunc_SetArgs;
status = TVMModGetFunction(module, name, 0, &pf->fexec);
if (status != 0) {
return status;
}
snprintf(pf->name, sizeof(pf->name), "%s", name);
TVMPackedFunc_SetArgs(pf, args);
return status;
}
TVMArgs TVMArgs_Create(TVMValue* values, uint32_t* tcodes, uint32_t values_count) {
uint32_t idx;
TVMArgs args;
memset(&args, 0, sizeof(args));
for (idx = 0; idx < values_count; idx++) {
memcpy(args.values + idx, values + idx, sizeof(TVMValue));
args.tcodes[idx] = tcodes[idx];
}
args.values_count = values_count;
return args;
}
int TVMPackedFunc_Call(TVMPackedFunc* pf) {
pf->ret_value.values_count = 1;
pf->ret_value.tcodes[0] = kTVMNullptr;
return TVMFuncCall(pf->fexec, pf->args.values, pf->args.tcodes, pf->args.values_count,
pf->ret_value.values, pf->ret_value.tcodes);
}
void TVMPackedFunc_SetArgs(TVMPackedFunc* pf, const TVMArgs* args) {
memcpy(&(pf->args), args, sizeof(TVMArgs));
}
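/*
 * Illustrative sketch (not part of the runtime): looking up and calling a
 * global PackedFunc with one integer argument. "my.global.func" is a
 * hypothetical registered name.
 *
 *   TVMValue val;
 *   uint32_t tcode = kTVMArgInt;
 *   val.v_int64 = 42;
 *   TVMArgs args = TVMArgs_Create(&val, &tcode, 1);
 *   TVMPackedFunc pf;
 *   if (TVMPackedFunc_InitGlobalFunc(&pf, "my.global.func", &args) == 0) {
 *     pf.Call(&pf);  // any result is left in pf.ret_value
 *   }
 */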
TVMPackedFunc* g_fexecs;
uint32_t g_fexecs_count;
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/contrib/stm32/ai_runtime_api.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file ai_runtime_api.c
* \brief The runtime API for the TVM generated C code.
*/
// LINT_C_FILE
#include "ai_runtime_api.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// =======================================================
// ai_network_t
// =======================================================
typedef struct {
ai_model_info* info;
ai_tensor** inputs;
ai_tensor** outputs;
ai_ptr activations;
const char* error;
} ai_network_t;
//
// .nn_models_info section
//
extern uintptr_t __models_section_start__;
extern uintptr_t __models_section_end__;
uint32_t _modelsSection_start = (uint32_t)(&__models_section_start__);
uint32_t _modelsSection_end = (uint32_t)(&__models_section_end__);
// =======================================================
// Iterator
// =======================================================
ai_model_iterator ai_model_iterator_begin() {
return _modelsSection_start; // begin()
}
ai_model_iterator ai_model_iterator_end() { return _modelsSection_end; }
ai_model_iterator ai_model_iterator_next(ai_model_iterator idx) {
return (idx + sizeof(ai_model_info));
}
ai_model_info* ai_model_iterator_value(ai_model_iterator idx) { return (ai_model_info*)idx; }
// =======================================================
// ai_create
// =======================================================
AI_API_ENTRY ai_status ai_create(ai_model_info* nn, ai_ptr activations, ai_handle* handle) {
  ai_status status = AI_STATUS_OK;
  //
  // Create internal network representation
  //
  ai_network_t* network = (ai_network_t*)malloc(sizeof(ai_network_t));
  if (network == NULL) {
    return AI_STATUS_ERROR;
  }
  network->info = nn;
  network->inputs = AI_MODEL_inputs(nn);
  network->outputs = AI_MODEL_outputs(nn);
network->activations = activations;
network->error = NULL;
const ai_ptr params = nn->ai_get_params();
status = nn->ai_create(params, activations);
if (status != AI_STATUS_OK) {
network->error = TVMGetLastError();
}
//
// Setup weights and activations
//
*handle = network;
return status;
}
// =======================================================
// ai_destroy
// =======================================================
AI_API_ENTRY ai_status ai_destroy(ai_handle handle) {
if (handle == NULL) {
return AI_STATUS_ERROR;
}
ai_network_t* network = (ai_network_t*)handle;
free(network);
return AI_STATUS_OK;
}
// =======================================================
// ai_get_error
// =======================================================
AI_API_ENTRY
const char* ai_get_error(ai_handle handle) {
if (handle == NULL) {
return "Network handle is NULL";
}
ai_network_t* network = (ai_network_t*)handle;
if (network->error == NULL) {
return "";
}
return network->error;
}
// =======================================================
// ai_get_input_size
// =======================================================
AI_API_ENTRY int32_t ai_get_input_size(ai_handle handle) {
if (handle == NULL) {
return 0;
}
ai_network_t* network = (ai_network_t*)handle;
return AI_MODEL_n_inputs(network->info);
}
// =======================================================
// ai_get_output_size
// =======================================================
AI_API_ENTRY int32_t ai_get_output_size(ai_handle handle) {
if (handle == NULL) {
return 0;
}
ai_network_t* network = (ai_network_t*)handle;
return AI_MODEL_n_outputs(network->info);
}
// =======================================================
// ai_get_input
// =======================================================
AI_API_ENTRY ai_tensor* ai_get_input(ai_handle handle, int32_t index) {
if (handle == NULL) {
return NULL;
}
ai_network_t* network = (ai_network_t*)handle;
  if (index < 0 || index >= AI_MODEL_n_inputs(network->info)) {
network->error = "Input index out of range";
return NULL;
}
return (network->inputs)[index];
}
// =======================================================
// ai_get_output
// =======================================================
AI_API_ENTRY ai_tensor* ai_get_output(ai_handle handle, int32_t index) {
if (handle == NULL) {
return NULL;
}
ai_network_t* network = (ai_network_t*)handle;
  if (index < 0 || index >= AI_MODEL_n_outputs(network->info)) {
network->error = "Output index out of range";
return NULL;
}
return (network->outputs)[index];
}
// =======================================================
// ai_run
// =======================================================
AI_API_ENTRY ai_status ai_run(ai_handle handle) {
if (handle == NULL) {
return AI_STATUS_ERROR;
}
ai_network_t* network = (ai_network_t*)handle;
ai_model_info* nn = network->info;
uint32_t n_inputs = AI_MODEL_n_inputs(nn);
uint32_t n_outputs = AI_MODEL_n_outputs(nn);
ai_status status = AI_STATUS_OK;
//
// Check that input tensors have been specified
//
uint32_t i;
for (i = 0; i < n_inputs; i++) {
ai_tensor* input_tensor = network->inputs[i];
DLTensor* input = &input_tensor->dltensor;
if (input->data == NULL) {
network->error = "Network input NULL";
return AI_STATUS_ERROR;
}
}
for (i = 0; i < n_outputs; i++) {
ai_tensor* output_tensor = network->outputs[i];
DLTensor* output = &output_tensor->dltensor;
if (output->data == NULL) {
network->error = "Network output NULL";
return AI_STATUS_ERROR;
}
}
status = nn->ai_run(network->inputs, network->outputs);
if (status != AI_STATUS_OK) {
const char* err = TVMGetLastError();
network->error = err;
}
return status;
}
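// =======================================================
// Illustrative caller-side sketch (assumptions: a model descriptor obtained
// from the model iterator and an externally provided `activations` buffer of
// AI_MODEL_activations_size(nn) bytes):
//
//   ai_model_info* nn = ai_model_iterator_value(ai_model_iterator_begin());
//   ai_handle handle = NULL;
//   if (ai_create(nn, activations, &handle) == AI_STATUS_OK) {
//     ai_tensor* in = ai_get_input(handle, 0);
//     ai_tensor* out = ai_get_output(handle, 0);
//     // ... fill in->dltensor.data, then:
//     ai_run(handle);
//     // ... read out->dltensor.data
//     ai_destroy(handle);
//   }
// =======================================================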
// =======================================================
// ai_get_name
// =======================================================
const char* ai_get_name(ai_handle handle) {
if (handle == NULL) {
return NULL;
}
ai_network_t* network = (ai_network_t*)handle;
return AI_MODEL_name(network->info);
}
// =======================================================
// ai_get_datetime
// =======================================================
const char* ai_get_datetime(ai_handle handle) {
if (handle == NULL) {
return NULL;
}
ai_network_t* network = (ai_network_t*)handle;
return AI_MODEL_datetime(network->info);
}
// =======================================================
// ai_get_revision
// =======================================================
const char* ai_get_revision(ai_handle handle) {
if (handle == NULL) {
return NULL;
}
ai_network_t* network = (ai_network_t*)handle;
return AI_MODEL_revision(network->info);
}
// =======================================================
// ai_get_tool_version
// =======================================================
const char* ai_get_tool_version(ai_handle handle) {
if (handle == NULL) {
return NULL;
}
ai_network_t* network = (ai_network_t*)handle;
return AI_MODEL_tool_version(network->info);
}
// =======================================================
// ai_get_api_version
// =======================================================
const char* ai_get_api_version(ai_handle handle) {
if (handle == NULL) {
return NULL;
}
ai_network_t* network = (ai_network_t*)handle;
return AI_MODEL_api_version(network->info);
}
// =======================================================
// ai_get_node_size
// =======================================================
uint32_t ai_get_node_size(ai_handle handle) {
if (handle == NULL) {
return 0;
}
ai_network_t* network = (ai_network_t*)handle;
return AI_MODEL_n_nodes(network->info);
}
// =======================================================
// ai_get_activations_size
// =======================================================
uint32_t ai_get_activations_size(ai_handle handle) {
if (handle == NULL) {
return 0;
}
ai_network_t* network = (ai_network_t*)handle;
return AI_MODEL_activations_size(network->info);
}
// =======================================================
// ai_get_params_size
// =======================================================
uint32_t ai_get_params_size(ai_handle handle) {
if (handle == NULL) {
return 0;
}
ai_network_t* network = (ai_network_t*)handle;
return AI_MODEL_params_size(network->info);
}
// =======================================================
// ai_get_activations
// =======================================================
ai_ptr ai_get_activations(ai_handle handle) {
if (handle == NULL) {
return 0;
}
ai_network_t* network = (ai_network_t*)handle;
return network->activations;
}
// =======================================================
// ai_get_params
// =======================================================
const ai_ptr ai_get_params(ai_handle handle) {
if (handle == NULL) {
return NULL;
}
ai_network_t* network = (ai_network_t*)handle;
return network->info->ai_get_params();
}
// =======================================================
// ai_get_quantization
// =======================================================
const ai_quantization_info* ai_get_quantization(ai_tensor* tensor) {
if (tensor == NULL) {
return NULL;
}
return tensor->quant;
}
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/contrib/stm32/ai_runtime_api.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file ai_runtime_api.h
* \brief The runtime API for the TVM generated C code.
*/
#ifndef TVM_RUNTIME_CRT_CONTRIB_STM32_AI_RUNTIME_API_H_
#define TVM_RUNTIME_CRT_CONTRIB_STM32_AI_RUNTIME_API_H_
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include "dlpack/dlpack.h" // From TVM
#include "tvm/runtime/c_runtime_api.h" // From TVM
//
// This describes current ai_runtime version
//
#define AI_PLATFORM_RUNTIME_MAJOR 1
#define AI_PLATFORM_RUNTIME_MINOR 0
#define AI_PLATFORM_RUNTIME_MICRO 0
#define AI_STATIC static
#if defined(_MSC_VER)
#define AI_INLINE __inline
#define AI_API_ENTRY __declspec(dllexport)
#define AI_ALIGNED(x) /* AI_ALIGNED(x) */
#elif defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
#define AI_INLINE inline
#define AI_API_ENTRY /* AI_API_ENTRY */
#define AI_ALIGNED(x) AI_CONCAT(AI_ALIGNED_, x)
#elif defined(__CC_ARM)
#define AI_INLINE __inline
#define AI_API_ENTRY __attribute__((visibility("default")))
#define AI_ALIGNED(x) __attribute__((aligned(x)))
/* Keil disallows anonymous union initialization by default */
#pragma anon_unions
#elif defined(__GNUC__)
#define AI_INLINE __inline
#define AI_API_ENTRY __attribute__((visibility("default")))
#define AI_ALIGNED(x) __attribute__((aligned(x)))
#else
/* Dynamic libraries are not supported by the compiler */
#define AI_API_ENTRY /* AI_API_ENTRY */
#define AI_ALIGNED(x) /* AI_ALIGNED(x) */
#endif
/*********************************************************/
typedef void* ai_handle;
#define AI_HANDLE_PTR(ptr_) ((ai_handle)(ptr_))
#define AI_HANDLE_NULL AI_HANDLE_PTR(NULL)
typedef uint8_t* ai_ptr;
typedef enum { AI_STATUS_OK = 0, AI_STATUS_ERROR = 1, AI_STATUS_DELEGATE_ERROR = 2 } ai_status;
// =======================================================
// ai_quantization_info
//
// Parameters for asymmetric quantization across a dimension (i.e
// per output channel quantization).
// quantized_dimension specifies which dimension the scales and
// zero_points correspond to.
// For a particular value in quantized_dimension, quantized values
// can be converted back to float using:
// real_value = scale * (quantized_value - zero_point)
// =======================================================
typedef struct {
/*!
* \brief The quantization info, if quantized
*/
float* scale;
int32_t* zero_point;
int32_t dim;
} ai_quantization_info;
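// Illustrative sketch (not part of the original API): applying the formula in
// the comment above to one quantized value in channel `c` of the quantized
// dimension.
//
//   static inline float dequantize_value(const ai_quantization_info* q,
//                                         int32_t quantized_value, int32_t c) {
//     return q->scale[c] * (float)(quantized_value - q->zero_point[c]);
//   }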
// =======================================================
// ai_tensor
// =======================================================
typedef struct {
/*!
* \brief The TVM tensor.
*/
DLTensor dltensor;
/*!
* \brief The quantization info, if quantized
*/
ai_quantization_info* quant;
} ai_tensor;
// =======================================================
// get_dltensor
// =======================================================
AI_STATIC AI_INLINE DLTensor* get_dltensor(ai_tensor* tensor) { return &tensor->dltensor; }
// =======================================================
// get_tensor_elts
// =======================================================
AI_STATIC AI_INLINE uint32_t get_tensor_elts(const ai_tensor* tensor) {
const DLTensor* t = &tensor->dltensor;
uint32_t elts = 1;
for (int i = 0; i < t->ndim; ++i) {
elts *= t->shape[i];
}
return elts;
}
// =======================================================
// get_tensor_size
// =======================================================
AI_STATIC AI_INLINE uint32_t get_tensor_size(const ai_tensor* tensor) {
const DLTensor* t = &tensor->dltensor;
uint32_t size = 1;
for (int i = 0; i < t->ndim; ++i) {
size *= t->shape[i];
}
size *= (t->dtype.bits * t->dtype.lanes + 7) / 8;
return size;
}
// =======================================================
// ai_network_info
// =======================================================
typedef struct {
const char* name;
const char* datetime;
const char* revision;
const char* tool_version;
const char* api_version;
uint16_t n_nodes;
uint8_t n_inputs;
uint8_t n_outputs;
uint32_t activations_size;
uint32_t params_size;
ai_ptr activations;
ai_tensor** inputs;
ai_tensor** outputs;
const ai_ptr (*ai_get_params)(void);
ai_status (*ai_create)(const ai_ptr weights, const ai_ptr activations);
ai_status (*ai_destroy)();
ai_status (*ai_run)(ai_tensor* input[], ai_tensor* output[]);
} ai_model_info;
#define AI_MODEL_name(x) (x->name)
#define AI_MODEL_datetime(x) (x->datetime)
#define AI_MODEL_revision(x) (x->revision)
#define AI_MODEL_tool_version(x) (x->tool_version)
#define AI_MODEL_api_version(x) (x->api_version)
#define AI_MODEL_n_nodes(x) (x->n_nodes)
#define AI_MODEL_n_inputs(x) (x->n_inputs)
#define AI_MODEL_n_outputs(x) (x->n_outputs)
#define AI_MODEL_activations_size(x) (x->activations_size)
#define AI_MODEL_params_size(x) (x->params_size)
#define AI_MODEL_inputs(x) (x->inputs)
#define AI_MODEL_outputs(x) (x->outputs)
#define AI_MODEL_activations(x) (x->activations)
// =======================================================
// Iterator
//
// Usage:
//
//  for (ai_model_iterator it = ai_model_iterator_begin();
//       it != ai_model_iterator_end();
//       it = ai_model_iterator_next(it)) {
//    ai_model_info* info = ai_model_iterator_value(it);
//    const char* name = AI_MODEL_name(info);
//  }
//
// =======================================================
typedef uint32_t ai_model_iterator;
ai_model_iterator ai_model_iterator_begin();
ai_model_iterator ai_model_iterator_next(ai_model_iterator it);
ai_model_iterator ai_model_iterator_end();
ai_model_info* ai_model_iterator_value(ai_model_iterator it);
// =======================================================
// External Interface
// =======================================================
ai_status ai_create(ai_model_info* nn, ai_ptr activations, ai_handle* handle);
ai_status ai_destroy(ai_handle handle);
const char* ai_get_error(ai_handle handle);
int32_t ai_get_input_size(ai_handle handle);
int32_t ai_get_output_size(ai_handle handle);
ai_tensor* ai_get_input(ai_handle handle, int32_t index);
ai_tensor* ai_get_output(ai_handle handle, int32_t index);
ai_status ai_run(ai_handle handle);
//
// Additional methods
//
const char* ai_get_name(ai_handle handle);
const char* ai_get_datetime(ai_handle handle);
const char* ai_get_revision(ai_handle handle);
const char* ai_get_tool_version(ai_handle handle);
const char* ai_get_api_version(ai_handle handle);
uint32_t ai_get_node_size(ai_handle handle);
uint32_t ai_get_activations_size(ai_handle handle);
uint32_t ai_get_params_size(ai_handle handle);
ai_ptr ai_get_activations(ai_handle handle);
const ai_ptr ai_get_params(ai_handle handle);
//
// Quantization
//
const ai_quantization_info* ai_get_quantization(ai_tensor* tensor);
#endif // TVM_RUNTIME_CRT_CONTRIB_STM32_AI_RUNTIME_API_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/contrib/stm32/crt_config.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
 * \file crt_config.h
 * \brief CRT configuration for the STM32 port.
 */
#ifndef TVM_RUNTIME_CRT_CONTRIB_STM32_CRT_CONFIG_H_
#define TVM_RUNTIME_CRT_CONTRIB_STM32_CRT_CONFIG_H_
#include <tvm/runtime/crt/logging.h>
/*! Log level of the CRT runtime */
#define TVM_CRT_LOG_LEVEL TVM_CRT_LOG_LEVEL_DEBUG
#endif // TVM_RUNTIME_CRT_CONTRIB_STM32_CRT_CONFIG_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/contrib/stm32/runtime.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file runtime.c
* \brief A minimal "C" runtime support required by the TVM
* generated C code. Declared in "runtime/c_backend_api.h"
* and "runtime/c_runtime_api.h"
*/
#include <assert.h>
#include <malloc.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tvm/runtime/c_backend_api.h>
#include <tvm/runtime/crt/error_codes.h>
static char* g_last_error = NULL;
// ====================================================
// TVMPlatformMemoryAllocate
// ====================================================
tvm_crt_error_t TVMPlatformMemoryAllocate(size_t num_bytes, DLDevice dev, void** out_ptr) {
  *out_ptr = malloc(num_bytes);
return (*out_ptr == NULL) ? kTvmErrorPlatformNoMemory : kTvmErrorNoError;
}
// ====================================================
// TVMPlatformMemoryFree
// ====================================================
tvm_crt_error_t TVMPlatformMemoryFree(void* ptr, DLDevice dev) {
free(ptr);
return kTvmErrorNoError;
}
// ====================================================
// TVMFuncRegisterGlobal
// ====================================================
int TVMFuncRegisterGlobal(const char* name, TVMFunctionHandle f, int override) { return -1; }
// ====================================================
// TVMPlatformAbort
// ====================================================
void __attribute__((noreturn)) TVMPlatformAbort(tvm_crt_error_t code) {
for (;;) {
}
}
// ====================================================
// TVMLogf
// ====================================================
void TVMLogf(const char* msg, ...) { return; }
// ====================================================
// TVMAPISetLastError
// ====================================================
void TVMAPISetLastError(const char* msg) {
if (g_last_error) {
free(g_last_error);
}
uint32_t nbytes = strlen(msg) + 1;
  g_last_error = malloc(nbytes);
  if (g_last_error == NULL) {
    return;
  }
  snprintf(g_last_error, nbytes, "%s", msg);
}
// ====================================================
// TVMGetLastError
// ====================================================
const char* TVMGetLastError(void) {
assert(g_last_error);
return g_last_error;
}
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/crt_config-template.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file tvm/runtime/crt_config.h.template
* \brief Template for CRT configuration, to be modified on each target.
*/
#ifndef TVM_RUNTIME_CRT_CRT_CONFIG_TEMPLATE_H_
#define TVM_RUNTIME_CRT_CRT_CONFIG_TEMPLATE_H_
/*! Log level of the CRT runtime */
#define TVM_CRT_LOG_LEVEL TVM_CRT_LOG_LEVEL_DEBUG
/*! Support low-level debugging in MISRA-C runtime */
#define TVM_CRT_DEBUG 0
/*! Maximum supported dimension in NDArray */
#define TVM_CRT_MAX_NDIM 6
/*! Maximum supported arguments in generated functions */
#define TVM_CRT_MAX_ARGS 10
/*! Size of the global function registry, in bytes. */
#define TVM_CRT_GLOBAL_FUNC_REGISTRY_SIZE_BYTES 512
/*! Maximum number of registered modules. */
#define TVM_CRT_MAX_REGISTERED_MODULES 2
/*! Maximum packet size, in bytes, including the length header. */
#define TVM_CRT_MAX_PACKET_SIZE_BYTES 2048
/*! Maximum supported string length in dltype, e.g. "int8", "int16", "float32" */
#define TVM_CRT_MAX_STRLEN_DLTYPE 10
/*! Maximum supported string length in function names */
#define TVM_CRT_MAX_STRLEN_FUNCTION_NAME 120
/*! Maximum supported string length in parameter names */
#define TVM_CRT_MAX_STRLEN_PARAM_NAME 80
/*! \brief Maximum length of a PackedFunc function name. */
#define TVM_CRT_MAX_FUNCTION_NAME_LENGTH_BYTES 30
/*! \brief Enable checks to enforce the stack allocator with a FIFO ordering. Off by default */
// #define TVM_CRT_STACK_ALLOCATOR_ENABLE_FIFO_CHECK
#endif // TVM_RUNTIME_CRT_CRT_CONFIG_TEMPLATE_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/graph_executor/graph_executor.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// LINT_C_FILE
/*!
* \file graph_executor.c
* \brief implement graph executor in pure C
*/
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/crt/internal/graph_executor/graph_executor.h>
#include <tvm/runtime/crt/logging.h>
#include <tvm/runtime/crt/module.h>
#include <tvm/runtime/crt/packed_func.h>
#include <tvm/runtime/crt/page_allocator.h>
#include "crt_config.h"
#ifndef MAX
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif // MAX
uint32_t Shape_Accumulate(int64_t* shape, uint32_t ndim) {
int64_t accum = 1;
uint32_t idx;
for (idx = 0; idx < ndim; idx++) {
if (shape[idx] == 0) {
break;
}
accum *= shape[idx];
}
return accum;
}
int NodeEntry_Load(TVMGraphExecutorNodeEntry* entry, JSONReader* reader) {
int status = 0;
reader->BeginArray(reader);
if (!(reader->NextArrayItem(reader))) {
fprintf(stderr, "invalid json format: failed to parse `node_id`\n");
status = -1;
}
reader->ReadUnsignedInteger(reader, &(entry->node_id));
if (!(reader->NextArrayItem(reader))) {
fprintf(stderr, "invalid json format: failed to parse `index`\n");
status = -1;
}
reader->ReadUnsignedInteger(reader, &(entry->index));
if (reader->NextArrayItem(reader)) {
reader->ReadUnsignedInteger(reader, &(entry->version));
if (reader->NextArrayItem(reader)) {
fprintf(stderr, "invalid json format: failed to parse `version`\n");
status = -1;
}
} else {
entry->version = 0;
}
return status;
}
void TVMGraphExecutorNode_LoadAttrs(TVMGraphExecutorNode* node, JSONReader* reader,
TVMOpParam* param) {
int bitmask = 0;
char key[20], value[TVM_CRT_MAX_STRLEN_FUNCTION_NAME];
memset(param, 0, sizeof(TVMOpParam));
memset(key, 0, sizeof(key));
memset(value, 0, sizeof(value));
reader->BeginObject(reader);
while (reader->NextObjectItem(reader, key, sizeof(key))) {
int status = reader->ReadString(reader, value, sizeof(value));
if (status != 0) {
fprintf(stderr, "error reading value for key: %s\n", key);
break;
}
if (!strcmp(key, "func_name")) {
      snprintf(param->func_name, sizeof(param->func_name), "%s", value);
bitmask |= 1;
} else if (!strcmp(key, "num_inputs")) {
param->num_inputs = strtoul(value, 0, 10);
bitmask |= 2;
} else if (!strcmp(key, "num_outputs")) {
param->num_outputs = strtoul(value, 0, 10);
bitmask |= 4;
} else if (!strcmp(key, "flatten_data")) {
param->flatten_data = strtoul(value, 0, 10);
bitmask |= 8;
#if TVM_CRT_DEBUG
} else {
printf("do not support key %s", key);
#endif // TVM_CRT_DEBUG
}
}
if (bitmask != (1 | 2 | 4 | 8)) {
fprintf(stderr, "invalid format\n");
}
}
int TVMGraphExecutorNode_Load(TVMGraphExecutorNode* node, JSONReader* reader) {
int status = 0;
reader->BeginObject(reader);
int bitmask = 0;
char key[20];
while (reader->NextObjectItem(reader, key, sizeof(key))) {
if (!strcmp(key, "op")) {
status = reader->ReadString(reader, node->op_type, sizeof(node->op_type));
if (status != 0) {
fprintf(stderr, "error reading op\n");
break;
}
bitmask |= 1;
} else if (!strcmp(key, "name")) {
status = reader->ReadString(reader, node->name, sizeof(node->name));
if (status != 0) {
fprintf(stderr, "error reading name\n");
break;
}
bitmask |= 2;
} else if (!strcmp(key, "inputs")) {
size_t count = 0;
reader->BeginArray(reader);
size_t num_inputs = 0;
if (reader->ArrayLength(reader, &num_inputs) != 0) {
fprintf(stderr, "error determining inputs array length\n");
break;
}
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryAllocate(
sizeof(TVMGraphExecutorNodeEntry) * num_inputs, dev, (void**)&node->inputs);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory allocate error: %08x", err);
return -1;
}
while (reader->NextArrayItem(reader)) {
if (count == num_inputs) {
fprintf(stderr, "too many array elements\n");
return -1;
}
TVMGraphExecutorNodeEntry* inputs = node->inputs + count;
reader->BeginArray(reader);
if (!reader->NextArrayItem(reader)) {
fprintf(stderr, "invalid json format\n");
status = -1;
break;
}
reader->ReadUnsignedInteger(reader, &(inputs->node_id));
if (!reader->NextArrayItem(reader)) {
fprintf(stderr, "invalid json format\n");
status = -1;
break;
}
reader->ReadUnsignedInteger(reader, &(inputs->index));
if (reader->NextArrayItem(reader)) {
reader->ReadUnsignedInteger(reader, &(inputs->version));
if (reader->NextArrayItem(reader)) {
fprintf(stderr, "invalid json format\n");
status = -1;
break;
}
} else {
inputs->version = 0;
}
count++;
}
node->inputs_count = count;
bitmask |= 4;
} else if (!strcmp(key, "attr") || !strcmp(key, "attrs")) {
TVMOpParam param;
      TVMGraphExecutorNode_LoadAttrs(node, reader, &param);
      memcpy(&node->param, &param, sizeof(param));
} else if (!strcmp(key, "control_deps")) {
fprintf(stderr, "do not support key %s", key);
status = -1;
} else {
fprintf(stderr, "do not support key %s", key);
status = -1;
}
if (status != 0) {
break;
}
}
if (bitmask != (1 | 2 | 4)) {
fprintf(stderr, "invalid format\n");
status = -1;
}
return status;
}
TVMGraphExecutorNode TVMGraphExecutorNodeCreate() {
TVMGraphExecutorNode node;
memset(&node, 0, sizeof(TVMGraphExecutorNode));
node.LoadAttrs = TVMGraphExecutorNode_LoadAttrs;
node.Load = TVMGraphExecutorNode_Load;
return node;
}
int TVMGraphExecutorNodeRelease(TVMGraphExecutorNode* node) {
if (!node) {
return 0;
}
if (node->inputs) {
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryFree(node->inputs, dev);
node->inputs = 0;
if (err != kTvmErrorNoError) {
return -1;
}
}
return 0;
}
int TVMGraphExecutorGraphAttr_Load(TVMGraphExecutorGraphAttr* attr, JSONReader* reader) {
int status = 0;
int bitmask = 0;
char key[16], type[16];
uint32_t storage_id_count = 0;
uint32_t dltype_count = 0;
uint32_t shape_count = 0;
uint32_t device_index_count = 0;
reader->BeginObject(reader);
while (reader->NextObjectItem(reader, key, sizeof(key))) {
if (!strcmp(key, "dltype")) {
reader->BeginArray(reader);
if (!(reader->NextArrayItem(reader))) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
status = reader->ReadString(reader, type, sizeof(type));
if (status != 0) {
fprintf(stderr, "error reading dltype type\n");
break;
}
if (strcmp(type, "list_str")) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
if (!(reader->NextArrayItem(reader))) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
reader->BeginArray(reader);
size_t num_items = 0;
if (reader->ArrayLength(reader, &num_items) != 0) {
fprintf(stderr, "error determing list_str length\n");
status = -1;
break;
}
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryAllocate(TVM_CRT_MAX_STRLEN_DLTYPE * num_items, dev,
(void**)&attr->dltype);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory allocate error: %08x", err);
return -1;
}
dltype_count = 0;
while (reader->NextArrayItem(reader)) {
if (dltype_count == num_items) {
fprintf(stderr, "array too big\n");
status = -1;
return status;
}
status = reader->ReadString(reader, attr->dltype + dltype_count * TVM_CRT_MAX_STRLEN_DLTYPE,
TVM_CRT_MAX_STRLEN_DLTYPE);
if (status != 0) {
fprintf(stderr, "error reading dltype array item");
break;
}
dltype_count++;
}
attr->dltype_count = dltype_count;
if (reader->NextArrayItem(reader)) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
bitmask |= 1;
} else if (!strcmp(key, "storage_id")) {
reader->BeginArray(reader);
if (!(reader->NextArrayItem(reader))) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
status = reader->ReadString(reader, type, sizeof(type));
if (status != 0) {
fprintf(stderr, "error reading device_index array item");
}
if (strcmp(type, "list_int")) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
if (!(reader->NextArrayItem(reader))) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
reader->BeginArray(reader);
size_t num_items = 0;
if (reader->ArrayLength(reader, &num_items) != 0) {
fprintf(stderr, "error determing list_str length\n");
status = -1;
break;
}
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err =
TVMPlatformMemoryAllocate(sizeof(uint32_t) * num_items, dev, (void**)&attr->storage_id);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory allocate error: %08x", err);
return -1;
}
storage_id_count = 0;
while (reader->NextArrayItem(reader)) {
if (storage_id_count == num_items) {
fprintf(stderr, "array too big\n");
status = -1;
return status;
}
reader->ReadUnsignedInteger(reader, &(attr->storage_id[storage_id_count]));
storage_id_count++;
}
if (reader->NextArrayItem(reader)) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
bitmask |= 2;
} else if (!strcmp(key, "shape")) {
reader->BeginArray(reader);
if (!(reader->NextArrayItem(reader))) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
status = reader->ReadString(reader, type, sizeof(type));
if (status != 0) {
fprintf(stderr, "error reading shape array item\n");
break;
}
if (strcmp(type, "list_shape")) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
if (!(reader->NextArrayItem(reader))) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
reader->BeginArray(reader);
size_t num_items = 0;
if (reader->ArrayLength(reader, &num_items) != 0) {
fprintf(stderr, "error determing list_str length\n");
status = -1;
break;
}
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryAllocate(
sizeof(int64_t) * TVM_CRT_MAX_NDIM * num_items, dev, (void**)&attr->shape);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory allocate error: %08x", err);
status = -1;
break;
}
err = TVMPlatformMemoryAllocate(sizeof(uint32_t) * num_items, dev, (void**)&attr->ndim);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory allocate error: %08x", err);
status = -1;
break;
}
shape_count = 0;
while (reader->NextArrayItem(reader)) {
if (shape_count == num_items) {
fprintf(stderr, "array too big\n");
status = -1;
return status;
}
reader->BeginArray(reader);
int64_t* attr_shape_ptr = attr->shape + shape_count * TVM_CRT_MAX_NDIM;
reader->ReadInteger(reader, attr_shape_ptr + 0);
uint32_t ndim = 1;
if (reader->NextArrayItem(reader)) {
for (ndim = 1; ndim < TVM_CRT_MAX_NDIM; ndim++) {
if (reader->NextArrayItem(reader)) {
reader->ReadInteger(reader, attr_shape_ptr + ndim);
} else {
break;
}
}
if (ndim == TVM_CRT_MAX_NDIM) {
reader->NextArrayItem(reader);
}
}
attr->ndim[shape_count] = ndim;
shape_count++;
}
attr->shape_count = shape_count;
if (reader->NextArrayItem(reader)) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
bitmask |= 4;
} else if (!strcmp(key, "device_index")) {
reader->BeginArray(reader);
if (!(reader->NextArrayItem(reader))) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
status = reader->ReadString(reader, type, sizeof(type));
if (status != 0) {
fprintf(stderr, "error reading device_index array item");
break;
}
if (strcmp(type, "list_int")) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
if (!(reader->NextArrayItem(reader))) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
reader->BeginArray(reader);
size_t num_items = 0;
if (reader->ArrayLength(reader, &num_items) != 0) {
fprintf(stderr, "error determing list_int length\n");
status = -1;
break;
}
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err =
TVMPlatformMemoryAllocate(sizeof(uint32_t) * num_items, dev, (void**)&attr->device_index);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory allocate error: %08x", err);
status = -1;
break;
}
device_index_count = 0;
while (reader->NextArrayItem(reader)) {
if (device_index_count == num_items) {
fprintf(stderr, "array too big\n");
status = -1;
return status;
}
reader->ReadUnsignedInteger(reader, &(attr->device_index[device_index_count]));
device_index_count++;
}
if (reader->NextArrayItem(reader)) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
} else {
reader->BeginArray(reader);
if (!(reader->NextArrayItem(reader))) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
reader->ReadString(reader, type, sizeof(type));
if (!strcmp(type, "list_int")) {
if (!(reader->NextArrayItem(reader))) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
uint32_t temp_count = 0;
reader->BeginArray(reader);
while (reader->NextArrayItem(reader)) {
uint32_t temp;
reader->ReadUnsignedInteger(reader, &temp);
temp_count++;
}
} else if (!strcmp(type, "size_t")) {
if (!(reader->NextArrayItem(reader))) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
uint32_t temp;
reader->ReadUnsignedInteger(reader, &temp);
} else {
fprintf(stderr, "cannot skip graph attr %s", key);
status = -1;
break;
}
if (reader->NextArrayItem(reader)) {
fprintf(stderr, "Invalid json format\n");
status = -1;
break;
}
}
}
if (bitmask != (1 | 2 | 4)) {
fprintf(stderr, "invalid format\n");
status = -1;
}
return status;
}
int TVMGraphExecutorGraphAttr_Release(TVMGraphExecutorGraphAttr* attr) {
if (!attr) {
return 0;
}
if (attr->storage_id) {
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryFree(attr->storage_id, dev);
attr->storage_id = 0;
if (err != kTvmErrorNoError) {
return -1;
}
}
if (attr->device_index) {
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryFree(attr->device_index, dev);
attr->device_index = 0;
if (err != kTvmErrorNoError) {
return -1;
}
}
if (attr->dltype) {
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryFree(attr->dltype, dev);
attr->dltype = 0;
if (err != kTvmErrorNoError) {
return -1;
}
}
if (attr->shape) {
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryFree(attr->shape, dev);
attr->shape = 0;
if (err != kTvmErrorNoError) {
return -1;
}
}
if (attr->ndim) {
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryFree(attr->ndim, dev);
attr->ndim = 0;
if (err != kTvmErrorNoError) {
return -1;
}
}
return 0;
}
int TVMGraphExecutor_Load(TVMGraphExecutor* executor, JSONReader* reader) {
int status = 0;
reader->BeginObject(reader);
int bitmask = 0;
char key[20];
while (reader->NextObjectItem(reader, key, sizeof(key))) {
if (!strcmp(key, "nodes")) {
reader->BeginArray(reader);
size_t num_items = 0;
if (reader->ArrayLength(reader, &num_items) != 0) {
fprintf(stderr, "error determing list_int length\n");
status = -1;
break;
}
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryAllocate(sizeof(TVMGraphExecutorNode) * num_items, dev,
(void**)&executor->nodes);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory allocate error: %08x", err);
status = -1;
break;
}
while (reader->NextArrayItem(reader)) {
if (executor->nodes_count == num_items) {
fprintf(stderr, "array too big\n");
status = -1;
return status;
}
TVMGraphExecutorNode* node = executor->nodes + executor->nodes_count;
status = TVMGraphExecutorNode_Load(node, reader);
if (status != 0) {
fprintf(stderr, "failed to load an element in `nodes` field in graph executor node.\n");
break;
#if TVM_CRT_DEBUG
} else {
printf("loading: node (%u) %s loaded.\n", executor->nodes_count, node->name);
#endif // TVM_CRT_DEBUG
}
executor->nodes_count++;
}
bitmask |= 1;
} else if (!strcmp(key, "arg_nodes")) {
reader->BeginArray(reader);
size_t num_items = 0;
if (reader->ArrayLength(reader, &num_items) != 0) {
fprintf(stderr, "error determing list_int length\n");
status = -1;
break;
}
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryAllocate(sizeof(uint32_t) * num_items, dev,
(void**)&executor->input_nodes);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory allocate error: %08x", err);
status = -1;
break;
}
while (reader->NextArrayItem(reader)) {
if (executor->input_nodes_count == num_items) {
fprintf(stderr, "array too big\n");
status = -1;
return status;
}
uint32_t* node = executor->input_nodes + executor->input_nodes_count;
reader->ReadUnsignedInteger(reader, node);
executor->input_nodes_count++;
}
bitmask |= 2;
} else if (!strcmp(key, "node_row_ptr")) {
reader->BeginArray(reader);
size_t num_items = 0;
if (reader->ArrayLength(reader, &num_items) != 0) {
fprintf(stderr, "error determing list_int length\n");
status = -1;
break;
}
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryAllocate(sizeof(uint32_t) * num_items, dev,
(void**)&executor->node_row_ptr);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory allocate error: %08x", err);
status = -1;
break;
}
while (reader->NextArrayItem(reader)) {
if (executor->node_row_ptr_count == num_items) {
fprintf(stderr, "array too big\n");
status = -1;
return status;
}
uint32_t count = executor->node_row_ptr_count;
uint32_t* node = executor->node_row_ptr + count;
reader->ReadUnsignedInteger(reader, node);
executor->node_row_ptr_count++;
}
bitmask |= 4;
} else if (!strcmp(key, "heads")) {
reader->BeginArray(reader);
size_t num_items = 0;
if (reader->ArrayLength(reader, &num_items) != 0) {
fprintf(stderr, "error determing list_int length\n");
status = -1;
break;
}
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryAllocate(sizeof(TVMGraphExecutorNodeEntry) * num_items,
dev, (void**)&executor->outputs);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory allocate error: %08x", err);
status = -1;
break;
}
while (reader->NextArrayItem(reader)) {
if (executor->outputs_count == num_items) {
fprintf(stderr, "array too big\n");
status = -1;
return status;
}
TVMGraphExecutorNodeEntry* entry = executor->outputs + executor->outputs_count;
status = NodeEntry_Load(entry, reader);
if (status != 0) {
fprintf(stderr, "Fail to load an element in `heads` field in graph executor node.\n");
break;
}
executor->outputs_count++;
}
bitmask |= 8;
} else if (!strcmp(key, "attrs")) {
status = TVMGraphExecutorGraphAttr_Load(&(executor->attrs), reader);
if (status != 0) {
fprintf(stderr, "Fail to load an element in `heads` field in graph executor node.\n");
break;
}
bitmask |= 16;
} else if (!strcmp(key, "metadata")) {
break;
} else {
fprintf(stderr, "key %s is not supported\n", key);
status = -1;
}
if (status != 0) {
break;
}
}
if (!(bitmask == (1 | 2 | 4 | 8 | 16))) {
fprintf(stderr, "invalid format\n");
status = -1;
}
return status;
}
uint32_t TVMGraphExecutor_GetEntryId(TVMGraphExecutor* executor, uint32_t nid, uint32_t index) {
return executor->node_row_ptr[nid] + index;
}
/*!
* \brief Get the number of input tensors allocated.
* \param executor The graph executor.
* \return the number of input tensors allocated.
*/
int TVMGraphExecutor_GetNumInputs(TVMGraphExecutor* executor) {
return executor->input_nodes_count;
}
/*!
* \brief Get the input index given the name of input.
* \param executor The graph executor.
* \param name The name of the input.
* \return The index of input.
*/
int TVMGraphExecutor_GetInputIndex(TVMGraphExecutor* executor, const char* name) {
uint32_t i;
int32_t rv = -1;
for (i = 0; i < executor->input_nodes_count; ++i) {
uint32_t nid = executor->input_nodes[i];
if (!strcmp(executor->nodes[nid].name, name)) {
rv = i;
break;
}
}
CHECK_GE(rv, 0, "cannot find '%s' among input.", name);
return rv;
}
/*!
* \brief set input to the graph based on name.
* \param executor The graph executor.
* \param name The name of the input.
* \param data_in The input data.
*/
void TVMGraphExecutor_SetInput(TVMGraphExecutor* executor, const char* name, DLTensor* data_in) {
uint32_t index = TVMGraphExecutor_GetInputIndex(executor, name);
  if (index >= executor->input_nodes_count) {
    fprintf(stderr, "given index is greater than num of input nodes.\n");
    return;
  }
uint32_t eid = TVMGraphExecutor_GetEntryId(executor, executor->input_nodes[index], 0);
executor->data_entry[eid].dl_tensor.data = data_in->data;
}
/*!
* \brief Load parameters from parameter blob.
* \param executor The graph executor.
* \param param_blob A binary blob of parameter.
* \param param_size The parameter size.
* \return The result of this function execution.
*/
int TVMGraphExecutor_LoadParams(TVMGraphExecutor* executor, const char* param_blob,
const uint32_t param_size) {
int status = 0;
const char* bptr = param_blob;
uint64_t header, reserved;
memcpy(&header, bptr, sizeof(header));
bptr += sizeof(header);
if (header != kTVMNDArrayListMagic) {
fprintf(stderr, "Invalid parameters file format");
status = -1;
}
memcpy(&reserved, bptr, sizeof(reserved));
bptr += sizeof(reserved);
// read names
char* names = NULL;
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryAllocate(
TVM_CRT_MAX_STRLEN_PARAM_NAME * executor->nodes_count, dev, (void**)&names);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory allocate error: %08x", err);
status = -1;
return status;
}
memset(names, 0, TVM_CRT_MAX_STRLEN_PARAM_NAME * executor->nodes_count);
uint64_t names_count;
int idx;
memcpy(&names_count, bptr, sizeof(names_count));
bptr += sizeof(names_count);
for (idx = 0; idx < names_count; idx++) {
uint64_t name_length;
memcpy(&name_length, bptr, sizeof(name_length));
bptr += sizeof(name_length);
if (name_length >= TVM_CRT_MAX_STRLEN_PARAM_NAME) {
fprintf(stderr, "Error: function name longer than expected.\n");
status = -1;
}
memcpy(names + TVM_CRT_MAX_STRLEN_PARAM_NAME * idx, bptr, name_length);
bptr += name_length;
}
// read sizes
uint64_t sz;
memcpy(&sz, bptr, sizeof(sz));
bptr += sizeof(sz);
uint32_t size = sz;
if (size != names_count) {
fprintf(stderr, "Invalid parameters file format\n");
status = -1;
}
for (idx = 0; idx < size; idx++) {
int32_t in_idx =
TVMGraphExecutor_GetInputIndex(executor, names + TVM_CRT_MAX_STRLEN_PARAM_NAME * idx);
    CHECK_GE(in_idx, 0, "Found param for non-existent input: %s\n",
names + TVM_CRT_MAX_STRLEN_PARAM_NAME * idx);
uint32_t eid = TVMGraphExecutor_GetEntryId(executor, executor->input_nodes[in_idx], 0);
if (!(eid < executor->data_entry_count)) {
fprintf(stderr, "`entry_id`=%d is greater than expected(%d).\n", eid,
executor->data_entry_count);
status = -1;
}
if (executor->data_entry[eid].dl_tensor.shape) {
err = TVMPlatformMemoryFree(executor->data_entry[eid].dl_tensor.shape, dev);
if (err != kTvmErrorNoError) {
status = -1;
}
executor->data_entry[eid].dl_tensor.shape = 0;
}
if (executor->data_entry[eid].dl_tensor.data) {
err = TVMPlatformMemoryFree(executor->data_entry[eid].dl_tensor.data, dev);
if (err != kTvmErrorNoError) {
status = -1;
}
executor->data_entry[eid].dl_tensor.data = 0;
}
status |= TVMNDArray_Load(&(executor->data_entry[eid]), &bptr);
#if TVM_CRT_DEBUG
TVMNDArray* entry = &(executor->data_entry[eid]);
printf("loading: param %s loaded, in_idx=%d, eid=%d, ndim=%d, data[0]=%f\n",
names + TVM_CRT_MAX_STRLEN_PARAM_NAME * idx, in_idx, eid, entry->dl_tensor.ndim,
((float*)entry->dl_tensor.data)[0]); // NOLINT(*)
#endif // TVM_CRT_DEBUG
}
// Release memory
err = TVMPlatformMemoryFree(names, dev);
if (err != kTvmErrorNoError) {
status = -1;
return status;
}
return status;
}
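/*
 * Illustrative note (derived from the reads in TVMGraphExecutor_LoadParams
 * above): the parameter blob is consumed in this order:
 *
 *   uint64_t header;         // must equal kTVMNDArrayListMagic
 *   uint64_t reserved;
 *   uint64_t names_count;
 *   names_count times: { uint64_t name_length; char name[name_length]; }
 *   uint64_t sz;             // must equal names_count
 *   sz times: one serialized TVMNDArray (see TVMNDArray_Load)
 */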
/*!
* \brief Run all the operations one by one.
* \param executor The graph executor.
*/
void TVMGraphExecutor_Run(TVMGraphExecutor* executor) {
// setup the array and requirements.
uint32_t idx;
for (idx = 0; idx < executor->op_execs_count; ++idx) {
if (executor->op_execs[idx].fexec) {
#if TVM_CRT_DEBUG
printf("calling: %s (%d)\n", executor->op_execs[idx].name, idx);
#endif // TVM_CRT_DEBUG
executor->op_execs[idx].Call(&(executor->op_execs[idx]));
}
}
}
/*!
* \brief Get the number of output tensors allocated.
* \param executor The graph executor.
* \return the number of output tensors allocated.
*/
int TVMGraphExecutor_GetNumOutputs(TVMGraphExecutor* executor) { return executor->outputs_count; }
int TVMGraphExecutor_GetOutput(TVMGraphExecutor* executor, const int32_t idx, DLTensor* out) {
int status = 0;
uint32_t nid = executor->outputs[idx].node_id;
uint32_t index = executor->outputs[idx].index;
uint32_t eid = TVMGraphExecutor_GetEntryId(executor, nid, index);
// copy data section to allocated output tensor
int32_t elem_bytes = out->dtype.bits / 8;
int64_t size = Shape_Accumulate(out->shape, out->ndim);
DLTensor* tensor = &(executor->data_entry[eid].dl_tensor);
CHECK(out->ndim == tensor->ndim);
CHECK(out->dtype.bits == tensor->dtype.bits);
CHECK(Shape_Accumulate(out->shape, out->ndim) == Shape_Accumulate(tensor->shape, tensor->ndim));
memcpy(out->data, tensor->data, size * elem_bytes);
return status;
}
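/*
 * Illustrative sketch (assumptions: an executor already set up via
 * TVMGraphExecutor_Init, an input named "data", and caller-allocated
 * input/output DLTensors):
 *
 *   TVMGraphExecutor_SetInput(executor, "data", &input_tensor);
 *   TVMGraphExecutor_Run(executor);
 *   TVMGraphExecutor_GetOutput(executor, 0, &output_tensor);
 */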
int TVMGraphExecutor_SetupStorage(TVMGraphExecutor* executor) {
TVMPackedFunc lookup_linked_param;
int lookup_linked_param_valid;
uint32_t idx;
{
TVMArgs temp_args;
temp_args.values[0].v_int64 = 0;
temp_args.tcodes[0] = kTVMArgInt;
temp_args.values_count = 1;
lookup_linked_param_valid =
(TVMPackedFunc_InitModuleFunc(&lookup_linked_param, executor->module_handle,
"_lookup_linked_param", &temp_args) == 0);
}
// Grab saved optimization plan from graph.
TVMGraphExecutorGraphAttr* attrs = &(executor->attrs);
DLDataType* vtype = NULL;
DLDevice alloc_dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryAllocate(sizeof(DLDataType) * attrs->dltype_count,
alloc_dev, (void**)&vtype);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory allocate error: %08x", err);
return -1;
}
for (idx = 0; idx < attrs->dltype_count; idx++) {
vtype[idx] = String2DLDataType(attrs->dltype + idx * TVM_CRT_MAX_STRLEN_DLTYPE);
}
// Size and device type of each storage pool entry.
TVMGraphExecutorPoolEntry* pool_entry = NULL;
err = TVMPlatformMemoryAllocate(sizeof(TVMGraphExecutorPoolEntry) * executor->nodes_count,
alloc_dev, (void**)&pool_entry);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory allocate error: %08x", err);
return -1;
}
memset(pool_entry, 0, sizeof(TVMGraphExecutorPoolEntry) * executor->nodes_count);
uint32_t pool_entry_count = 0;
// Find the maximum space size.
for (idx = 0; idx < attrs->shape_count; idx++) {
int storage_id = attrs->storage_id[idx];
// Use the fallback device if no device index is available.
int device_type = executor->devices[0].device_type;
uint32_t size = Shape_Accumulate(attrs->shape + idx * TVM_CRT_MAX_NDIM, attrs->ndim[idx]);
DLDataType t = vtype[idx];
uint32_t bits = t.bits * t.lanes;
size_t bytes = ((bits + 7U) / 8U) * size;
uint32_t sid = storage_id;
if (sid >= pool_entry_count) {
pool_entry_count = sid + 1;
}
pool_entry[sid].entry_id = idx;
pool_entry[sid].size = MAX(pool_entry[sid].size, bytes);
pool_entry[sid].device_type = device_type;
}
// Allocate the space.
err = TVMPlatformMemoryAllocate(sizeof(TVMGraphExecutorStorageEntry) * pool_entry_count,
alloc_dev, (void**)&executor->storage_pool);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory allocate error: %08x", err);
return -1;
}
for (idx = 0; idx < pool_entry_count; idx++) {
TVMGraphExecutorPoolEntry pit = pool_entry[idx];
DLDevice dev = executor->devices[0];
uint8_t did_find_linked_param = 0;
if (lookup_linked_param_valid) {
lookup_linked_param.args.values[0].v_int64 = idx;
CHECK_EQ(lookup_linked_param.Call(&lookup_linked_param), 0, "lookup_linked_param");
void* linked_param_data = lookup_linked_param.ret_value.values[0].v_handle;
if (linked_param_data != NULL) {
executor->storage_pool[executor->storage_pool_count].is_linked_param = 1;
DLTensor* tensor = &executor->storage_pool[executor->storage_pool_count].array.dl_tensor;
tensor->data = linked_param_data;
tensor->device = dev;
tensor->ndim = attrs->ndim[pit.entry_id];
tensor->shape = attrs->shape + idx * TVM_CRT_MAX_NDIM;
tensor->strides = NULL;
tensor->byte_offset = 0;
did_find_linked_param = 1;
}
}
if (did_find_linked_param == 0) {
DLDataType dtype = {kDLFloat, 32, 1};
int64_t shape[TVM_CRT_MAX_NDIM] = {
0,
};
shape[0] = (pit.size + 3) / 4;
int status = TVMNDArray_Empty(1, shape, dtype, dev,
&executor->storage_pool[executor->storage_pool_count].array);
CHECK_EQ(status, 0, "fail to create storage_pool with idx=%d\n", idx);
}
executor->storage_pool_count++;
}
// Assign the pooled entries. A unified memory pool is used to simplify
// memory assignment for each node entry. The allocated memory on each device
// is mapped to this pool.
executor->data_entry_count = executor->node_row_ptr[executor->node_row_ptr_count - 1];
err = TVMPlatformMemoryAllocate(sizeof(TVMNDArray) * executor->data_entry_count, alloc_dev,
(void**)&executor->data_entry);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory allocate error: %08x", err);
return -1;
}
for (idx = 0; idx < executor->data_entry_count; ++idx) {
uint32_t storage_id = attrs->storage_id[idx];
CHECK(storage_id < executor->storage_pool_count);
int status = TVMNDArray_CreateView(&(executor->storage_pool[storage_id].array),
attrs->shape + idx * TVM_CRT_MAX_NDIM, attrs->ndim[idx],
vtype[idx], &executor->data_entry[idx]);
CHECK_EQ(status, 0, "fail to create for node with idx=%d, storage_id=%u\n", idx, storage_id);
TVMNDArray_IncrementReference(&executor->data_entry[idx]);
}
// Release memory
err = TVMPlatformMemoryFree(vtype, alloc_dev);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory free error: %08x", err);
return err;
}
err = TVMPlatformMemoryFree(pool_entry, alloc_dev);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory free error: %08x", err);
return -1;
}
return 0;
}
int TVMGraphExecutor_SetupOpExecs(TVMGraphExecutor* executor) {
int status = 0;
uint32_t nid, idx;
executor->op_execs_count = executor->nodes_count;
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryAllocate(sizeof(TVMPackedFunc) * executor->op_execs_count,
dev, (void**)&executor->op_execs);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory allocate error: %08x", err);
status = -1;
return status;
}
for (nid = 0; nid < executor->nodes_count; nid++) {
const TVMGraphExecutorNode* inode = executor->nodes + nid;
if (strcmp(inode->op_type, "null")) {
DLTensorPtr args[TVM_CRT_MAX_ARGS];
uint32_t args_count = 0;
for (idx = 0; idx < inode->inputs_count; idx++) {
const TVMGraphExecutorNodeEntry* entry = inode->inputs + idx;
uint32_t eid = TVMGraphExecutor_GetEntryId(executor, entry->node_id, entry->index);
args[idx] = &(executor->data_entry[eid].dl_tensor);
args_count++;
}
for (idx = 0; idx < inode->param.num_outputs; idx++) {
uint32_t eid = TVMGraphExecutor_GetEntryId(executor, nid, idx);
args[args_count] = &(executor->data_entry[eid].dl_tensor);
args_count++;
}
if (strcmp(inode->op_type, "tvm_op")) {
fprintf(stderr, "Can only take tvm_op as op, but \"%s\" is found.\n", inode->op_type);
status = -1;
break;
}
if (args_count >= TVM_CRT_MAX_ARGS) {
fprintf(stderr, "too many arguments: expected less than %d args, but got %d.\n",
TVM_CRT_MAX_ARGS, args_count);
status = -1;
break;
}
#if TVM_CRT_DEBUG
printf("tvm_op: creating %s with node_id=%d\n", inode->param.func_name, nid);
#endif // TVM_CRT_DEBUG
TVMPackedFunc pf;
TVMGraphExecutor_CreateTVMOp(executor, &(inode->param), args, args_count, &pf);
executor->op_execs[nid] = pf;
} else {
memset(&executor->op_execs[nid], 0, sizeof(TVMPackedFunc));
}
}
return status;
}
typedef struct TVMOpArgs {
DLTensor args[TVM_CRT_MAX_ARGS];
uint32_t args_count;
TVMValue arg_values[TVM_CRT_MAX_ARGS];
uint32_t arg_values_count;
uint32_t arg_tcodes[TVM_CRT_MAX_ARGS];
uint32_t arg_tcodes_count;
int64_t shape_data[TVM_CRT_MAX_ARGS];
uint32_t shape_data_count;
} TVMOpArgs;
int32_t TVMGraphExecutor_CreateTVMOp(TVMGraphExecutor* executor, const TVMOpParam* param,
DLTensorPtr* args, const uint32_t args_count,
TVMPackedFunc* pf) {
int status = 0;
uint32_t idx;
TVMOpArgs arg_ptr;
memset(&arg_ptr, 0, sizeof(TVMOpArgs));
arg_ptr.args_count = args_count;
if (param->flatten_data) {
arg_ptr.shape_data_count = arg_ptr.args_count;
}
for (idx = 0; idx < arg_ptr.args_count; ++idx) {
TVMValue v;
memset(&v, 0, sizeof(v));
DLTensor* t = &(arg_ptr.args[idx]);
/* v.v_handle = &((*args)[idx]); */
v.v_handle = args[idx];
arg_ptr.arg_values[idx] = v;
arg_ptr.arg_values_count++;
arg_ptr.arg_tcodes[idx] = kTVMNDArrayHandle;
arg_ptr.arg_tcodes_count++;
if (param->flatten_data) {
arg_ptr.shape_data[idx] = Shape_Accumulate(t->shape, t->ndim);
t->ndim = 1;
t->shape[0] = arg_ptr.shape_data[idx];
}
}
if (!strcmp(param->func_name, "__nop") || !strcmp(param->func_name, "__copy")) {
fprintf(stderr, "%s function is not yet supported.", param->func_name);
status = -1;
}
TVMArgs targs = TVMArgs_Create(arg_ptr.arg_values, arg_ptr.arg_tcodes, arg_ptr.arg_values_count);
status = TVMPackedFunc_InitModuleFunc(pf, executor->module_handle, param->func_name, &targs);
return status;
}
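/*
 * Note on flatten_data above: when the op parameter requests flattening, each
 * argument tensor is rewritten in place to a single dimension whose length is the
 * product of its original shape. As an illustrative example, a 2 x 3 tensor becomes
 * ndim = 1 with shape[0] = 6 before the packed function is invoked.
 */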
/*!
 * \brief Initialize the graph executor with graph and device.
 * \param executor The executor instance to initialize.
 * \param graph_json The execution graph in serialized JSON form.
 * \param module_handle The module containing the compiled functions for the host
 * processor.
 * \param devs The devices on which graph nodes will be executed; the first entry
 * is used as the host (fallback) device.
 * \return 0 on success.
 */
int TVMGraphExecutor_Init(TVMGraphExecutor* executor, const char* graph_json,
TVMModuleHandle module_handle, const DLDevice* devs) {
JSONReader reader;
tvm_crt_error_t err = JSONReader_Create(graph_json, &reader);
if (err != kTvmErrorNoError) {
return -1;
}
TVMGraphExecutor_Load(executor, &reader);
err = JSONReader_Release(&reader);
if (err != kTvmErrorNoError) {
return -1;
}
executor->module_handle = module_handle;
executor->devices[0] = devs[0];
int status;
status = TVMGraphExecutor_SetupStorage(executor);
if (status != 0) {
return status;
}
status = TVMGraphExecutor_SetupOpExecs(executor);
return status;
}
int TVMGraphExecutor_Create(const char* sym_json, TVMModuleHandle module_handle,
const DLDevice* devs, TVMGraphExecutor** executor) {
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryAllocate(sizeof(TVMGraphExecutor), dev, (void**)executor);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory allocate error: %08x", err);
return -1;
}
memset(*executor, 0, sizeof(TVMGraphExecutor));
// init
return TVMGraphExecutor_Init(*executor, sym_json, module_handle, devs);
}
int TVMGraphExecutor_Release(TVMGraphExecutor** pptr) {
int status = 0;
int32_t idx;
TVMGraphExecutor* executor = (TVMGraphExecutor*)(*pptr);
for (idx = 0; idx < executor->nodes_count; ++idx) {
status = TVMGraphExecutorNodeRelease(&(executor->nodes[idx]));
if (status != 0) {
return status;
}
}
DLDevice dev = {kDLCPU, 0};
status = TVMPlatformMemoryFree(executor->nodes, dev);
if (status != 0) {
return status;
}
status = TVMGraphExecutorGraphAttr_Release(&(executor->attrs));
if (status != 0) {
return status;
}
for (idx = 0; idx < executor->storage_pool_count; ++idx) {
if (executor->storage_pool[idx].is_linked_param == 0) {
status = TVMNDArray_Release(&(executor->storage_pool[idx]).array);
if (status != 0) {
return status;
}
}
}
for (idx = 0; idx < executor->data_entry_count; ++idx) {
status = TVMPlatformMemoryFree(executor->data_entry[idx].dl_tensor.shape, dev);
if (status != 0) {
return status;
}
}
status = TVMPlatformMemoryFree(executor->input_nodes, dev);
if (status != 0) {
return status;
}
status = TVMPlatformMemoryFree(executor->node_row_ptr, dev);
if (status != 0) {
return status;
}
status = TVMPlatformMemoryFree(executor->outputs, dev);
if (status != 0) {
return status;
}
status = TVMPlatformMemoryFree(executor->storage_pool, dev);
if (status != 0) {
return status;
}
status = TVMPlatformMemoryFree(executor->data_entry, dev);
if (status != 0) {
return status;
}
status = TVMPlatformMemoryFree(executor->op_execs, dev);
if (status != 0) {
return status;
}
status = TVMPlatformMemoryFree(*pptr, dev);
if (status != 0) {
return status;
}
if (g_fexecs) {
status = TVMPlatformMemoryFree(g_fexecs, dev);
g_fexecs = 0;
if (status != 0) {
return status;
}
}
return 0;
}
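/*
 * Illustrative end-to-end sketch of the flow implemented above. The values
 * graph_json, mod, params, params_len, input_tensor, output_tensor and the input
 * name "data" are hypothetical caller-provided data, not part of this runtime.
 *
 *   DLDevice dev = {kDLCPU, 0};
 *   TVMGraphExecutor* exec = NULL;
 *   if (TVMGraphExecutor_Create(graph_json, mod, &dev, &exec) == 0) {
 *     TVMGraphExecutor_LoadParams(exec, params, params_len);
 *     TVMGraphExecutor_SetInput(exec, "data", &input_tensor);
 *     TVMGraphExecutor_Run(exec);
 *     TVMGraphExecutor_GetOutput(exec, 0, &output_tensor);
 *     TVMGraphExecutor_Release(&exec);
 *   }
 */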
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/graph_executor/load_json.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// LINT_C_FILE
/*!
* \file load_json.c
* \brief Load graph from JSON file.
*/
#include <stdlib.h>
#include <string.h>
#include <tvm/runtime/crt/internal/graph_executor/load_json.h>
#include <tvm/runtime/crt/page_allocator.h>
#include <tvm/runtime/crt/platform.h>
// the node entry structure in serialized format
typedef struct JSONNodeEntry {
uint32_t node_id;
uint32_t index;
uint32_t version;
void (*Load)(struct JSONNodeEntry* entry, JSONReader* reader);
} JSONNodeEntry;
void JSONNodeEntryLoad(JSONNodeEntry* entry, JSONReader* reader) {
  reader->BeginArray(reader);
  if (!reader->NextArrayItem(reader)) {
    fprintf(stderr, "invalid json format\n");
  }
  reader->ReadUnsignedInteger(reader, &(entry->node_id));
  if (!reader->NextArrayItem(reader)) {
    fprintf(stderr, "invalid json format\n");
  }
  reader->ReadUnsignedInteger(reader, &(entry->index));
  if (reader->NextArrayItem(reader)) {
    reader->ReadUnsignedInteger(reader, &(entry->version));
    if (reader->NextArrayItem(reader)) {
      fprintf(stderr, "invalid json format\n");
    }
  } else {
    entry->version = 0;
  }
}
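/*
 * A serialized node entry is a JSON array of the form [node_id, index] or
 * [node_id, index, version]. As an illustrative example, [2, 0] refers to
 * output 0 of node 2 with an implicit version of 0.
 */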
// implementation of Seq class
void SeqPush(Seq* seq, uint32_t src) {
if (seq->size >= seq->allocated) {
printf("seq too large.\n");
}
seq->data[seq->size] = src;
seq->size += 1;
}
uint32_t* SeqBack(Seq* seq) {
if (seq->size >= seq->allocated) {
printf("seq too large.\n");
}
return seq->data + (seq->size - 1);
}
void SeqPop(Seq* seq) {
if (seq->size >= seq->allocated) {
printf("seq size is too large.\n");
}
if (seq->size == 0) {
printf("seq size is too small.\n");
}
seq->size -= 1;
}
tvm_crt_error_t SeqCreate(uint64_t len, Seq** seq) {
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryAllocate(sizeof(Seq), dev, (void**)seq);
if (err != kTvmErrorNoError) {
return err;
}
memset(*seq, 0, sizeof(Seq));
(*seq)->allocated = len;
err = TVMPlatformMemoryAllocate(sizeof(uint32_t) * len, dev, (void**)&(*seq)->data);
if (err != kTvmErrorNoError) {
return err;
}
(*seq)->push_back = SeqPush;
(*seq)->back = SeqBack;
(*seq)->pop_back = SeqPop;
return err;
}
tvm_crt_error_t SeqRelease(Seq* seq) {
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err = TVMPlatformMemoryFree(seq->data, dev);
if (err != kTvmErrorNoError) {
return err;
}
return TVMPlatformMemoryFree(seq, dev);
}
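/*
 * Illustrative usage sketch of the Seq helper; the capacity and pushed value
 * are arbitrary example numbers.
 *
 *   Seq* seq = NULL;
 *   if (SeqCreate(4, &seq) == kTvmErrorNoError) {
 *     seq->push_back(seq, 42);
 *     uint32_t top = *seq->back(seq);  // top == 42
 *     seq->pop_back(seq);
 *     SeqRelease(seq);
 *   }
 */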
// implementations of JSONReader
/*!
* \brief Takes the next char from the input source.
* \return the next character.
*/
char JSONReader_NextChar(JSONReader* reader) {
char ch = reader->isptr[0];
reader->isptr += 1;
return ch;
}
/*!
 * \brief Returns the next char from the input source without consuming it.
* \return the next character.
*/
char JSONReader_PeekNextChar(JSONReader* reader) { return reader->isptr[0]; }
/*!
* \brief Read next nonspace character.
* \return the next nonspace character.
*/
char JSONReader_NextNonSpace(JSONReader* reader) {
int ch;
do {
ch = reader->NextChar(reader);
if (ch == '\n') {
++(reader->line_count_n_);
}
if (ch == '\r') {
++(reader->line_count_r_);
}
} while (isspace(ch));
return ch;
}
/*!
 * \brief Peek at the next non-space character without consuming it.
 * \return the next non-space character.
*/
char JSONReader_PeekNextNonSpace(JSONReader* reader) {
int ch;
while (1) {
ch = reader->PeekNextChar(reader);
if (ch == '\n') {
++(reader->line_count_n_);
}
if (ch == '\r') {
++(reader->line_count_r_);
}
if (!isspace(ch)) break;
reader->NextChar(reader);
}
return ch;
}
/*!
* \brief Parse next JSON string.
* \param out_str the output string. NULL to merely consume input and discard it.
* \param out_str_size Number of bytes available to write starting from out_str. Includes
* terminating \0.
 * \return 0; malformed input is reported to stderr.
*/
int JSONReader_ReadString(JSONReader* reader, char* out_str, size_t out_str_size) {
int status = 0;
int ch = reader->NextNonSpace(reader);
size_t output_counter = 0;
while (output_counter < out_str_size || out_str == NULL) {
ch = reader->NextChar(reader);
if (ch == '\\') {
char sch = reader->NextChar(reader);
switch (sch) {
case 'r':
out_str[output_counter++] = '\r';
break;
case 'n':
out_str[output_counter++] = '\n';
break;
case '\\':
out_str[output_counter++] = '\\';
break;
case 't':
out_str[output_counter++] = '\t';
break;
case '\"':
out_str[output_counter++] = '\"';
break;
default:
fprintf(stderr, "unknown string escape %c\n", sch);
break;
}
} else {
if (ch == '\"') {
break;
}
if (out_str != NULL) {
out_str[output_counter++] = ch;
}
}
if (output_counter == out_str_size - 1) {
fprintf(stderr, "Error: string size greater than buffer size (%zu).\n", out_str_size);
break;
}
if (ch == EOF || ch == '\r' || ch == '\n') {
fprintf(stderr, "Error at line %zu, Expect \'\"\' but reach end of line\n",
reader->line_count_n_);
break;
}
}
if (out_str != NULL) {
out_str[output_counter] = 0;
}
return status;
}
int JSONReader_ReadUnsignedInteger(JSONReader* reader, unsigned int* out_value) {
int status = 0;
char* endptr;
const char* icstr = reader->isptr;
unsigned int number = strtol(icstr, &endptr, 10);
reader->isptr += endptr - icstr;
*out_value = number;
return status;
}
int JSONReader_ReadInteger(JSONReader* reader, int64_t* out_value) {
int status = 0;
char* endptr;
const char* icstr = reader->isptr;
int64_t number = strtol(icstr, &endptr, 10);
reader->isptr += endptr - icstr;
*out_value = number;
return status;
}
/*!
* \brief Begin parsing an object.
* \code
* string key;
* // value can be any type that is json serializable.
* string value;
* reader->BeginObject();
* while (reader->NextObjectItem(&key)) {
 * // do something with the key and value
* reader->Read(&value);
* }
* \endcode
*/
void JSONReader_BeginObject(JSONReader* reader) {
int ch = reader->NextNonSpace(reader);
if (!(ch == '{')) {
fprintf(stderr, "Error at line %zu, Expect \'{\' but got \'%c\'\n", reader->line_count_n_, ch);
}
Seq* scope_counter_ = reader->scope_counter_;
scope_counter_->push_back(scope_counter_, 0);
}
/*!
* \brief Try to move to next object item.
* If this call is successful, user can proceed to call
* reader->Read to read in the value.
* \param out_key the key to the next object.
* \param out_key_size number of bytes available to write at out_key, including terminating \0.
* \return true if the read is successful, false if we are at end of the object.
*/
uint8_t JSONReader_NextObjectItem(JSONReader* reader, char* out_key, size_t out_key_size) {
uint8_t next = 1;
Seq* scope_counter_ = reader->scope_counter_;
if (scope_counter_->back(scope_counter_)[0] != 0) {
int ch = reader->NextNonSpace(reader);
if (ch == EOF) {
next = 0;
} else if (ch == '}') {
next = 0;
} else {
if (ch != ',') {
fprintf(stderr, "Error at line %zu, JSON object expect \'}\' or \',\' but got \'%c\'\n",
reader->line_count_n_, ch);
}
}
} else {
int ch = reader->PeekNextNonSpace(reader);
if (ch == '}') {
reader->NextChar(reader);
next = 0;
}
}
if (!next) {
scope_counter_->pop_back(scope_counter_);
return 0;
} else {
scope_counter_->back(scope_counter_)[0] += 1;
int err = reader->ReadString(reader, out_key, out_key_size);
if (err != 0) {
fprintf(stderr, "error reading key");
return 0;
}
int ch = reader->NextNonSpace(reader);
if (ch != ':') {
fprintf(stderr, "Error at line %zu, Expect \':\' but get \'%c\'\n", reader->line_count_n_,
ch);
}
return 1;
}
}
/*!
* \brief Begin parsing an array.
* \code
* // value can be any type that is json serializable.
* string value;
* reader->BeginArray();
* while (reader->NextArrayItem(&value)) {
 * // do something with the value
* }
* \endcode
*/
void JSONReader_BeginArray(JSONReader* reader) {
int ch = reader->NextNonSpace(reader);
if (ch != '[') {
fprintf(stderr, "Error at line %zu, Expect \'[\' but get \'%c\'\n", reader->line_count_n_, ch);
}
Seq* scope_counter_ = reader->scope_counter_;
scope_counter_->push_back(scope_counter_, 0);
}
/*!
* \brief Try to read the next element in the array.
* If this call is successful, user can proceed to call
* reader->Read to read in the value.
* \return true if the read is successful, false if we are at end of the array.
*/
uint8_t JSONReader_NextArrayItem(JSONReader* reader) {
uint8_t next = 1;
Seq* scope_counter_ = reader->scope_counter_;
if (scope_counter_->back(scope_counter_)[0] != 0) {
int ch = reader->NextNonSpace(reader);
if (ch == EOF) {
next = 0;
} else if (ch == ']') {
next = 0;
} else {
if (ch != ',') {
fprintf(stderr, "Error at line %zu, JSON object expect \']\' or \',\' but got \'%c\'\n",
reader->line_count_n_, ch);
}
}
} else {
int ch = reader->PeekNextNonSpace(reader);
if (ch == ']') {
reader->NextChar(reader);
next = 0;
}
}
if (!next) {
scope_counter_->pop_back(scope_counter_);
return 0;
} else {
scope_counter_->back(scope_counter_)[0] += 1;
return 1;
}
}
/*!
* \brief Determine the remaining length of the array to read.
* \param num_elements Pointer which receives the length.
* \return 0 if successful
*/
int JSONReader_ArrayLength(JSONReader* reader, size_t* num_elements) {
int status = 0;
char* old_isptr = reader->isptr;
size_t old_line_count_r_ = reader->line_count_r_;
size_t old_line_count_n_ = reader->line_count_n_;
int old_scope_counter_back = *reader->scope_counter_->back(reader->scope_counter_);
typedef enum { kObject, kArray } item_type_t;
Seq* scopes;
tvm_crt_error_t err = SeqCreate(10, &scopes);
if (err != kTvmErrorNoError) {
return -1;
}
item_type_t json_item_type = kArray;
*num_elements = 0;
for (;;) {
int has_item = 0;
if (json_item_type == kArray) {
has_item = reader->NextArrayItem(reader);
if (scopes->size == 0 && has_item != 0) {
(*num_elements)++;
}
} else if (json_item_type == kObject) {
has_item = reader->NextObjectItem(reader, NULL, 0);
} else {
status = -1;
break;
}
if (has_item) {
char c = reader->PeekNextNonSpace(reader);
if (c == '"') {
reader->ReadString(reader, NULL, 1024);
} else if (c == '[') {
reader->BeginArray(reader);
scopes->push_back(scopes, json_item_type);
json_item_type = kArray;
} else if (c == '{') {
reader->BeginObject(reader);
scopes->push_back(scopes, json_item_type);
json_item_type = kObject;
} else {
int64_t val;
reader->ReadInteger(reader, &val);
}
} else {
if (scopes->size > 0) {
json_item_type = *scopes->back(scopes);
scopes->pop_back(scopes);
} else {
break;
}
}
}
reader->isptr = old_isptr;
reader->line_count_r_ = old_line_count_r_;
reader->line_count_n_ = old_line_count_n_;
reader->scope_counter_->push_back(reader->scope_counter_, old_scope_counter_back);
err = SeqRelease(scopes);
if (err != kTvmErrorNoError) {
return -1;
}
return status;
}
/*!
* \brief Constructor.
* \param is the input source.
*/
tvm_crt_error_t JSONReader_Create(const char* is, JSONReader* reader) {
memset(reader, 0, sizeof(JSONReader));
tvm_crt_error_t err = SeqCreate(200, &reader->scope_counter_);
if (err != kTvmErrorNoError) {
return err;
}
reader->NextChar = JSONReader_NextChar;
reader->PeekNextChar = JSONReader_PeekNextChar;
reader->NextNonSpace = JSONReader_NextNonSpace;
reader->PeekNextNonSpace = JSONReader_PeekNextNonSpace;
reader->ReadString = JSONReader_ReadString;
reader->ReadUnsignedInteger = JSONReader_ReadUnsignedInteger;
reader->ReadInteger = JSONReader_ReadInteger;
reader->BeginArray = JSONReader_BeginArray;
reader->BeginObject = JSONReader_BeginObject;
reader->NextArrayItem = JSONReader_NextArrayItem;
reader->NextObjectItem = JSONReader_NextObjectItem;
reader->ArrayLength = JSONReader_ArrayLength;
DLDevice dev = {kDLCPU, 0};
err = TVMPlatformMemoryAllocate(strlen(is) + 1, dev, (void**)&reader->is_);
if (err != kTvmErrorNoError) {
return err;
}
memset(reader->is_, 0, strlen(is) + 1);
snprintf(reader->is_, strlen(is) + 1, "%s", is);
reader->isptr = reader->is_;
return err;
}
tvm_crt_error_t JSONReader_Release(JSONReader* reader) {
tvm_crt_error_t err = SeqRelease(reader->scope_counter_);
if (err != kTvmErrorNoError) {
return err;
}
DLDevice dev = {kDLCPU, 0};
return TVMPlatformMemoryFree(reader->is_, dev);
}
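/*
 * Illustrative usage sketch of the reader API defined above. The JSON snippet,
 * the key buffer size and the expected length are hypothetical example values.
 *
 *   JSONReader reader;
 *   if (JSONReader_Create("{\"shape\": [1, 3, 224]}", &reader) == kTvmErrorNoError) {
 *     char key[32];
 *     reader.BeginObject(&reader);
 *     while (reader.NextObjectItem(&reader, key, sizeof(key))) {
 *       size_t len;
 *       reader.BeginArray(&reader);
 *       reader.ArrayLength(&reader, &len);  // len == 3 for the example above
 *       while (reader.NextArrayItem(&reader)) {
 *         int64_t v;
 *         reader.ReadInteger(&reader, &v);
 *       }
 *     }
 *     JSONReader_Release(&reader);
 *   }
 */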
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/graph_executor_module/graph_executor_module.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// LINT_C_FILE
/*!
* \file graph_executor_module.c
* \brief wrap graph_executor into a TVMModule for use with RPC.
*/
#include <tvm/runtime/crt/func_registry.h>
#include <tvm/runtime/crt/graph_executor.h>
#include <tvm/runtime/crt/graph_executor_module.h>
#include <tvm/runtime/crt/module.h>
#include "tvm/runtime/crt/internal/graph_executor/graph_executor.h"
typedef struct {
TVMModule mod;
TVMGraphExecutor* executor;
} GraphExecutorModule;
static GraphExecutorModule graph_executor;
int32_t TVMGraphExecutorModule_Create(TVMValue* args, int* tcodes, int nargs, TVMValue* ret_values,
int* ret_tcodes, void* resource_handle) {
if (graph_executor.executor != NULL) {
return kTvmErrorExecutorModuleAlreadyCreated;
}
if (nargs != 4) {
return kTvmErrorFunctionCallNumArguments;
}
if (tcodes[0] != kTVMStr || tcodes[1] != kTVMModuleHandle || tcodes[2] != kTVMArgInt ||
tcodes[3] != kTVMArgInt) {
return kTvmErrorFunctionCallWrongArgType;
}
if (args[2].v_int64 != kDLCPU || args[3].v_int64 != 0) {
return kTvmErrorExecutorModuleBadContext;
}
DLDevice dev = {(DLDeviceType)args[2].v_int64, (int)args[3].v_int64};
int ret_value =
TVMGraphExecutor_Create(args[0].v_str, args[1].v_handle, &dev, &graph_executor.executor);
if (ret_value != 0) {
return ret_value;
}
TVMModuleHandle out;
ret_value = TVMModCreateFromCModule(&graph_executor.mod, &out);
if (ret_value != 0) {
ret_tcodes[0] = kTVMNullptr;
TVMGraphExecutor_Release(&graph_executor.executor);
return ret_value;
}
ret_values[0].v_handle = out;
ret_tcodes[0] = kTVMModuleHandle;
return kTvmErrorNoError;
}
int32_t TVMGraphExecutorModule_GetInput(TVMValue* args, int* tcodes, int nargs,
TVMValue* ret_values, int* ret_tcodes,
void* resource_handle) {
if (nargs != 1) {
return kTvmErrorFunctionCallNumArguments;
}
if (tcodes[0] != kTVMStr) {
return kTvmErrorFunctionCallWrongArgType;
}
int index = TVMGraphExecutor_GetInputIndex(graph_executor.executor, args[0].v_str);
if (index < 0) {
return kTvmErrorExecutorModuleNoSuchInput;
}
uint32_t eid = TVMGraphExecutor_GetEntryId(graph_executor.executor,
graph_executor.executor->input_nodes[index], 0);
TVMNDArray* array = &graph_executor.executor->data_entry[eid];
TVMNDArray_IncrementReference(array);
ret_values[0].v_handle = (void*)(&array->dl_tensor);
ret_tcodes[0] = kTVMNDArrayHandle;
return 0;
}
int32_t TVMGraphExecutorModule_GetInputIndex(TVMValue* args, int* tcodes, int nargs,
TVMValue* ret_values, int* ret_tcodes,
void* resource_handle) {
int index = TVMGraphExecutor_GetInputIndex(graph_executor.executor, args[0].v_str);
if (index < 0) {
return kTvmErrorExecutorModuleNoSuchInput;
}
ret_values[0].v_int64 = index;
ret_tcodes[0] = kTVMArgInt;
return 0;
}
int32_t TVMGraphExecutorModule_GetNumInputs(TVMValue* args, int* tcodes, int nargs,
TVMValue* ret_values, int* ret_tcodes,
void* resource_handle) {
if (nargs != 0) {
return kTvmErrorFunctionCallNumArguments;
}
ret_values[0].v_int64 = TVMGraphExecutor_GetNumInputs();
ret_tcodes[0] = kTVMArgInt;
return 0;
}
int32_t TVMGraphExecutorModule_GetNumOutputs(TVMValue* args, int* tcodes, int nargs,
TVMValue* ret_values, int* ret_tcodes,
void* resource_handle) {
if (nargs != 0) {
return kTvmErrorFunctionCallNumArguments;
}
ret_values[0].v_int64 = TVMGraphExecutor_GetNumOutputs(graph_executor.executor);
ret_tcodes[0] = kTVMArgInt;
return 0;
}
int32_t TVMGraphExecutorModule_GetOutput(TVMValue* args, int* tcodes, int nargs,
TVMValue* ret_values, int* ret_tcodes,
void* resource_handle) {
if (nargs != 1) {
return kTvmErrorFunctionCallNumArguments;
}
if (tcodes[0] != kTVMArgInt) {
return kTvmErrorFunctionCallWrongArgType;
}
int output_index = args[0].v_int64;
  if (output_index < 0 || output_index >= TVMGraphExecutor_GetNumOutputs(graph_executor.executor)) {
return kTvmErrorExecutorModuleNoSuchInput;
}
uint32_t nid = graph_executor.executor->outputs[output_index].node_id;
uint32_t index = graph_executor.executor->outputs[output_index].index;
uint32_t eid = TVMGraphExecutor_GetEntryId(graph_executor.executor, nid, index);
TVMNDArray* array = &graph_executor.executor->data_entry[eid];
TVMNDArray_IncrementReference(array);
ret_values[0].v_handle = (void*)(&array->dl_tensor);
ret_tcodes[0] = kTVMNDArrayHandle;
return 0;
}
int32_t TVMGraphExecutorModule_LoadParams(TVMValue* args, int* tcodes, int nargs,
TVMValue* ret_values, int* ret_tcodes,
void* resource_handle) {
if (nargs != 1) {
return kTvmErrorFunctionCallNumArguments;
}
if (tcodes[0] != kTVMBytes) {
return kTvmErrorFunctionCallWrongArgType;
}
ret_tcodes[0] = kTVMNullptr;
TVMByteArray* arr = (TVMByteArray*)args[0].v_handle;
return TVMGraphExecutor_LoadParams(graph_executor.executor, arr->data, arr->size);
}
int32_t TVMGraphExecutorModule_Run(TVMValue* args, int* tcodes, int nargs, TVMValue* ret_values,
int* ret_tcodes, void* resource_handle) {
if (nargs != 0) {
return kTvmErrorFunctionCallNumArguments;
}
TVMGraphExecutor_Run(graph_executor.executor);
ret_tcodes[0] = kTVMNullptr;
return 0;
}
int32_t TVMGraphExecutorModule_SetInput(TVMValue* args, int* tcodes, int nargs,
TVMValue* ret_values, int* ret_tcodes,
void* resource_handle) {
if (nargs != 2) {
return kTvmErrorFunctionCallNumArguments;
}
if (tcodes[0] != kTVMStr || tcodes[1] != kTVMDLTensorHandle) {
return kTvmErrorFunctionCallWrongArgType;
}
TVMGraphExecutor_SetInput(graph_executor.executor, args[0].v_str, (DLTensor*)args[1].v_handle);
ret_tcodes[0] = kTVMNullptr;
return 0;
}
int32_t TVMGraphExecutorModule_NotImplemented(TVMValue* args, int* tcodes, int nargs,
TVMValue* ret_values, int* ret_tcodes,
void* resource_handle) {
return kTvmErrorFunctionCallNotImplemented;
}
static const TVMBackendPackedCFunc graph_executor_registry_funcs[] = {
&TVMGraphExecutorModule_GetInput,
&TVMGraphExecutorModule_GetInputIndex,
&TVMGraphExecutorModule_NotImplemented, // get_input_info
&TVMGraphExecutorModule_GetNumInputs,
&TVMGraphExecutorModule_GetNumOutputs,
&TVMGraphExecutorModule_GetOutput,
&TVMGraphExecutorModule_LoadParams,
&TVMGraphExecutorModule_Run,
&TVMGraphExecutorModule_SetInput,
&TVMGraphExecutorModule_NotImplemented, // share_params
};
static const TVMFuncRegistry graph_executor_registry = {
"\x08\0get_input\0"
"get_input_index\0"
"get_input_info\0"
"get_num_inputs\0"
"get_num_outputs\0"
"get_output\0"
"load_params\0"
"run\0"
"set_input\0"
"share_params\0",
graph_executor_registry_funcs};
tvm_crt_error_t TVMGraphExecutorModule_Register() {
graph_executor.mod.registry = &graph_executor_registry;
graph_executor.executor = NULL;
return TVMFuncRegisterGlobal("tvm.graph_executor.create", &TVMGraphExecutorModule_Create, 0);
}
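/*
 * Illustrative call sketch: after TVMGraphExecutorModule_Register(), the packed
 * function "tvm.graph_executor.create" expects four arguments (graph JSON string,
 * module handle, device type, device id), and the device must be {kDLCPU, 0}. The
 * graph_json and module_handle values below are hypothetical.
 *
 *   TVMValue args[4];
 *   int tcodes[4] = {kTVMStr, kTVMModuleHandle, kTVMArgInt, kTVMArgInt};
 *   args[0].v_str = graph_json;
 *   args[1].v_handle = module_handle;
 *   args[2].v_int64 = kDLCPU;
 *   args[3].v_int64 = 0;
 *   TVMValue ret_value;
 *   int ret_tcode;
 *   TVMGraphExecutorModule_Create(args, tcodes, 4, &ret_value, &ret_tcode, NULL);
 */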
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/host/microtvm_api_server.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import fcntl
import os
import os.path
import pathlib
import select
import shutil
import subprocess
import tarfile
import time
import re
from tvm.micro.project_api import server
PROJECT_DIR = pathlib.Path(os.path.dirname(__file__) or os.getcwd())
MODEL_LIBRARY_FORMAT_RELPATH = "model.tar"
IS_TEMPLATE = not os.path.exists(os.path.join(PROJECT_DIR, MODEL_LIBRARY_FORMAT_RELPATH))
# This size is large enough to pass most CRT tests in TVM.
MEMORY_SIZE_BYTES = 2 * 1024 * 1024
MAKEFILE_FILENAME = "Makefile"
class Handler(server.ProjectAPIHandler):
BUILD_TARGET = "build/main"
def __init__(self):
super(Handler, self).__init__()
self._proc = None
def server_info_query(self, tvm_version):
return server.ServerInfo(
platform_name="host",
is_template=IS_TEMPLATE,
model_library_format_path=""
if IS_TEMPLATE
else PROJECT_DIR / MODEL_LIBRARY_FORMAT_RELPATH,
project_options=[
server.ProjectOption(
"verbose",
optional=["build"],
type="bool",
default=False,
help="Run make with verbose output",
),
server.ProjectOption(
"memory_size_bytes",
optional=["generate_project"],
type="int",
default=MEMORY_SIZE_BYTES,
help="Sets the value of MEMORY_SIZE_BYTES.",
),
],
)
# These files and directories will be recursively copied into generated projects from the CRT.
CRT_COPY_ITEMS = ("include", "Makefile", "src")
# The build target given to make
BUILD_TARGET = "build/main"
def _populate_makefile(
self,
makefile_template_path: pathlib.Path,
makefile_path: pathlib.Path,
memory_size: int,
):
"""Generate Makefile from template."""
flags = {
"MEMORY_SIZE_BYTES": str(memory_size),
}
regex = re.compile(r"([A-Z_]+) := (<[A-Z_]+>)")
with open(makefile_path, "w") as makefile_f:
with open(makefile_template_path, "r") as makefile_template_f:
for line in makefile_template_f:
m = regex.match(line)
if m:
var, token = m.groups()
line = line.replace(token, flags[var])
makefile_f.write(line)
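    # Illustrative example of the assumed template format: a line such as
    #   MEMORY_SIZE_BYTES := <MEMORY_SIZE_BYTES>
    # in Makefile.template is rewritten by the regex above to
    #   MEMORY_SIZE_BYTES := 2097152
    # when memory_size is the default MEMORY_SIZE_BYTES (2 * 1024 * 1024).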
def generate_project(self, model_library_format_path, standalone_crt_dir, project_dir, options):
# Make project directory.
project_dir.mkdir(parents=True)
# Copy ourselves to the generated project. TVM may perform further build steps on the generated project
# by launching the copy.
shutil.copy2(__file__, project_dir / os.path.basename(__file__))
# Place Model Library Format tarball in the special location, which this script uses to decide
# whether it's being invoked in a template or generated project.
project_model_library_format_path = project_dir / MODEL_LIBRARY_FORMAT_RELPATH
shutil.copy2(model_library_format_path, project_model_library_format_path)
        # Extract the Model Library Format tarball into <project_dir>/model.
extract_path = project_dir / project_model_library_format_path.stem
with tarfile.TarFile(project_model_library_format_path) as tf:
os.makedirs(extract_path)
tf.extractall(path=extract_path)
# Populate CRT.
crt_path = project_dir / "crt"
os.mkdir(crt_path)
for item in self.CRT_COPY_ITEMS:
src_path = standalone_crt_dir / item
dst_path = crt_path / item
if os.path.isdir(src_path):
shutil.copytree(src_path, dst_path)
else:
shutil.copy2(src_path, dst_path)
# Populate Makefile
self._populate_makefile(
pathlib.Path(__file__).parent / f"{MAKEFILE_FILENAME}.template",
project_dir / MAKEFILE_FILENAME,
options.get("memory_size_bytes", MEMORY_SIZE_BYTES),
)
# Populate crt-config.h
crt_config_dir = project_dir / "crt_config"
crt_config_dir.mkdir()
shutil.copy2(
os.path.join(os.path.dirname(__file__), "..", "crt_config-template.h"),
os.path.join(crt_config_dir, "crt_config.h"),
)
# Populate src/
src_dir = os.path.join(project_dir, "src")
os.mkdir(src_dir)
shutil.copy2(
os.path.join(os.path.dirname(__file__), "main.cc"), os.path.join(src_dir, "main.cc")
)
def build(self, options):
args = ["make"]
if options.get("verbose"):
args.append("VERBOSE=1")
args.append(self.BUILD_TARGET)
subprocess.check_call(args, cwd=PROJECT_DIR)
def flash(self, options):
pass # Flashing does nothing on host.
def _set_nonblock(self, fd):
flag = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flag | os.O_NONBLOCK)
new_flag = fcntl.fcntl(fd, fcntl.F_GETFL)
        assert (new_flag & os.O_NONBLOCK) != 0, f"Cannot set file descriptor {fd} to non-blocking"
def open_transport(self, options):
self._proc = subprocess.Popen(
[self.BUILD_TARGET], stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=0
)
self._set_nonblock(self._proc.stdin.fileno())
self._set_nonblock(self._proc.stdout.fileno())
return server.TransportTimeouts(
session_start_retry_timeout_sec=0,
session_start_timeout_sec=0,
session_established_timeout_sec=0,
)
def close_transport(self):
if self._proc is not None:
proc = self._proc
self._proc = None
proc.terminate()
proc.wait()
def _await_ready(self, rlist, wlist, timeout_sec=None, end_time=None):
if timeout_sec is None and end_time is not None:
timeout_sec = max(0, end_time - time.monotonic())
rlist, wlist, xlist = select.select(rlist, wlist, rlist + wlist, timeout_sec)
if not rlist and not wlist and not xlist:
raise server.IoTimeoutError()
return True
def read_transport(self, n, timeout_sec):
if self._proc is None:
raise server.TransportClosedError()
fd = self._proc.stdout.fileno()
end_time = None if timeout_sec is None else time.monotonic() + timeout_sec
try:
self._await_ready([fd], [], end_time=end_time)
to_return = os.read(fd, n)
except BrokenPipeError:
to_return = 0
if not to_return:
self.disconnect_transport()
raise server.TransportClosedError()
return to_return
def write_transport(self, data, timeout_sec):
if self._proc is None:
raise server.TransportClosedError()
fd = self._proc.stdin.fileno()
end_time = None if timeout_sec is None else time.monotonic() + timeout_sec
data_len = len(data)
while data:
self._await_ready([], [fd], end_time=end_time)
try:
num_written = os.write(fd, data)
except BrokenPipeError:
num_written = 0
if not num_written:
self.disconnect_transport()
raise server.TransportClosedError()
data = data[num_written:]
if __name__ == "__main__":
server.main(Handler())
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/include/tvm/runtime/crt/internal/common/func_registry.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// LINT_C_FILE
/*!
* \file tvm/runtime/crt/include/tvm/runtime/crt/internal/common/func_registry.h
 * \brief Internal helpers for the CRT function registry.
*/
#ifndef TVM_RUNTIME_CRT_INCLUDE_TVM_RUNTIME_CRT_INTERNAL_COMMON_FUNC_REGISTRY_H_
#define TVM_RUNTIME_CRT_INCLUDE_TVM_RUNTIME_CRT_INTERNAL_COMMON_FUNC_REGISTRY_H_
#ifdef __cplusplus
extern "C" {
#endif
int strcmp_cursor(const char** cursor, const char* name);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TVM_RUNTIME_CRT_INCLUDE_TVM_RUNTIME_CRT_INTERNAL_COMMON_FUNC_REGISTRY_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/include/tvm/runtime/crt/internal/common/ndarray.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file tvm/runtime/crt/include/tvm/runtime/crt/internal/common/ndarray.h
 * \brief Internal NDArray container and helpers used by the CRT.
*/
#ifndef TVM_RUNTIME_CRT_INCLUDE_TVM_RUNTIME_CRT_INTERNAL_COMMON_NDARRAY_H_
#define TVM_RUNTIME_CRT_INCLUDE_TVM_RUNTIME_CRT_INTERNAL_COMMON_NDARRAY_H_
#include <dlpack/dlpack.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tvm/runtime/c_backend_api.h>
#include <tvm/runtime/c_runtime_api.h>
/*! \brief Magic number for NDArray file */
static const uint64_t kTVMNDArrayMagic = 0xDD5E40F096B4A13F;
/*! \brief Magic number for NDArray list file */
static const uint64_t kTVMNDArrayListMagic = 0xF7E58D4F05049CB7;
typedef struct TVMNDArray {
/*! \brief the actual tensor in DLPack format. NOTE: this must be first element in struct */
DLTensor dl_tensor;
/*! \brief count of references to TVMNDArray to avoid early freeing by host */
uint32_t reference_count;
} TVMNDArray;
int TVMNDArray_Create(int32_t ndim, const tvm_index_t* shape, DLDataType dtype, DLDevice dev,
TVMNDArray* array);
int64_t TVMNDArray_DataSizeBytes(TVMNDArray* array);
int TVMNDArray_RandomFill(TVMNDArray* array);
int TVMNDArray_Empty(int32_t ndim, const tvm_index_t* shape, DLDataType dtype, DLDevice dev,
TVMNDArray* array);
int TVMNDArray_Load(TVMNDArray* ret, const char** strm);
int TVMNDArray_CreateView(TVMNDArray* arr, const tvm_index_t* shape, int32_t ndim, DLDataType dtype,
TVMNDArray* array_view);
void TVMNDArray_IncrementReference(TVMNDArray* arr);
uint32_t TVMNDArray_DecrementReference(TVMNDArray* arr);
int TVMNDArray_Release(TVMNDArray* arr);
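/*
 * Illustrative usage sketch; the shape value is an arbitrary example.
 *
 *   const tvm_index_t shape[1] = {16};
 *   DLDataType dtype = {kDLFloat, 32, 1};
 *   DLDevice dev = {kDLCPU, 0};
 *   TVMNDArray arr;
 *   if (TVMNDArray_Empty(1, shape, dtype, dev, &arr) == 0) {
 *     // ... use arr.dl_tensor.data ...
 *     TVMNDArray_Release(&arr);
 *   }
 */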
#endif // TVM_RUNTIME_CRT_INCLUDE_TVM_RUNTIME_CRT_INTERNAL_COMMON_NDARRAY_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/include/tvm/runtime/crt/internal/graph_executor/graph_executor.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/runtime/crt/include/tvm/runtime/crt/internal/graph_executor/graph_executor.h
 * \brief Tiny graph executor that can run graphs containing only TVM PackedFuncs.
*/
#ifndef TVM_RUNTIME_CRT_INCLUDE_TVM_RUNTIME_CRT_INTERNAL_GRAPH_EXECUTOR_GRAPH_EXECUTOR_H_
#define TVM_RUNTIME_CRT_INCLUDE_TVM_RUNTIME_CRT_INTERNAL_GRAPH_EXECUTOR_GRAPH_EXECUTOR_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <tvm/runtime/crt/graph_executor.h>
#include <tvm/runtime/crt/internal/common/ndarray.h>
#include <tvm/runtime/crt/internal/graph_executor/load_json.h>
#include <tvm/runtime/crt/module.h>
// Memory pool entry.
typedef struct TVMGraphExecutorPoolEntry {
size_t size;
int device_type;
int entry_id;
} TVMGraphExecutorPoolEntry;
// Node entry
typedef struct TVMGraphExecutorNodeEntry {
uint32_t node_id;
uint32_t index;
uint32_t version;
// JSON Loader
void (*Load)(JSONReader* reader);
} TVMGraphExecutorNodeEntry;
// Storage entry.
typedef struct TVMGraphExecutorStorageEntry {
uint8_t is_linked_param;
TVMNDArray array;
} TVMGraphExecutorStorageEntry;
// Node
typedef struct TVMGraphExecutorNode {
// operator type in string
char op_type[16];
// name of the op
char name[TVM_CRT_MAX_STRLEN_FUNCTION_NAME];
// parameters
TVMOpParam param;
// inputs
TVMGraphExecutorNodeEntry* inputs;
// number of inputs
size_t inputs_count;
// control deps
uint32_t control_deps[20];
// JSON Loader
void (*LoadAttrs)(struct TVMGraphExecutorNode* node, JSONReader* reader, TVMOpParam* param);
// JSON Loader
int (*Load)(struct TVMGraphExecutorNode* node, JSONReader* reader);
} TVMGraphExecutorNode;
typedef struct TVMGraphExecutor {
/*! \brief The graph nodes. */
TVMGraphExecutorNode* nodes;
/*! \brief The graph nodes counter. */
uint32_t nodes_count;
/*! \brief The argument nodes. */
uint32_t* input_nodes;
uint32_t input_nodes_count;
/*! \brief Used for quick entry indexing. */
uint32_t* node_row_ptr;
uint32_t node_row_ptr_count;
/*! \brief Output entries. */
TVMGraphExecutorNodeEntry* outputs;
/*! \brief Output entries counter. */
uint32_t outputs_count;
/*! \brief Additional graph attributes. */
TVMGraphExecutorGraphAttr attrs;
/*! \brief The code module that contains both host and device code. */
TVMModuleHandle module_handle;
/*! \brief Execution context of all devices including the host. */
DLDevice devices[1];
uint32_t devices_count;
/*! \brief Common storage pool for all devices. */
TVMGraphExecutorStorageEntry* storage_pool;
uint32_t storage_pool_count;
/*! \brief Data entry of each node. */
TVMNDArray* data_entry;
uint32_t data_entry_count;
/*! \brief Operator on each node. */
TVMPackedFunc* op_execs;
uint32_t op_execs_count;
} TVMGraphExecutor;
typedef DLTensor* DLTensorPtr;
// private functions
uint32_t TVMGraphExecutor_GetEntryId(TVMGraphExecutor* executor, uint32_t nid, uint32_t index);
void TVMGraphExecutor_SetInput(TVMGraphExecutor* executor, const char* name, DLTensor* data_in);
int TVMGraphExecutor_LoadParams(TVMGraphExecutor* executor, const char* param_blob,
const uint32_t param_size);
void TVMGraphExecutor_Run(TVMGraphExecutor* executor);
int TVMGraphExecutor_GetOutput(TVMGraphExecutor* executor, const int32_t idx, DLTensor* out);
int32_t TVMGraphExecutor_CreateTVMOp(TVMGraphExecutor* executor, const TVMOpParam* param,
DLTensorPtr* args, const uint32_t args_count,
TVMPackedFunc* pf);
int TVMGraphExecutor_Load(TVMGraphExecutor* executor, JSONReader* reader);
#ifdef __cplusplus
}
#endif
#endif // TVM_RUNTIME_CRT_INCLUDE_TVM_RUNTIME_CRT_INTERNAL_GRAPH_EXECUTOR_GRAPH_EXECUTOR_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/include/tvm/runtime/crt/internal/graph_executor/load_json.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file src/runtime/crt/include/tvm/runtime/crt/internal/graph_executor/load_json.h
 * \brief Lightweight JSON reader that loads serialized graph JSON into C data structures.
*/
#ifndef TVM_RUNTIME_CRT_INCLUDE_TVM_RUNTIME_CRT_INTERNAL_GRAPH_EXECUTOR_LOAD_JSON_H_
#define TVM_RUNTIME_CRT_INCLUDE_TVM_RUNTIME_CRT_INTERNAL_GRAPH_EXECUTOR_LOAD_JSON_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <ctype.h>
#include <inttypes.h>
#include <stdio.h>
#include <tvm/runtime/crt/error_codes.h>
enum {
JSON_READ_TYPE_U8 = 1,
JSON_READ_TYPE_S8 = 2,
JSON_READ_TYPE_U16 = 3,
JSON_READ_TYPE_S16 = 4,
JSON_READ_TYPE_U32 = 5,
JSON_READ_TYPE_S32 = 6,
JSON_READ_TYPE_F32 = 7,
JSON_READ_TYPE_F64 = 8,
JSON_READ_TYPE_GRAPH_EXECUTOR_NODE = 9,
JSON_READ_TYPE_GRAPH_EXECUTOR_NODE_ENTRY = 10,
JSON_READ_TYPE_GRAPH_EXECUTOR_GRAPH_ATTR = 11
};
typedef struct Seq {
uint32_t* data;
uint64_t allocated;
uint32_t size;
void (*push_back)(struct Seq* seq, uint32_t src);
uint32_t* (*back)(struct Seq* seq);
void (*pop_back)(struct Seq* seq);
} Seq;
/*!
 * \brief Lightweight JSON reader that can parse arbitrary compositions of arrays and objects.
 * The user needs to know the schema of the serialized data in advance.
*/
typedef struct JSONReader {
/*! \brief internal reader string */
char* is_;
char* isptr;
/*! \brief "\\r" counter */
size_t line_count_r_;
/*! \brief "\\n" counter */
size_t line_count_n_;
/*!
 * \brief Records how many elements have been processed in the
 * current array/object scope.
*/
Seq* scope_counter_;
char (*NextChar)(struct JSONReader* reader);
char (*NextNonSpace)(struct JSONReader* reader);
char (*PeekNextChar)(struct JSONReader* reader);
char (*PeekNextNonSpace)(struct JSONReader* reader);
int (*ReadUnsignedInteger)(struct JSONReader* reader, unsigned int* out_value);
int (*ReadInteger)(struct JSONReader* reader, int64_t* out_value);
int (*ReadString)(struct JSONReader* reader, char* out_str, size_t out_str_size);
void (*BeginArray)(struct JSONReader* reader);
void (*BeginObject)(struct JSONReader* reader);
uint8_t (*NextObjectItem)(struct JSONReader* reader, char* out_key, size_t out_key_size);
uint8_t (*NextArrayItem)(struct JSONReader* reader);
int (*ArrayLength)(struct JSONReader* reader, size_t* num_elements);
} JSONReader;
/*!
* \brief Constructor of JSONReader class
* \param is the input source.
* \param reader Pointer to the JSONReader to initialize.
* \return kTvmErrorNoError on success.
*/
tvm_crt_error_t JSONReader_Create(const char* is, JSONReader* reader);
/*!
* \brief Deallocate dynamic memory used in the JSONReader instance.
* NOTE: this doesn't actually free the passed-in reader itself, just dynamically-allocated members.
* \param reader Pointer to a JSONReader passed to JSONReader_Create.
* \return kTvmErrorNoError on success.
*/
tvm_crt_error_t JSONReader_Release(JSONReader* reader);
#ifdef __cplusplus
}
#endif
#endif // TVM_RUNTIME_CRT_INCLUDE_TVM_RUNTIME_CRT_INTERNAL_GRAPH_EXECUTOR_LOAD_JSON_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/include/tvm/runtime/crt/internal/memory/page_allocator.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file runtime/crt/include/tvm/runtime/crt/internal/memory/page_allocator.h
* \brief Defines data types and functions used in the internal memory manager.
* Exposed for testing.
*/
#ifndef TVM_RUNTIME_CRT_INCLUDE_TVM_RUNTIME_CRT_INTERNAL_MEMORY_PAGE_ALLOCATOR_H_
#define TVM_RUNTIME_CRT_INCLUDE_TVM_RUNTIME_CRT_INTERNAL_MEMORY_PAGE_ALLOCATOR_H_
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/crt/error_codes.h>
#include <tvm/runtime/crt/page_allocator.h>
#include "crt_config.h"
#ifdef __cplusplus
extern "C" {
#endif
/*! \brief A page in the DRAM */
typedef struct Page {
/*! \brief Start location in page table */
tvm_index_t ptable_begin;
/*! \brief The total number of pages */
tvm_index_t num_pages;
/*! \brief Data */
uint8_t* data;
} Page;
// construct a new page
Page PageCreate(uint8_t* memory_pool, size_t page_size_bytes, tvm_index_t ptable_begin,
tvm_index_t num_pages);
typedef struct PageTable {
// Pointer to beginning of memory pool.
uint8_t* memory_pool;
// Size of one page.
size_t page_size_bytes;
Page* page;
size_t max_pages;
size_t num_pages;
void (*resize)(struct PageTable* ptable, size_t size, Page* page);
} PageTable;
typedef struct PageEntry {
uint8_t* addr;
Page page;
} PageEntry;
typedef struct TLB {
PageEntry* entries;
size_t max_pages;
uint32_t num_pages;
void (*set)(struct TLB* tlb, uint8_t* data, Page* page);
PageEntry* (*find)(struct TLB* tlb, uint8_t* data);
} TLB;
typedef struct IndexedEntry {
tvm_index_t index;
Page page;
} IndexedEntry;
typedef struct MultiMap {
IndexedEntry* entries;
size_t max_entries;
size_t num_entries;
IndexedEntry* (*lower_bound)(struct MultiMap* map, uint32_t npage);
IndexedEntry* (*end)(struct MultiMap* map);
void (*erase)(struct MultiMap* map, IndexedEntry* entry);
void (*insert)(struct MultiMap* map, uint32_t npage, Page* p);
} MultiMap;
/*!
* \brief DRAM memory manager
* Implements simple paging to allow physical address translation.
*/
typedef struct MemoryManager {
// Public interface for this object.
MemoryManagerInterface interface;
// Physical address -> page
PageTable ptable;
// Virtual address -> page
TLB pmap;
// Free map
MultiMap free_map;
} MemoryManager;
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TVM_RUNTIME_CRT_INCLUDE_TVM_RUNTIME_CRT_INTERNAL_MEMORY_PAGE_ALLOCATOR_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/memory/page_allocator.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// LINT_C_FILE
/*!
 * \file page_allocator.c
 * \brief Virtual memory manager.
 *
 * To maximize portability, thread safety has been dropped for now.
*/
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/crt/error_codes.h>
#include <tvm/runtime/crt/internal/memory/page_allocator.h>
#include <tvm/runtime/crt/logging.h>
#include <tvm/runtime/crt/platform.h>
// construct a new page
Page PageCreate(uint8_t* memory_pool, size_t page_size_bytes, tvm_index_t ptable_begin,
tvm_index_t num_pages) {
Page page;
page.ptable_begin = ptable_begin;
page.num_pages = num_pages;
page.data = memory_pool + ptable_begin * page_size_bytes;
return page;
}
void PageTable_Resize(struct PageTable* ptable, size_t new_size, Page* page) {
CHECK_LE(ptable->num_pages, new_size, "size value (%zu) is smaller than expected (%zu).",
new_size, ptable->num_pages);
for (uint32_t idx = ptable->num_pages; idx < new_size; idx++) {
ptable->page[idx] = *page;
}
ptable->num_pages = new_size;
}
void TLB_Set(TLB* tlb, uint8_t* data, Page* page) {
PageEntry* entry = tlb->find(tlb, data);
if (entry == 0) {
tlb->entries[tlb->num_pages].addr = data;
tlb->entries[tlb->num_pages].page = *page;
tlb->num_pages++;
} else {
entry->addr = data;
entry->page = *page;
}
}
PageEntry* TLB_Find(TLB* tlb, uint8_t* data) {
PageEntry* entry = 0;
for (uint32_t idx = 0; idx < tlb->num_pages; idx++) {
if (tlb->entries[idx].addr == data) {
entry = tlb->entries + idx;
break;
}
}
return entry;
}
IndexedEntry* MultiMap_LowerBound(struct MultiMap* map, uint32_t npage) {
IndexedEntry* entry = 0;
for (uint32_t idx = 0; idx < map->num_entries; idx++) {
if (map->entries[idx].index >= npage) {
entry = map->entries + idx;
break;
}
}
return entry;
}
IndexedEntry* MultiMap_End(struct MultiMap* map) {
IndexedEntry* entry = 0;
return entry;
}
void MultiMap_Erase(struct MultiMap* map, IndexedEntry* entry) {
for (uint32_t idx = 0; idx < map->num_entries; idx++) {
if ((map->entries + idx) == entry) {
// NOTE: do not use memcpy due to overlap.
for (uint32_t src_idx = idx + 1; src_idx < map->num_entries; src_idx++) {
map->entries[src_idx - 1] = map->entries[src_idx];
}
map->num_entries--;
break;
}
}
}
void MultiMap_Insert(struct MultiMap* map, uint32_t npage, Page* p) {
CHECK_LE(map->num_entries + 1, map->max_entries, "invalid number of free pages.");
for (uint32_t idx = map->num_entries; idx < (map->num_entries + npage); idx++) {
map->entries[map->num_entries].index = npage;
map->entries[map->num_entries].page = *p;
}
map->num_entries++;
}
/*!
 * \brief Allocate memory from the manager.
 * \param interface Pointer to this structure.
 * \param num_bytes The number of bytes to allocate.
 * \param dev Execution device, fixed to {kDLCPU, 0}.
 * \param out_ptr Receives the virtual address of the allocation.
 * \return kTvmErrorNoError on success; kTvmErrorPlatformNoMemory if the pool is exhausted.
*/
tvm_crt_error_t PageMemoryManager_Allocate(MemoryManagerInterface* interface, size_t num_bytes,
DLDevice dev, void** out_ptr) {
MemoryManager* mgr = (MemoryManager*)interface;
*out_ptr = 0;
PageTable* ptable = &(mgr->ptable);
tvm_index_t npage = (num_bytes + ptable->page_size_bytes - 1) / ptable->page_size_bytes;
MultiMap* free_map = &(mgr->free_map);
IndexedEntry* it = free_map->lower_bound(free_map, npage);
tvm_index_t start = 0;
if (it != free_map->end(free_map)) {
Page p = it->page;
free_map->erase(free_map, it);
*out_ptr = p.data;
start = p.ptable_begin;
npage = p.num_pages;
} else {
start = ptable->num_pages;
if ((unsigned)(start + npage) > ptable->max_pages) {
#if TVM_CRT_DEBUG > 1
TVMLogf("insufficient memory, start=%" PRId32 ", npage=%" PRId32 ", total=%" PRId32 " / %zu",
(int32_t)start, (int32_t)npage, (int32_t)(start + npage), mgr->pmap.max_pages);
#endif
return kTvmErrorPlatformNoMemory;
}
/* insert page entry */
Page p = PageCreate(ptable->memory_pool, ptable->page_size_bytes, start, npage);
ptable->resize(ptable, start + npage, &p);
*out_ptr = p.data;
TLB* pmap = &(mgr->pmap);
pmap->set(pmap, *out_ptr, &p);
}
mgr->interface.vleak_size++;
#if TVM_CRT_DEBUG > 1
TVMLogf("allocate: addr=%p, start=%" PRId64 "/%zu, npage=%" PRId64 ", vleak=%d\n", data, start,
ptable->max_pages, npage, mgr->interface.vleak_size);
#endif // TVM_CRT_DEBUG
return kTvmErrorNoError;
}
/*!
* \brief Reallocate memory from manager
* \param ptr Pointer holding a pointer to the memory area to be reallocated
* \param num_bytes The size of memory now required.
* \return kTvmErrorNoError on success.
*/
tvm_crt_error_t PageMemoryManager_Realloc(MemoryManagerInterface* interface, void** ptr,
tvm_index_t num_bytes) {
MemoryManager* mgr = (MemoryManager*)interface;
uint8_t* data = *((uint8_t**)ptr); // NOLINT(*)
PageTable* ptable = &(mgr->ptable);
TLB* pmap = &(mgr->pmap);
MultiMap* free_map = &(mgr->free_map);
tvm_index_t start = 0;
tvm_index_t npage = (num_bytes + ptable->page_size_bytes - 1) / ptable->page_size_bytes;
if (ptr) {
// get page size for given pointer
CHECK_NE(pmap->num_pages, 0, "invalid translation look-aside buffer.");
PageEntry* entry = pmap->find(pmap, (uint8_t*)ptr); // NOLINT(*)
CHECK_NE(entry, 0, "no valid page entry found.");
Page* pptr = &(entry->page);
// if the page size is smaller than target page size,
// try allocate new space
if (pptr->num_pages < npage) {
// TODO(liangfu): found out whether we can extend current entry
//
// insert new page entry
IndexedEntry* it = free_map->lower_bound(free_map, npage);
if (it != free_map->end(free_map)) {
data = it->page.data;
start = it->page.ptable_begin;
npage = it->page.num_pages;
free_map->erase(free_map, it);
} else {
start = ptable->num_pages;
if ((unsigned)(start + npage) > ptable->max_pages) {
#if TVM_CRT_DEBUG > 1
TVMLogf("insufficient memory, start=%" PRId64 ", npage=%" PRId64 ", total=%" PRId64 "",
start, npage, start + npage);
#endif
return kTvmErrorPlatformNoMemory;
}
Page p = PageCreate(mgr->ptable.memory_pool, mgr->ptable.page_size_bytes, start, npage);
ptable->resize(ptable, start + npage, &p);
data = p.data;
pmap->set(pmap, data, &p);
}
// copy previous data to the new entry
memcpy(data, ptr, ptable->page_size_bytes * pptr->num_pages);
// release memory
free_map->insert(free_map, pptr->num_pages, pptr);
} else {
start = pptr->ptable_begin;
}
} else {
IndexedEntry* it = free_map->lower_bound(free_map, npage);
if (it != free_map->end(free_map)) {
Page p = it->page;
free_map->erase(free_map, it);
data = p.data;
start = p.ptable_begin;
npage = p.num_pages;
} else {
PageTable* ptable = &(mgr->ptable);
start = ptable->num_pages;
if ((unsigned)(start + npage) > ptable->max_pages) {
#if TVM_CRT_DEBUG > 1
TVMLogf("insufficient memory, start=%" PRId64 ", npage=%" PRId64 ", total=%" PRId64 "",
start, npage, start + npage);
#endif
/* insert page entry */
Page p = PageCreate(mgr->ptable.memory_pool, mgr->ptable.page_size_bytes, start, npage);
ptable->resize(ptable, start + npage, &p);
data = p.data;
TLB* pmap = &(mgr->pmap);
pmap->set(pmap, data, &p);
}
mgr->interface.vleak_size++;
}
}
#if TVM_CRT_DEBUG > 1
TVMLogf("reallocate: addr=%p, start=%" PRId64 "/%zu, npage=%" PRId64 ", vleak=%d, size=%zu", data,
start, mgr->ptable.max_pages, npage, mgr->interface.vleak_size, size);
#endif // TVM_CRT_DEBUG
return kTvmErrorNoError;
}
/*!
* \brief Free the memory.
* \param interface Pointer to this structure.
* \param ptr A pointer returned from TVMPlatformMemoryAllocate which should be free'd.
* \param dev Execution device passed to TVMPlatformMemoryAllocate. Fixed to {kDLCPU, 0}.
* \return kTvmErrorNoError if successful; a descriptive error code otherwise.
*/
tvm_crt_error_t PageMemoryManager_Free(MemoryManagerInterface* interface, void* ptr, DLDevice dev) {
MemoryManager* mgr = (MemoryManager*)interface;
TLB* pmap = &(mgr->pmap);
CHECK_NE(pmap->num_pages, 0, "invalid translation look-aside buffer.");
PageEntry* entry = pmap->find(pmap, (uint8_t*)ptr); // NOLINT(*)
CHECK_NE(entry, 0, "no valid page entry found.");
Page* p = &(entry->page);
MultiMap* free_map = &(mgr->free_map);
free_map->insert(free_map, p->num_pages, p);
mgr->interface.vleak_size--;
#if TVM_CRT_DEBUG > 1
TVMLogf("release: addr=%p, start=%" PRId64 "/%zu, npage=%zu, vleak=%d", ptr,
entry->page.ptable_begin, mgr->ptable.max_pages, entry->page.num_pages,
mgr->interface.vleak_size);
#endif // TVM_CRT_DEBUG
return kTvmErrorNoError;
}
tvm_crt_error_t PageMemoryManagerCreate(MemoryManagerInterface** interface, uint8_t* memory_pool,
size_t memory_pool_size_bytes,
size_t page_size_bytes_log2) {
memset(memory_pool, 0, memory_pool_size_bytes);
// Allocate enough space for MAX_PAGES.
size_t page_size_bytes = 1 << page_size_bytes_log2;
size_t metadata_bytes_per_page = sizeof(Page) + sizeof(PageEntry) + sizeof(IndexedEntry);
size_t bytes_needed_per_page = page_size_bytes + metadata_bytes_per_page;
size_t num_pages = (memory_pool_size_bytes - sizeof(MemoryManager)) / bytes_needed_per_page;
uint8_t* metadata_cursor = memory_pool + (num_pages << page_size_bytes_log2);
MemoryManager* manager = (MemoryManager*)metadata_cursor;
*interface = &manager->interface;
/* handle MemoryManager member functions */
manager->interface.Allocate = PageMemoryManager_Allocate;
  // manager->interface.Realloc = PageMemoryManager_Realloc;
manager->interface.Free = PageMemoryManager_Free;
  metadata_cursor += sizeof(MemoryManager);
manager->ptable.memory_pool = memory_pool;
/* handle PageTable member functions */
manager->ptable.page = (Page*)metadata_cursor;
metadata_cursor += sizeof(Page) * num_pages;
manager->ptable.page_size_bytes = (1 << page_size_bytes_log2);
manager->ptable.max_pages = num_pages;
manager->ptable.resize = PageTable_Resize;
/* handle TLB member functions */
manager->pmap.entries = (PageEntry*)metadata_cursor;
metadata_cursor += sizeof(PageEntry) * num_pages;
manager->pmap.max_pages = num_pages;
manager->pmap.num_pages = 0;
manager->pmap.set = TLB_Set;
manager->pmap.find = TLB_Find;
/* handle free_map member functions */
manager->free_map.entries = (IndexedEntry*)metadata_cursor;
metadata_cursor += sizeof(IndexedEntry) * num_pages;
manager->free_map.max_entries = num_pages;
manager->free_map.lower_bound = MultiMap_LowerBound;
manager->free_map.end = MultiMap_End;
manager->free_map.erase = MultiMap_Erase;
manager->free_map.insert = MultiMap_Insert;
return kTvmErrorNoError;
}
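/*
 * Illustrative usage sketch (not part of the upstream file): creating the page-based
 * allocator over a caller-provided static pool. The pool size and the log2 page size
 * (7, i.e. 128-byte pages) are arbitrary example values; each page consumes
 * page_size_bytes plus sizeof(Page) + sizeof(PageEntry) + sizeof(IndexedEntry)
 * of the pool, as accounted for in PageMemoryManagerCreate above.
 */
static uint8_t g_example_memory_pool[16 * 1024];

static tvm_crt_error_t ExamplePageAllocatorSetup(MemoryManagerInterface** out_interface) {
  return PageMemoryManagerCreate(out_interface, g_example_memory_pool,
                                 sizeof(g_example_memory_pool), /*page_size_bytes_log2=*/7);
}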
| https://github.com/zk-ml/tachikoma |
src/runtime/crt/memory/stack_allocator.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// LINT_C_FILE
#include <tvm/runtime/crt/stack_allocator.h>
tvm_crt_error_t StackMemoryManager_Allocate_Body(tvm_workspace_t* tvm_runtime_workspace,
int32_t nbytes, void** current_alloc,
uint8_t do_lifo_check) {
// reserve bytes at the end of the allocation such that
// next_alloc % TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES == 0.
uint32_t offset_bytes =
(TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES - nbytes) & (TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES - 1);
uint8_t* workspace_end = tvm_runtime_workspace->workspace + tvm_runtime_workspace->workspace_size;
if (tvm_runtime_workspace->next_alloc + nbytes + offset_bytes > workspace_end) {
return kTvmErrorPlatformNoMemory;
}
(*current_alloc) = tvm_runtime_workspace->next_alloc;
uint8_t* next_alloc = tvm_runtime_workspace->next_alloc + nbytes + offset_bytes;
if (do_lifo_check != 0) {
if (next_alloc + STACK_ALLOCATOR_TAG_SIZE_BYTES > workspace_end) {
return kTvmErrorPlatformNoMemory;
}
const uint32_t total_size = (nbytes + offset_bytes + STACK_ALLOCATOR_TAG_SIZE_BYTES);
*((uint32_t*)next_alloc) = total_size ^ STACK_ALLOCATOR_TAG;
next_alloc += STACK_ALLOCATOR_TAG_SIZE_BYTES;
}
tvm_runtime_workspace->next_alloc = next_alloc;
return kTvmErrorNoError;
}
tvm_crt_error_t StackMemoryManager_Allocate(tvm_workspace_t* tvm_runtime_workspace, int32_t nbytes,
void** current_alloc) {
uint8_t do_lifo_check = 0;
#ifdef TVM_CRT_STACK_ALLOCATOR_ENABLE_LIFO_CHECK
do_lifo_check = 1;
#endif
return StackMemoryManager_Allocate_Body(tvm_runtime_workspace, nbytes, current_alloc,
do_lifo_check);
}
tvm_crt_error_t StackMemoryManager_Free_Body(tvm_workspace_t* tvm_runtime_workspace, void* ptr,
uint8_t do_lifo_check) {
if (do_lifo_check != 0) {
uint32_t tag = *(((uint32_t*)tvm_runtime_workspace->next_alloc) - 1);
uint32_t actual_size = (tvm_runtime_workspace->next_alloc - (uint8_t*)ptr);
uint32_t expected_size = tag ^ STACK_ALLOCATOR_TAG;
if (expected_size != actual_size) {
return kTvmErrorPlatformStackAllocBadFree;
}
}
tvm_runtime_workspace->next_alloc = (uint8_t*)ptr;
return kTvmErrorNoError;
}
tvm_crt_error_t StackMemoryManager_Free(tvm_workspace_t* tvm_runtime_workspace, void* ptr) {
uint8_t do_lifo_check = 0;
#ifdef TVM_CRT_STACK_ALLOCATOR_ENABLE_LIFO_CHECK
do_lifo_check = 1;
#endif
return StackMemoryManager_Free_Body(tvm_runtime_workspace, ptr, do_lifo_check);
}
tvm_crt_error_t StackMemoryManager_Init(tvm_workspace_t* tvm_runtime_workspace,
uint8_t* g_aot_memory, size_t workspace_size) {
// We need to round up g_aot_memory in case it is not aligned to
// TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES.
uintptr_t unaligned_mask = TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES - 1;
uint8_t* memory_aligned =
(uint8_t*)(((uintptr_t)g_aot_memory + unaligned_mask) & ~unaligned_mask);
uint32_t offset = (uintptr_t)(memory_aligned - g_aot_memory);
tvm_runtime_workspace->next_alloc = memory_aligned;
tvm_runtime_workspace->workspace = memory_aligned;
tvm_runtime_workspace->workspace_size = workspace_size - offset;
return kTvmErrorNoError;
}
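/*
 * Illustrative usage sketch (not part of the upstream file): LIFO allocation out of a
 * static workspace using the functions defined above. The buffer and request sizes are
 * arbitrary example values; with the LIFO check enabled, frees must happen in reverse
 * allocation order, and each allocation is padded up to TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES.
 */
static uint8_t g_example_workspace[1024];

static tvm_crt_error_t ExampleStackAllocatorUse(void) {
  tvm_workspace_t workspace;
  void* a = NULL;
  void* b = NULL;
  tvm_crt_error_t err =
      StackMemoryManager_Init(&workspace, g_example_workspace, sizeof(g_example_workspace));
  if (err != kTvmErrorNoError) return err;
  err = StackMemoryManager_Allocate(&workspace, 64, &a);
  if (err != kTvmErrorNoError) return err;
  err = StackMemoryManager_Allocate(&workspace, 100, &b); /* padded up to the alignment */
  if (err != kTvmErrorNoError) return err;
  err = StackMemoryManager_Free(&workspace, b); /* most recent allocation freed first */
  if (err != kTvmErrorNoError) return err;
  return StackMemoryManager_Free(&workspace, a);
}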
| https://github.com/zk-ml/tachikoma |
src/runtime/cuda/cuda_common.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file cuda_common.h
* \brief Common utilities for CUDA
*/
#ifndef TVM_RUNTIME_CUDA_CUDA_COMMON_H_
#define TVM_RUNTIME_CUDA_CUDA_COMMON_H_
#include <cuda.h>
#include <cuda_runtime.h>
#include <tvm/runtime/packed_func.h>
#include <string>
#include "../workspace_pool.h"
namespace tvm {
namespace runtime {
#define CUDA_DRIVER_CALL(x) \
{ \
CUresult result = x; \
if (result != CUDA_SUCCESS && result != CUDA_ERROR_DEINITIALIZED) { \
const char* msg; \
cuGetErrorName(result, &msg); \
LOG(FATAL) << "CUDAError: " #x " failed with error: " << msg; \
} \
}
#define CUDA_CALL(func) \
{ \
cudaError_t e = (func); \
ICHECK(e == cudaSuccess || e == cudaErrorCudartUnloading) \
<< "CUDA: " << cudaGetErrorString(e); \
}
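// Illustrative usage sketch (not part of the upstream header): wrapping CUDA runtime
// API calls with CUDA_CALL so failures abort with a readable message. cudaSetDevice and
// cudaMalloc are standard CUDA runtime functions; the helper name is made up for the example.
inline void* ExampleCudaAlloc(int device_id, size_t nbytes) {
  CUDA_CALL(cudaSetDevice(device_id));
  void* ret = nullptr;
  CUDA_CALL(cudaMalloc(&ret, nbytes));
  return ret;
}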
/*! \brief Thread local workspace */
class CUDAThreadEntry {
public:
/*! \brief The cuda stream */
cudaStream_t stream{nullptr};
/*! \brief thread local pool*/
WorkspacePool pool;
/*! \brief constructor */
CUDAThreadEntry();
// get the threadlocal workspace
static CUDAThreadEntry* ThreadLocal();
};
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CUDA_CUDA_COMMON_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/cuda/cuda_module.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file cuda_module.h
* \brief Execution handling of CUDA kernels
*/
#ifndef TVM_RUNTIME_CUDA_CUDA_MODULE_H_
#define TVM_RUNTIME_CUDA_CUDA_MODULE_H_
#include <tvm/runtime/module.h>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "../meta_data.h"
namespace tvm {
namespace runtime {
/*! \brief Maximum number of GPUs supported in CUDAModule */
static constexpr const int kMaxNumGPUs = 32;
/*!
* \brief create a cuda module from data.
*
* \param data The module data, can be ptx, cubin
* \param fmt The format of the data, can be "ptx", "cubin"
* \param fmap The map function information map of each function.
 * \param cuda_source Optional, cuda source file
 * \return The created CUDA module.
 */
Module CUDAModuleCreate(std::string data, std::string fmt,
std::unordered_map<std::string, FunctionInfo> fmap,
std::string cuda_source);
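// Illustrative sketch (not part of the upstream header): wrapping raw PTX text in a
// runtime Module via the factory above. The empty FunctionInfo map is only a placeholder
// for the example; real callers populate it from codegen metadata.
inline Module ExampleCUDAModuleFromPTX(const std::string& ptx) {
  std::unordered_map<std::string, FunctionInfo> fmap;
  return CUDAModuleCreate(ptx, "ptx", fmap, /*cuda_source=*/"");
}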
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_CUDA_CUDA_MODULE_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/file_utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file file_utils.h
* \brief Minimum file manipulation utils for runtime.
*/
#ifndef TVM_RUNTIME_FILE_UTILS_H_
#define TVM_RUNTIME_FILE_UTILS_H_
#include <tvm/runtime/container/map.h>
#include <tvm/runtime/container/string.h>
#include <string>
#include <unordered_map>
#include "meta_data.h"
namespace tvm {
namespace runtime {
/*!
* \brief Get file format from given file name or format argument.
* \param file_name The name of the file.
* \param format The format of the file.
*/
std::string GetFileFormat(const std::string& file_name, const std::string& format);
/*!
* \return the directory in which TVM stores cached files.
* May be set using TVM_CACHE_DIR; defaults to system locations.
*/
std::string GetCacheDir();
/*!
* \brief Get meta file path given file name and format.
* \param file_name The name of the file.
*/
std::string GetMetaFilePath(const std::string& file_name);
/*!
* \brief Get file basename (i.e. without leading directories)
* \param file_name The name of the file.
* \return the base name
*/
std::string GetFileBasename(const std::string& file_name);
/*!
 * \brief Load binary file into an in-memory buffer.
* \param file_name The name of the file.
* \param data The data to be loaded.
*/
void LoadBinaryFromFile(const std::string& file_name, std::string* data);
/*!
 * \brief Save binary data to a file.
* \param file_name The name of the file.
* \param data The binary data to be saved.
*/
void SaveBinaryToFile(const std::string& file_name, const std::string& data);
/*!
* \brief Save meta data to file.
* \param file_name The name of the file.
* \param fmap The function info map.
*/
void SaveMetaDataToFile(const std::string& file_name,
const std::unordered_map<std::string, FunctionInfo>& fmap);
/*!
 * \brief Load meta data from file.
* \param file_name The name of the file.
* \param fmap The function info map.
*/
void LoadMetaDataFromFile(const std::string& file_name,
std::unordered_map<std::string, FunctionInfo>* fmap);
/*!
* \brief Copy the content of an existing file to another file.
* \param src_file_name Path to the source file.
* \param dest_file_name Path of the destination file. If this file already exists,
* replace its content.
*/
void CopyFile(const std::string& src_file_name, const std::string& dest_file_name);
/*!
* \brief Remove (unlink) a file.
* \param file_name The file name.
*/
void RemoveFile(const std::string& file_name);
constexpr uint64_t kTVMNDArrayListMagic = 0xF7E58D4F05049CB7;
/*!
* \brief Load parameters from a string.
* \param param_blob Serialized string of parameters.
* \return Map of parameter name to parameter value.
*/
Map<String, NDArray> LoadParams(const std::string& param_blob);
/*!
* \brief Load parameters from a stream.
* \param strm Stream to load parameters from.
* \return Map of parameter name to parameter value.
*/
Map<String, NDArray> LoadParams(dmlc::Stream* strm);
/*!
* \brief Serialize parameters to a byte array.
* \param params Parameters to save.
* \return String containing binary parameter data.
*/
std::string SaveParams(const Map<String, NDArray>& params);
/*!
* \brief Serialize parameters to a stream.
* \param strm Stream to write to.
* \param params Parameters to save.
*/
void SaveParams(dmlc::Stream* strm, const Map<String, NDArray>& params);
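// Illustrative sketch (not part of the upstream header): round-tripping a parameter map
// through the string-based serializers declared above; the helper name is made up.
inline Map<String, NDArray> ExampleRoundTripParams(const Map<String, NDArray>& params) {
  std::string blob = SaveParams(params);
  return LoadParams(blob);
}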
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_FILE_UTILS_H_
| https://github.com/zk-ml/tachikoma |
src/runtime/graph_executor/graph_executor.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
 * \brief Tiny graph executor that can run graphs
* containing only tvm PackedFunc.
* \file graph_executor.h
*/
#ifndef TVM_RUNTIME_GRAPH_EXECUTOR_GRAPH_EXECUTOR_H_
#define TVM_RUNTIME_GRAPH_EXECUTOR_GRAPH_EXECUTOR_H_
#include <dlpack/dlpack.h>
#include <dmlc/json.h>
#include <dmlc/memory_io.h>
#include <tvm/runtime/ndarray.h>
#include <tvm/runtime/packed_func.h>
#include <memory>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
namespace tvm {
namespace runtime {
/*! \brief macro to do C API call */
#define TVM_CCALL(func) \
{ \
int ret = (func); \
ICHECK_EQ(ret, 0) << TVMGetLastError(); \
}
/*! \brief operator attributes about tvm op */
struct TVMOpParam {
std::string func_name;
std::unordered_map<std::string, ObjectRef> attrs;
uint32_t num_inputs;
uint32_t num_outputs;
uint32_t flatten_data;
};
/*!
* \brief Tiny graph executor.
*
 * This runtime can be accessed in various languages via the
 * TVM runtime PackedFunc API.
*/
class TVM_DLL GraphExecutor : public ModuleNode {
struct OpArgs {
std::vector<DLTensor> args;
std::vector<TVMValue> arg_values;
std::vector<int> arg_tcodes;
std::vector<int64_t> shape_data;
};
public:
using ShapeInfo = Map<String, ObjectRef>;
using DtypeInfo = Map<String, ObjectRef>;
/*!
* \brief Get member function to front-end
* \param name The name of the function.
* \param sptr_to_self The pointer to the module node.
* \return The corresponding member function.
*/
virtual PackedFunc GetFunction(const std::string& name, const ObjectPtr<Object>& sptr_to_self);
/*!
* \return The type key of the executor.
*/
const char* type_key() const final { return "GraphExecutor"; }
void Run();
/*!
* \brief Initialize the graph executor with graph and device.
* \param graph_json The execution graph.
* \param module The module containing the compiled functions for the host
* processor.
* \param devs The device of the host and devices where graph nodes will be
* executed on.
   * \param lookup_linked_param_func If given, a PackedFunc invoked to look up linked parameters
   * by storage_id. If not given, linked parameters are looked up using an internal implementation,
* which is not compatible with RPCModules. Default is nullptr.
*/
void Init(const std::string& graph_json, tvm::runtime::Module module,
const std::vector<Device>& devs, const PackedFunc lookup_linked_param_func = nullptr);
/*!
* \brief Get the input index given the name of input.
* \param name The name of the input.
* \return The index of input.
*/
int GetInputIndex(const std::string& name);
/*!
* \brief Get the input info of Graph by parsing the input nodes.
* \return The shape and dtype tuple.
*/
std::tuple<ShapeInfo, DtypeInfo> GetInputInfo() const;
/*!
* \brief Get the output index given the name of output.
* \param name The name of the output.
* \return The index of output.
*/
int GetOutputIndex(const std::string& name);
/*!
* \brief set index-th input to the graph.
* \param index The input index.
* \param data_in The input data.
*/
void SetInput(int index, DLTensor* data_in);
/*!
* \brief set index-th input to the graph without copying the data
* \param index The input index.
* \param data_ref The input data that is referred.
*/
void SetInputZeroCopy(int index, DLTensor* data_ref);
/*!
* \brief set index-th output to the graph without copying the data.
* \param index The output index.
* \param data_ref The output data that is referred.
*/
void SetOutputZeroCopy(int index, DLTensor* data_ref);
/*!
* \brief Get the number of outputs
*
* \return The number of outputs from graph.
*/
int NumOutputs() const;
/*!
* \brief Get the number of inputs
*
* \return The number of inputs to the graph.
*/
int NumInputs() const;
/*!
* \brief Return NDArray for given input index.
* \param index The input index.
*
* \return NDArray corresponding to given input node index.
*/
NDArray GetInput(int index) const;
/*!
* \brief Return NDArray for given output index.
* \param index The output index.
*
* \return NDArray corresponding to given output node index.
*/
NDArray GetOutput(int index) const;
/*!
* \brief Copy index-th output to data_out.
* \param index The output index.
* \param data_out the output data.
*/
void CopyOutputTo(int index, DLTensor* data_out);
/*!
* \brief Load parameters from binary stream
* \param strm The input stream.
*/
void LoadParams(dmlc::Stream* strm);
/*!
* \brief Load parameters from parameter blob.
* \param param_blob A binary blob of parameter.
*/
void LoadParams(const std::string& param_blob);
/*!
* \brief Share parameters from pre-existing GraphExecutor instance.
* \param other A GraphExecutor instance, previously with |LoadParams| called with the
* identical input |param_blob|.
* \param strm The input stream.
*/
void ShareParams(const GraphExecutor& other, dmlc::Stream* strm);
/*!
* \brief Get total number of nodes.
* \return Total number of nodes.
*/
uint32_t GetNumOfNodes() const { return static_cast<uint32_t>(nodes_.size()); }
std::string GetNodeName(uint32_t nid) const { return nodes_[nid].name; }
protected:
// Memory pool entry.
struct PoolEntry {
int device_type;
std::vector<int64_t> shape;
DLDataType dtype;
int param_data_entry;
NDArray linked_param;
std::string scope;
// PoolEntry(int s, int dev_type, void* pre_linked_param) :
// size(s), device_type(dev_type), pre_linked_param(std::move(pre_linked_param)) {}
};
// Node entry
struct NodeEntry {
uint32_t node_id;
uint32_t index;
uint32_t version;
inline bool operator==(const NodeEntry& other) const {
return node_id == other.node_id && index == other.index && version == other.version;
}
// JSON Loader
void Load(dmlc::JSONReader* reader) {
reader->BeginArray();
ICHECK(reader->NextArrayItem()) << "invalid json format";
reader->Read(&node_id);
ICHECK(reader->NextArrayItem()) << "invalid json format";
reader->Read(&index);
if (reader->NextArrayItem()) {
reader->Read(&version);
ICHECK(!reader->NextArrayItem()) << "invalid json format";
} else {
version = 0;
}
}
};
// Node
struct Node {
// operator type in string
std::string op_type;
// name of the op
std::string name;
// parameters
TVMOpParam param;
// inputs
std::vector<NodeEntry> inputs;
// control deps
std::vector<uint32_t> control_deps;
// JSON Loader
void LoadAttrs(dmlc::JSONReader* reader, TVMOpParam* param) {
int bitmask = 0;
std::string key, value;
reader->BeginObject();
while (reader->NextObjectItem(&key)) {
reader->Read(&value);
if (key == "func_name") {
param->func_name = value;
bitmask |= 1;
} else if (key == "num_inputs") {
param->num_inputs = strtoul(value.c_str(), nullptr, 10);
bitmask |= 2;
} else if (key == "num_outputs") {
param->num_outputs = strtoul(value.c_str(), nullptr, 10);
bitmask |= 4;
} else if (key == "flatten_data") {
param->flatten_data = strtoul(value.c_str(), nullptr, 10);
bitmask |= 8;
} else {
param->attrs[key] = String(value);
}
}
ICHECK_EQ(bitmask, 1 | 2 | 4 | 8) << "invalid format";
}
// JSON Loader
void Load(dmlc::JSONReader* reader) {
reader->BeginObject();
int bitmask = 0;
std::string key;
while (reader->NextObjectItem(&key)) {
if (key == "op") {
reader->Read(&op_type);
bitmask |= 1;
} else if (key == "name") {
reader->Read(&name);
bitmask |= 2;
} else if (key == "inputs") {
reader->Read(&inputs);
bitmask |= 4;
} else if (key == "attr" || key == "attrs") {
this->LoadAttrs(reader, ¶m);
} else if (key == "control_deps") {
reader->Read(&control_deps);
} else {
LOG(FATAL) << "do not support key " << key;
}
}
ICHECK_EQ(bitmask, 1 | 2 | 4) << "invalid format";
}
};
struct GraphAttr {
size_t storage_num_not_alloctaed{0};
std::vector<int> storage_id;
std::vector<int> device_index;
std::vector<std::string> dltype;
std::vector<std::string> storage_scope;
std::vector<std::vector<int64_t>> shape;
// The graph attribute fields.
void Load(dmlc::JSONReader* reader) {
reader->BeginObject();
int bitmask = 0;
std::string key, type;
while (reader->NextObjectItem(&key)) {
if (key == "dltype") {
reader->BeginArray();
ICHECK(reader->NextArrayItem());
reader->Read(&type);
ICHECK_EQ(type, "list_str");
ICHECK(reader->NextArrayItem());
reader->Read(&dltype);
ICHECK(!reader->NextArrayItem());
bitmask |= 1;
} else if (key == "storage_id") {
reader->BeginArray();
ICHECK(reader->NextArrayItem());
reader->Read(&type);
ICHECK_EQ(type, "list_int");
ICHECK(reader->NextArrayItem());
reader->Read(&storage_id);
ICHECK(!reader->NextArrayItem());
bitmask |= 2;
} else if (key == "storage_scope") {
reader->BeginArray();
ICHECK(reader->NextArrayItem());
reader->Read(&type);
ICHECK_EQ(type, "list_str");
ICHECK(reader->NextArrayItem());
reader->Read(&storage_scope);
ICHECK(!reader->NextArrayItem());
bitmask |= 1;
} else if (key == "shape") {
reader->BeginArray();
ICHECK(reader->NextArrayItem());
reader->Read(&type);
ICHECK_EQ(type, "list_shape");
ICHECK(reader->NextArrayItem());
reader->Read(&shape);
ICHECK(!reader->NextArrayItem());
bitmask |= 4;
} else if (key == "device_index") {
reader->BeginArray();
ICHECK(reader->NextArrayItem());
reader->Read(&type);
ICHECK_EQ(type, "list_int");
ICHECK(reader->NextArrayItem());
reader->Read(&device_index);
ICHECK(!reader->NextArrayItem());
} else {
reader->BeginArray();
ICHECK(reader->NextArrayItem());
reader->Read(&type);
if (type == "list_int") {
ICHECK(reader->NextArrayItem());
std::vector<int> temp;
reader->Read(&temp);
} else if (type == "size_t") {
ICHECK(reader->NextArrayItem());
size_t temp;
reader->Read(&temp);
} else {
LOG(FATAL) << "cannot skip graph attr " << key;
}
ICHECK(!reader->NextArrayItem());
}
}
ICHECK_EQ(bitmask, 1 | 2 | 4) << "invalid format";
}
};
// The graph attribute fields.
void Load(dmlc::JSONReader* reader) {
reader->BeginObject();
int bitmask = 0;
std::string key;
while (reader->NextObjectItem(&key)) {
if (key == "nodes") {
reader->Read(&nodes_);
bitmask |= 1;
} else if (key == "arg_nodes") {
reader->Read(&input_nodes_);
bitmask |= 2;
} else if (key == "node_row_ptr") {
reader->Read(&node_row_ptr_);
bitmask |= 4;
} else if (key == "heads") {
reader->Read(&outputs_);
bitmask |= 8;
} else if (key == "attrs") {
reader->Read(&attrs_);
bitmask |= 16;
} else if (key == "metadata") {
break;
} else {
LOG(FATAL) << "key " << key << " is not supported";
}
}
ICHECK_EQ(bitmask, 1 | 2 | 4 | 8 | 16) << "invalid format";
}
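  // For reference, a minimal graph JSON accepted by the loader above (illustrative,
  // hand-written example; real graphs are emitted by the compiler):
  //   {
  //     "nodes": [{"op": "null", "name": "x", "inputs": []}],
  //     "arg_nodes": [0],
  //     "node_row_ptr": [0, 1],
  //     "heads": [[0, 0, 0]],
  //     "attrs": {"dltype": ["list_str", ["float32"]],
  //               "storage_id": ["list_int", [0]],
  //               "shape": ["list_shape", [[1]]]}
  //   }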
  /*! \brief PackedFunc to look up a linked parameter from a local Module. */
void DefaultLookupLinkedParam(TVMArgs args, TVMRetValue* rv);
/*! \brief Delete NDArray::Container with linked (i.e. static) data. */
static void LinkedNDArrayDeleter(Object* container);
/*! \brief Setup the temporal storage */
void SetupStorage();
/*! \brief Setup the executors. */
void SetupOpExecs();
/*!
* \brief Check the legality of external DLTensor*.
* \param external The external DLTensor*.
   * \param eid The data_entry_ index.
*/
void CheckExternalDLTensor(const DLTensor* external, uint32_t eid) const;
/*!
* \brief Create an execution function given input.
* \param attrs The node attributes.
* \param args The arguments to the functor, including inputs and outputs.
* \return The created executor.
*/
std::pair<std::function<void()>, std::shared_ptr<OpArgs>> CreateTVMOp(
const TVMOpParam& attrs, const std::vector<DLTensor>& args);
// Get node entry index.
uint32_t entry_id(uint32_t nid, uint32_t index) const { return node_row_ptr_[nid] + index; }
// Get node entry index.
uint32_t entry_id(const NodeEntry& e) const { return entry_id(e.node_id, e.index); }
// Number of node entries.
uint32_t num_node_entries() const { return node_row_ptr_.back(); }
/*! \brief The graph nodes. */
std::vector<Node> nodes_;
/*! \brief The argument nodes. */
std::vector<uint32_t> input_nodes_;
/*! \brief The parameter names. */
std::unordered_set<std::string> param_names_;
/*! \brief Map of input names to input indices. */
std::unordered_map<std::string, uint32_t> input_map_;
/*! \brief Map of output names to output indices. */
std::unordered_map<std::string, uint32_t> output_map_;
/*! \brief Used for quick node input DLTensor* lookup given an input eid. */
std::vector<std::vector<DLTensor*>> input_dltensors_;
/*! \brief Used for quick node output DLTensor* lookup given an output eid. */
std::vector<std::vector<DLTensor*>> output_dltensors_;
/*! \brief Used for quick node(both model output and op input) DLTensor* lookup given an eid. */
std::vector<std::vector<DLTensor*>> both_output_opinput_dltensors_;
/*! \brief Used for quick entry indexing. */
std::vector<uint32_t> node_row_ptr_;
/*! \brief Output entries. */
std::vector<NodeEntry> outputs_;
/*! \brief Additional graph attributes. */
GraphAttr attrs_;
/*! \brief The code module that contains both host and device code. */
tvm::runtime::Module module_;
/*! \brief Execution context of all devices including the host. */
std::vector<Device> devices_;
/*! \brief Common storage pool for all devices. */
std::vector<NDArray> storage_pool_;
/*! \brief Data entry of each node. */
std::vector<NDArray> data_entry_;
/*! \brief Data alignment of each node. */
std::vector<size_t> data_alignment_;
/*! \brief Operator on each node. */
std::vector<std::function<void()>> op_execs_;
/*! \brief Linked parameter lookup function. */
PackedFunc lookup_linked_param_;
/*! \brief Module's _lookup_linked_param function, used by DefaultLookupLinkedParam. */
PackedFunc module_lookup_linked_param_;
/*!
* \brief True when module_lookup_linked_param_ is valid.
   * When the module does not include linked parameters, module_lookup_linked_param_ will be nullptr.
*/
bool module_lookup_linked_param_valid_;
};
std::vector<Device> GetAllDevice(const TVMArgs& args, int dev_start_arg);
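// Illustrative usage sketch (not part of the upstream header): the typical call sequence
// against GraphExecutor. graph_json, mod, and input are assumed to be supplied by the
// caller; error handling is omitted and the helper name is made up for the example.
inline NDArray ExampleRunGraphOnCPU(const std::string& graph_json, tvm::runtime::Module mod,
                                    DLTensor* input) {
  auto exec = make_object<GraphExecutor>();
  exec->Init(graph_json, mod, {Device{kDLCPU, 0}});
  exec->SetInput(0, input);
  exec->Run();
  return exec->GetOutput(0);
}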
} // namespace runtime
} // namespace tvm
#endif // TVM_RUNTIME_GRAPH_EXECUTOR_GRAPH_EXECUTOR_H_
| https://github.com/zk-ml/tachikoma |