file_path
stringlengths
7
180
content
stringlengths
0
811k
repo
stringclasses
11 values
include/tvm/node/functor.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*!
 * \file tvm/node/functor.h
 * \brief Defines the Functor data structures.
 */
#ifndef TVM_NODE_FUNCTOR_H_
#define TVM_NODE_FUNCTOR_H_

#include <dmlc/logging.h>
#include <tvm/runtime/object.h>

#include <type_traits>
#include <utility>
#include <vector>

namespace tvm {

using runtime::ObjectRef;

/*!
 * \brief A dynamically dispatched functor on the type of the first argument.
 *
 * This is a class that is useful to construct polymorphic dispatching
 * based on the AST/IR node's type.
 *
 * \code
 *   NodeFunctor<std::string (const ObjectRef& n, std::string prefix)> tostr;
 *   tostr.set_dispatch<Add>([](const ObjectRef& op, std::string prefix) {
 *     return prefix + "Add";
 *   });
 *   tostr.set_dispatch<IntImm>([](const ObjectRef& op, std::string prefix) {
 *     return prefix + "IntImm";
 *   });
 *
 *   Expr x = make_const(1);
 *   Expr y = x + x;
 *   // dispatch to IntImm, outputs "MyIntImm"
 *   LOG(INFO) << tostr(x, "My");
 *   // dispatch to Add, outputs "MyAdd"
 *   LOG(INFO) << tostr(y, "My");
 * \endcode
 *
 * \tparam FType function signature
 *  This type is only defined for FType with function signature
 */
template <typename FType>
class NodeFunctor;

template <typename R, typename... Args>
class NodeFunctor<R(const ObjectRef& n, Args...)> {
 private:
  /*! \brief internal function pointer type */
  typedef R (*FPointer)(const ObjectRef& n, Args...);
  /*! \brief refer to itself. */
  using TSelf = NodeFunctor<R(const ObjectRef& n, Args...)>;
  /*! \brief internal function table, indexed by the runtime type_index of the dispatched node */
  std::vector<FPointer> func_;

 public:
  /*! \brief the result type of this functor */
  using result_type = R;
  /*!
   * \brief Whether the functor can dispatch the corresponding Node
   * \param n The node to be dispatched
   * \return Whether dispatching function is registered for n's type.
   */
  bool can_dispatch(const ObjectRef& n) const {
    uint32_t type_index = n->type_index();
    return type_index < func_.size() && func_[type_index] != nullptr;
  }
  /*!
   * \brief invoke the functor, dispatch on type of n
   * \param n The Node argument
   * \param args The additional arguments
   * \return The result.
   * \note Aborts (via ICHECK) if no dispatch function is registered for n's type.
   */
  R operator()(const ObjectRef& n, Args... args) const {
    ICHECK(can_dispatch(n)) << "NodeFunctor calls un-registered function on type "
                            << n->GetTypeKey();
    return (*func_[n->type_index()])(n, std::forward<Args>(args)...);
  }
  /*!
   * \brief set the dispatcher for type TNode
   * \param f The function to be set.
   * \tparam TNode the type of Node to be dispatched.
   * \return reference to self.
   * \note Aborts (via ICHECK) if a dispatch function was already set for TNode.
   */
  template <typename TNode>
  TSelf& set_dispatch(FPointer f) {  // NOLINT(*)
    uint32_t tindex = TNode::RuntimeTypeIndex();
    // Grow the table on demand so types registered later still fit.
    if (func_.size() <= tindex) {
      func_.resize(tindex + 1, nullptr);
    }
    ICHECK(func_[tindex] == nullptr) << "Dispatch for " << TNode::_type_key << " is already set";
    func_[tindex] = f;
    return *this;
  }
  /*!
   * \brief unset the dispatcher for type TNode
   *
   * \tparam TNode the type of Node to be dispatched.
   * \return reference to self.
   */
  template <typename TNode>
  TSelf& clear_dispatch() {  // NOLINT(*)
    uint32_t tindex = TNode::RuntimeTypeIndex();
    ICHECK_LT(tindex, func_.size()) << "clear_dispatch: index out of range";
    func_[tindex] = nullptr;
    return *this;
  }
};

// Declares a uniquely-named static registration variable for ClsName;
// combined with __COUNTER__ below to allow multiple registrations per class.
#define TVM_REG_FUNC_VAR_DEF(ClsName) static TVM_ATTRIBUTE_UNUSED auto& __make_functor##_##ClsName

/*!
 * \brief Useful macro to set NodeFunctor dispatch in a global static field.
 *
 * \code
 *  // Use NodeFunctor to implement ReprPrinter similar to Visitor Pattern.
 *  // vtable allows easy patch of new Node types, without changing
 *  // interface of ReprPrinter.
 *
 *  class ReprPrinter {
 *   public:
 *    std::ostream& stream;
 *    // the dispatch function.
 *    void print(Expr e) {
 *      const static FType& f = *vtable();
 *      f(e, this);
 *    }
 *
 *    using FType = NodeFunctor<void (const ObjectRef&, ReprPrinter* )>;
 *    // function to return global function table
 *    static FType& vtable();
 *  };
 *
 *  // in cpp/cc file
 *  ReprPrinter::FType& ReprPrinter::vtable() { // NOLINT(*)
 *    static FType inst; return inst;
 *  }
 *
 *  TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable)
 *  .set_dispatch<Add>([](const ObjectRef& ref, ReprPrinter* p) {
 *    auto* n = static_cast<const Add*>(ref.get());
 *    p->print(n->a);
 *    p->stream << '+';
 *    p->print(n->b);
 *  });
 *
 * \endcode
 *
 * \param ClsName The name of the class
 * \param FField The static function that returns a singleton of NodeFunctor.
 */
#define TVM_STATIC_IR_FUNCTOR(ClsName, FField) \
  TVM_STR_CONCAT(TVM_REG_FUNC_VAR_DEF(ClsName), __COUNTER__) = ClsName::FField()

}  // namespace tvm
#endif  // TVM_NODE_FUNCTOR_H_
https://github.com/zk-ml/tachikoma
include/tvm/node/node.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*!
 * \file tvm/node/node.h
 * \brief Definitions and helper macros for IR/AST nodes.
 *
 *  The node folder contains base utilities for IR/AST nodes,
 *  invariant of which specific language dialect.
 *
 *  We implement AST/IR nodes as sub-classes of runtime::Object.
 *  The base class Node is just an alias of runtime::Object.
 *
 *  Besides the runtime type checking provided by Object,
 *  node folder contains additional functionalities such as
 *  reflection and serialization, which are important features
 *  for building a compiler infra.
 */
#ifndef TVM_NODE_NODE_H_
#define TVM_NODE_NODE_H_

#include <tvm/node/reflection.h>
#include <tvm/node/repr_printer.h>
#include <tvm/node/structural_equal.h>
#include <tvm/node/structural_hash.h>
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/memory.h>
#include <tvm/runtime/object.h>

#include <string>
#include <type_traits>
#include <utility>
#include <vector>

namespace tvm {

// Re-export the commonly used runtime names into the tvm namespace so that
// node-level code can refer to them without the runtime:: qualifier.
using runtime::Downcast;
using runtime::GetRef;
using runtime::make_object;
using runtime::Object;
using runtime::ObjectPtr;
using runtime::ObjectPtrEqual;
using runtime::ObjectPtrHash;
using runtime::ObjectRef;
using runtime::PackedFunc;
using runtime::TVMArgs;
using runtime::TVMRetValue;
using runtime::TypeIndex;

}  // namespace tvm
#endif  // TVM_NODE_NODE_H_
https://github.com/zk-ml/tachikoma
include/tvm/node/object_path.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/node/object_path.h
 * ObjectPath class that represents a path from a root object to one of its descendants
 * via attribute access, array indexing etc.
 */

#ifndef TVM_NODE_OBJECT_PATH_H_
#define TVM_NODE_OBJECT_PATH_H_

#include <tvm/runtime/container/optional.h>
#include <tvm/runtime/container/string.h>
#include <tvm/runtime/object.h>

#include <string>

namespace tvm {

using runtime::Object;
using runtime::ObjectPtr;
using runtime::ObjectRef;

class ObjectPath;

/*!
 * \brief Path to an object from some root object.
 *
 * Motivation:
 *
 * Same IR node object can be referenced in several different contexts inside a larger IR object.
 * For example, a variable could be referenced in several statements within a block.
 *
 * This makes it impossible to use an object pointer to uniquely identify a "location" within
 * the larger IR object for error reporting purposes. The ObjectPath class addresses this problem
 * by serving as a unique "locator".
 */
class ObjectPathNode : public Object {
 public:
  /*! \brief Get the parent path */
  Optional<ObjectPath> GetParent() const;
  /*!
   * \brief Get the length of the path.
   *
   * For example, the path returned by `ObjectPath::Root()` has length 1.
   */
  int32_t Length() const;

  /*!
   * \brief Get a path prefix of the given length.
   *
   * Provided `length` must not exceed the `Length()` of this path.
   */
  ObjectPath GetPrefix(int32_t length) const;

  /*!
   * \brief Check if this path is a prefix of another path.
   *
   * The prefix is not strict, i.e. a path is considered a prefix of itself.
   */
  bool IsPrefixOf(const ObjectPath& other) const;

  /*! \brief Check if two paths are equal. */
  bool PathsEqual(const ObjectPath& other) const;

  /*! \brief Extend this path with access to an object attribute. */
  ObjectPath Attr(const char* attr_key) const;

  /*! \brief Extend this path with access to an object attribute. */
  ObjectPath Attr(Optional<String> attr_key) const;

  /*! \brief Extend this path with access to an array element. */
  ObjectPath ArrayIndex(int32_t index) const;

  /*! \brief Extend this path with access to a missing array element. */
  ObjectPath MissingArrayElement(int32_t index) const;

  /*! \brief Extend this path with access to a map value. */
  ObjectPath MapValue(ObjectRef key) const;

  /*! \brief Extend this path with access to a missing map entry. */
  ObjectPath MissingMapEntry() const;

  static constexpr const char* _type_key = "ObjectPath";
  TVM_DECLARE_BASE_OBJECT_INFO(ObjectPathNode, Object);

 protected:
  explicit ObjectPathNode(const ObjectPathNode* parent);

  friend class ObjectPath;
  friend std::string GetObjectPathRepr(const ObjectPathNode* node);

  const ObjectPathNode* ParentNode() const;

  /*! Compares just the last node of the path, without comparing the whole path. */
  virtual bool LastNodeEqual(const ObjectPathNode* other) const = 0;

  virtual std::string LastNodeString() const = 0;

 private:
  // Parent path, or NullOpt for the root node.
  Optional<ObjectRef> parent_;
  // Number of nodes from the root to (and including) this node.
  int32_t length_;
};

class ObjectPath : public ObjectRef {
 public:
  /*! \brief Create a path that represents the root object itself. */
  static ObjectPath Root();

  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(ObjectPath, ObjectRef, ObjectPathNode);
};

//-------------------------------------------------------------------------
//-----   Concrete object path nodes   ------------------------------------
//-------------------------------------------------------------------------

// ----- Root -----

class RootPathNode final : public ObjectPathNode {
 public:
  explicit RootPathNode();

  static constexpr const char* _type_key = "RootPath";
  TVM_DECLARE_FINAL_OBJECT_INFO(RootPathNode, ObjectPathNode);

 protected:
  bool LastNodeEqual(const ObjectPathNode* other) const final;
  std::string LastNodeString() const final;
};

class RootPath : public ObjectPath {
 public:
  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(RootPath, ObjectPath, RootPathNode);
};

// ----- Attribute access -----

class AttributeAccessPathNode final : public ObjectPathNode {
 public:
  /*! \brief Name of the attribute being accessed. Must be a static string. */
  String attr_key;

  explicit AttributeAccessPathNode(const ObjectPathNode* parent, String attr_key);

  static constexpr const char* _type_key = "AttributeAccessPath";
  TVM_DECLARE_FINAL_OBJECT_INFO(AttributeAccessPathNode, ObjectPathNode);

 protected:
  bool LastNodeEqual(const ObjectPathNode* other) const final;
  std::string LastNodeString() const final;
};

class AttributeAccessPath : public ObjectPath {
 public:
  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(AttributeAccessPath, ObjectPath,
                                            AttributeAccessPathNode);
};

// ----- Unknown attribute access -----

class UnknownAttributeAccessPathNode final : public ObjectPathNode {
 public:
  explicit UnknownAttributeAccessPathNode(const ObjectPathNode* parent);

  static constexpr const char* _type_key = "UnknownAttributeAccessPath";
  TVM_DECLARE_FINAL_OBJECT_INFO(UnknownAttributeAccessPathNode, ObjectPathNode);

 protected:
  bool LastNodeEqual(const ObjectPathNode* other) const final;
  std::string LastNodeString() const final;
};

class UnknownAttributeAccessPath : public ObjectPath {
 public:
  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(UnknownAttributeAccessPath, ObjectPath,
                                            UnknownAttributeAccessPathNode);
};

// ----- Array element access by index -----

class ArrayIndexPathNode : public ObjectPathNode {
 public:
  /*! \brief Index of the array element that is being accessed. */
  int32_t index;

  explicit ArrayIndexPathNode(const ObjectPathNode* parent, int32_t index);

  static constexpr const char* _type_key = "ArrayIndexPath";
  TVM_DECLARE_FINAL_OBJECT_INFO(ArrayIndexPathNode, ObjectPathNode);

 protected:
  bool LastNodeEqual(const ObjectPathNode* other) const final;
  std::string LastNodeString() const final;
};

class ArrayIndexPath : public ObjectPath {
 public:
  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(ArrayIndexPath, ObjectPath, ArrayIndexPathNode);
};

// ----- Missing array element -----

class MissingArrayElementPathNode : public ObjectPathNode {
 public:
  /*! \brief Index of the array element that is missing. */
  int32_t index;

  explicit MissingArrayElementPathNode(const ObjectPathNode* parent, int32_t index);

  static constexpr const char* _type_key = "MissingArrayElementPath";
  TVM_DECLARE_FINAL_OBJECT_INFO(MissingArrayElementPathNode, ObjectPathNode);

 protected:
  bool LastNodeEqual(const ObjectPathNode* other) const final;
  std::string LastNodeString() const final;
};

class MissingArrayElementPath : public ObjectPath {
 public:
  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(MissingArrayElementPath, ObjectPath,
                                            MissingArrayElementPathNode);
};

// ----- Map value -----

class MapValuePathNode : public ObjectPathNode {
 public:
  /*! \brief Key of the map entry that is being accessed */
  ObjectRef key;

  explicit MapValuePathNode(const ObjectPathNode* parent, ObjectRef key);

  static constexpr const char* _type_key = "MapValuePath";
  TVM_DECLARE_FINAL_OBJECT_INFO(MapValuePathNode, ObjectPathNode);

 protected:
  bool LastNodeEqual(const ObjectPathNode* other) const final;
  std::string LastNodeString() const final;
};

class MapValuePath : public ObjectPath {
 public:
  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(MapValuePath, ObjectPath, MapValuePathNode);
};

// ----- Missing map entry -----

class MissingMapEntryPathNode : public ObjectPathNode {
 public:
  explicit MissingMapEntryPathNode(const ObjectPathNode* parent);

  static constexpr const char* _type_key = "MissingMapEntryPath";
  TVM_DECLARE_FINAL_OBJECT_INFO(MissingMapEntryPathNode, ObjectPathNode);

 protected:
  bool LastNodeEqual(const ObjectPathNode* other) const final;
  std::string LastNodeString() const final;
};

class MissingMapEntryPath : public ObjectPath {
 public:
  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(MissingMapEntryPath, ObjectPath,
                                            MissingMapEntryPathNode);
};

}  // namespace tvm

#endif  // TVM_NODE_OBJECT_PATH_H_
https://github.com/zk-ml/tachikoma
include/tvm/node/reflection.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*!
 * \file tvm/node/reflection.h
 * \brief Reflection and serialization of compiler IR/AST nodes.
 */
#ifndef TVM_NODE_REFLECTION_H_
#define TVM_NODE_REFLECTION_H_

#include <tvm/node/structural_equal.h>
#include <tvm/node/structural_hash.h>
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/data_type.h>
#include <tvm/runtime/memory.h>
#include <tvm/runtime/ndarray.h>
#include <tvm/runtime/object.h>
#include <tvm/runtime/packed_func.h>

#include <string>
#include <type_traits>
#include <vector>

namespace tvm {

using runtime::Object;
using runtime::ObjectPtr;
using runtime::ObjectRef;

/*!
 * \brief Visitor class to get the attributes of an AST/IR node.
 *  The content is going to be called for each field.
 *
 *  Each objects that wants reflection will need to implement
 *  a VisitAttrs function and call visitor->Visit on each of its field.
 */
class AttrVisitor {
 public:
  //! \cond Doxygen_Suppress
  TVM_DLL virtual ~AttrVisitor() = default;
  TVM_DLL virtual void Visit(const char* key, double* value) = 0;
  TVM_DLL virtual void Visit(const char* key, int64_t* value) = 0;
  TVM_DLL virtual void Visit(const char* key, uint64_t* value) = 0;
  TVM_DLL virtual void Visit(const char* key, int* value) = 0;
  TVM_DLL virtual void Visit(const char* key, bool* value) = 0;
  TVM_DLL virtual void Visit(const char* key, std::string* value) = 0;
  TVM_DLL virtual void Visit(const char* key, void** value) = 0;
  TVM_DLL virtual void Visit(const char* key, DataType* value) = 0;
  TVM_DLL virtual void Visit(const char* key, runtime::NDArray* value) = 0;
  TVM_DLL virtual void Visit(const char* key, runtime::ObjectRef* value) = 0;
  // Enum fields are visited through their underlying int representation;
  // the static_assert enforces that the underlying type is exactly int.
  template <typename ENum, typename = typename std::enable_if<std::is_enum<ENum>::value>::type>
  void Visit(const char* key, ENum* ptr) {
    static_assert(std::is_same<int, typename std::underlying_type<ENum>::type>::value,
                  "declare enum to be enum int to use visitor");
    this->Visit(key, reinterpret_cast<int*>(ptr));
  }
  //! \endcond
};

/*!
 * \brief Virtual function table to support IR/AST node reflection.
 *
 * Functions are stored  in columnar manner.
 * Each column is a vector indexed by Object's type_index.
 */
class ReflectionVTable {
 public:
  /*!
   * \brief Visitor function.
   * \note We use function pointer, instead of std::function
   *       to reduce the dispatch overhead as field visit
   *       does not need as much customization.
   */
  typedef void (*FVisitAttrs)(Object* self, AttrVisitor* visitor);
  /*!
   * \brief Equality comparison function.
   */
  typedef bool (*FSEqualReduce)(const Object* self, const Object* other, SEqualReducer equal);
  /*!
   * \brief Structural hash reduction function.
   */
  typedef void (*FSHashReduce)(const Object* self, SHashReducer hash_reduce);
  /*!
   * \brief creator function.
   * \param repr_bytes Repr bytes to create the object.
   *        If this is not empty then FReprBytes must be defined for the object.
   * \return The created function.
   */
  typedef ObjectPtr<Object> (*FCreate)(const std::string& repr_bytes);
  /*!
   * \brief Function to get a byte representation that can be used to recover the object.
   * \param node The node pointer.
   * \return bytes The bytes that can be used to recover the object.
   */
  typedef std::string (*FReprBytes)(const Object* self);
  /*!
   * \brief Dispatch the VisitAttrs function.
   * \param self The pointer to the object.
   * \param visitor The attribute visitor.
   */
  inline void VisitAttrs(Object* self, AttrVisitor* visitor) const;
  /*!
   * \brief Get repr bytes if any.
   * \param self The pointer to the object.
   * \param repr_bytes The output repr bytes, can be null, in which case the function
   *                   simply queries if the ReprBytes function exists for the type.
   * \return Whether repr bytes exists
   */
  inline bool GetReprBytes(const Object* self, std::string* repr_bytes) const;
  /*!
   * \brief Dispatch the SEqualReduce function.
   * \param self The pointer to the object.
   * \param other The pointer to another object to be compared.
   * \param equal The equality comparator.
   * \return the result.
   */
  bool SEqualReduce(const Object* self, const Object* other, SEqualReducer equal) const;
  /*!
   * \brief Dispatch the SHashReduce function.
   * \param self The pointer to the object.
   * \param hash_reduce The hash reducer.
   * \return the result.
   */
  void SHashReduce(const Object* self, SHashReducer hash_reduce) const;
  /*!
   * \brief Create an initial object using default constructor
   *        by type_key and global key.
   *
   * \param type_key The type key of the object.
   * \param repr_bytes Bytes representation of the object if any.
   */
  TVM_DLL ObjectPtr<Object> CreateInitObject(const std::string& type_key,
                                             const std::string& repr_bytes = "") const;
  /*!
   * \brief Create an object by giving kwargs about its fields.
   *
   * \param type_key The type key.
   * \param kwargs the arguments in format key1, value1, ..., key_n, value_n.
   * \return The created object.
   */
  TVM_DLL ObjectRef CreateObject(const std::string& type_key, const runtime::TVMArgs& kwargs);
  /*!
   * \brief Create an object by giving kwargs about its fields.
   *
   * \param type_key The type key.
   * \param kwargs The field arguments.
   * \return The created object.
   */
  TVM_DLL ObjectRef CreateObject(const std::string& type_key, const Map<String, ObjectRef>& kwargs);
  /*!
   * \brief Get an field object by the attr name.
   * \param self The pointer to the object.
   * \param attr_name The name of the field.
   * \return The corresponding attribute value.
   * \note This function will throw an exception if the object does not contain the field.
   */
  TVM_DLL runtime::TVMRetValue GetAttr(Object* self, const String& attr_name) const;

  /*!
   * \brief List all the fields in the object.
   * \return All the fields.
   */
  TVM_DLL std::vector<std::string> ListAttrNames(Object* self) const;

  /*! \return The global singleton. */
  TVM_DLL static ReflectionVTable* Global();

  class Registry;
  template <typename T, typename TraitName>
  inline Registry Register();

 private:
  /*! \brief Attribute visitor. */
  std::vector<FVisitAttrs> fvisit_attrs_;
  /*! \brief Structural equal function. */
  std::vector<FSEqualReduce> fsequal_reduce_;
  /*! \brief Structural hash function. */
  std::vector<FSHashReduce> fshash_reduce_;
  /*! \brief Creation function. */
  std::vector<FCreate> fcreate_;
  /*! \brief ReprBytes function. */
  std::vector<FReprBytes> frepr_bytes_;
};

/*! \brief Registry of a reflection table. */
class ReflectionVTable::Registry {
 public:
  Registry(ReflectionVTable* parent, uint32_t type_index)
      : parent_(parent), type_index_(type_index) {}
  /*!
   * \brief Set fcreate function.
   * \param f The creator function.
   * \return Reference to self.
   */
  Registry& set_creator(FCreate f) {  // NOLINT(*)
    ICHECK_LT(type_index_, parent_->fcreate_.size());
    parent_->fcreate_[type_index_] = f;
    return *this;
  }
  /*!
   * \brief Set bytes repr function.
   * \param f The ReprBytes function.
   * \return Reference to self.
   */
  Registry& set_repr_bytes(FReprBytes f) {  // NOLINT(*)
    ICHECK_LT(type_index_, parent_->frepr_bytes_.size());
    parent_->frepr_bytes_[type_index_] = f;
    return *this;
  }

 private:
  ReflectionVTable* parent_;
  uint32_t type_index_;
};

#define TVM_REFLECTION_REG_VAR_DEF \
  static TVM_ATTRIBUTE_UNUSED ::tvm::ReflectionVTable::Registry __make_reflection

/*!
 * \brief Directly register reflection VTable.
 * \param TypeName The name of the type.
 * \param TraitName A trait class that implements functions like VisitAttrs and SEqualReduce.
 *
 * \code
 *
 *  // Example SEQualReduce traits for runtime StringObj.
 *
 *  struct StringObjTrait {
 *    static constexpr const std::nullptr_t VisitAttrs = nullptr;
 *
 *    static void SHashReduce(const runtime::StringObj* key, SHashReducer hash_reduce) {
 *      hash_reduce->SHashReduceHashedValue(runtime::String::HashBytes(key->data, key->size));
 *    }
 *
 *    static bool SEqualReduce(const runtime::StringObj* lhs,
 *                             const runtime::StringObj* rhs,
 *                             SEqualReducer equal) {
 *      if (lhs == rhs) return true;
 *      if (lhs->size != rhs->size) return false;
 *      if (lhs->data == rhs->data) return true;
 *      return std::memcmp(lhs->data, rhs->data, lhs->size) == 0;
 *    }
 *  };
 *
 *  TVM_REGISTER_REFLECTION_VTABLE(runtime::StringObj, StringObjTrait);
 *
 * \endcode
 *
 * \note This macro can be called in different place as TVM_REGISTER_OBJECT_TYPE.
 *       And can be used to register the related reflection functions for runtime objects.
 */
#define TVM_REGISTER_REFLECTION_VTABLE(TypeName, TraitName) \
  TVM_STR_CONCAT(TVM_REFLECTION_REG_VAR_DEF, __COUNTER__) = \
      ::tvm::ReflectionVTable::Global()->Register<TypeName, TraitName>()

/*!
 * \brief Register a node type to object registry and reflection registry.
 * \param TypeName The name of the type.
 * \note This macro will call TVM_REGISTER_OBJECT_TYPE for the type as well.
 */
#define TVM_REGISTER_NODE_TYPE(TypeName)                                             \
  TVM_REGISTER_OBJECT_TYPE(TypeName);                                                \
  TVM_REGISTER_REFLECTION_VTABLE(TypeName, ::tvm::detail::ReflectionTrait<TypeName>) \
      .set_creator([](const std::string&) -> ObjectPtr<Object> {                     \
        return ::tvm::runtime::make_object<TypeName>();                              \
      })

// Implementation details
//
// The Impl* / Select* templates below pick, at compile time, either the
// member-function-based implementation (when the type declares the
// corresponding _type_has_method_* flag / the trait provides a non-null
// function) or a nullptr placeholder.
namespace detail {

template <typename T, bool = T::_type_has_method_visit_attrs>
struct ImplVisitAttrs {
  static constexpr const std::nullptr_t VisitAttrs = nullptr;
};

template <typename T>
struct ImplVisitAttrs<T, true> {
  static void VisitAttrs(T* self, AttrVisitor* v) { self->VisitAttrs(v); }
};

template <typename T, bool = T::_type_has_method_sequal_reduce>
struct ImplSEqualReduce {
  static constexpr const std::nullptr_t SEqualReduce = nullptr;
};

template <typename T>
struct ImplSEqualReduce<T, true> {
  static bool SEqualReduce(const T* self, const T* other, SEqualReducer equal) {
    return self->SEqualReduce(other, equal);
  }
};

template <typename T, bool = T::_type_has_method_shash_reduce>
struct ImplSHashReduce {
  static constexpr const std::nullptr_t SHashReduce = nullptr;
};

template <typename T>
struct ImplSHashReduce<T, true> {
  static void SHashReduce(const T* self, SHashReducer hash_reduce) {
    self->SHashReduce(hash_reduce);
  }
};

template <typename T>
struct ReflectionTrait : public ImplVisitAttrs<T>,
                         public ImplSEqualReduce<T>,
                         public ImplSHashReduce<T> {};

template <typename T, typename TraitName,
          bool = std::is_null_pointer<decltype(TraitName::VisitAttrs)>::value>
struct SelectVisitAttrs {
  static constexpr const std::nullptr_t VisitAttrs = nullptr;
};

template <typename T, typename TraitName>
struct SelectVisitAttrs<T, TraitName, false> {
  static void VisitAttrs(Object* self, AttrVisitor* v) {
    TraitName::VisitAttrs(static_cast<T*>(self), v);
  }
};

template <typename T, typename TraitName,
          bool = std::is_null_pointer<decltype(TraitName::SEqualReduce)>::value>
struct SelectSEqualReduce {
  static constexpr const std::nullptr_t SEqualReduce = nullptr;
};

template <typename T, typename TraitName>
struct SelectSEqualReduce<T, TraitName, false> {
  static bool SEqualReduce(const Object* self, const Object* other, SEqualReducer equal) {
    return TraitName::SEqualReduce(static_cast<const T*>(self), static_cast<const T*>(other),
                                   equal);
  }
};

template <typename T, typename TraitName,
          bool = std::is_null_pointer<decltype(TraitName::SHashReduce)>::value>
struct SelectSHashReduce {
  static constexpr const std::nullptr_t SHashReduce = nullptr;
};

template <typename T, typename TraitName>
struct SelectSHashReduce<T, TraitName, false> {
  static void SHashReduce(const Object* self, SHashReducer hash_reduce) {
    return TraitName::SHashReduce(static_cast<const T*>(self), hash_reduce);
  }
};

}  // namespace detail

template <typename T, typename TraitName>
inline ReflectionVTable::Registry ReflectionVTable::Register() {
  uint32_t tindex = T::RuntimeTypeIndex();
  // Grow all columns together so they stay indexable by the same type_index.
  if (tindex >= fvisit_attrs_.size()) {
    fvisit_attrs_.resize(tindex + 1, nullptr);
    fcreate_.resize(tindex + 1, nullptr);
    frepr_bytes_.resize(tindex + 1, nullptr);
    fsequal_reduce_.resize(tindex + 1, nullptr);
    fshash_reduce_.resize(tindex + 1, nullptr);
  }
  // functor that implements the redirection.
  fvisit_attrs_[tindex] = ::tvm::detail::SelectVisitAttrs<T, TraitName>::VisitAttrs;

  fsequal_reduce_[tindex] = ::tvm::detail::SelectSEqualReduce<T, TraitName>::SEqualReduce;

  fshash_reduce_[tindex] = ::tvm::detail::SelectSHashReduce<T, TraitName>::SHashReduce;

  return Registry(this, tindex);
}

inline void ReflectionVTable::VisitAttrs(Object* self, AttrVisitor* visitor) const {
  uint32_t tindex = self->type_index();
  // Types without a registered visitor simply have no visitable fields.
  if (tindex >= fvisit_attrs_.size() || fvisit_attrs_[tindex] == nullptr) {
    return;
  }
  fvisit_attrs_[tindex](self, visitor);
}

inline bool ReflectionVTable::GetReprBytes(const Object* self, std::string* repr_bytes) const {
  uint32_t tindex = self->type_index();
  if (tindex < frepr_bytes_.size() && frepr_bytes_[tindex] != nullptr) {
    if (repr_bytes != nullptr) {
      *repr_bytes = frepr_bytes_[tindex](self);
    }
    return true;
  } else {
    return false;
  }
}

/*!
 * \brief Given an object and an address of its attribute, return the key of the attribute.
 * \return NullOpt if no attribute with the given address exists.
 */
Optional<String> GetAttrKeyByAddress(const Object* object, const void* attr_address);

}  // namespace tvm
#endif  // TVM_NODE_REFLECTION_H_
https://github.com/zk-ml/tachikoma
include/tvm/node/repr_printer.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*!
 * \file tvm/node/repr_printer.h
 * \brief Printer class to print repr string of each AST/IR nodes.
 */
#ifndef TVM_NODE_REPR_PRINTER_H_
#define TVM_NODE_REPR_PRINTER_H_

#include <tvm/node/functor.h>

#include <iostream>

namespace tvm {
/*! \brief A printer class to print the AST/IR nodes. */
class ReprPrinter {
 public:
  /*! \brief The output stream */
  std::ostream& stream;
  /*! \brief The indentation level. */
  int indent{0};

  explicit ReprPrinter(std::ostream& stream)  // NOLINT(*)
      : stream(stream) {}

  /*! \brief Print the node to the output stream. */
  TVM_DLL void Print(const ObjectRef& node);
  /*! \brief Print indent to the stream */
  TVM_DLL void PrintIndent();
  // Allow registration to be printer.
  using FType = NodeFunctor<void(const ObjectRef&, ReprPrinter*)>;
  /*! \brief Return the global per-type dispatch table used by Print. */
  TVM_DLL static FType& vtable();
};

/*!
 * \brief Dump the node to stderr, used for debug purposes.
 * \param node The input node
 */
TVM_DLL void Dump(const runtime::ObjectRef& node);

/*!
 * \brief Dump the node to stderr, used for debug purposes.
 * \param node The input node
 */
TVM_DLL void Dump(const runtime::Object* node);

}  // namespace tvm

namespace tvm {
namespace runtime {
// default print function for all objects
// provide in the runtime namespace as this is where objectref originally comes from.
inline std::ostream& operator<<(std::ostream& os, const ObjectRef& n) {  // NOLINT(*)
  ReprPrinter(os).Print(n);
  return os;
}
}  // namespace runtime
}  // namespace tvm
#endif  // TVM_NODE_REPR_PRINTER_H_
https://github.com/zk-ml/tachikoma
include/tvm/node/serialization.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*!
 * Utility functions for serialization.
 * \file tvm/node/serialization.h
 */
#ifndef TVM_NODE_SERIALIZATION_H_
#define TVM_NODE_SERIALIZATION_H_

#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/object.h>

#include <string>

namespace tvm {
/*!
 * \brief Save the node, as well as all the nodes it depends on, as json.
 *  This can be used to serialize any TVM object.
 *
 * \param node The node to serialize.
 * \return the string representation of the node.
 */
TVM_DLL std::string SaveJSON(const runtime::ObjectRef& node);

/*!
 * \brief Load a TVM Node object from a json string produced by SaveJSON.
 *
 * \param json_str The json string to load from.
 *
 * \return The deserialized node as an ObjectRef.
 */
TVM_DLL runtime::ObjectRef LoadJSON(std::string json_str);
}  // namespace tvm
#endif  // TVM_NODE_SERIALIZATION_H_
https://github.com/zk-ml/tachikoma
include/tvm/node/structural_equal.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*!
 * \file tvm/node/structural_equal.h
 * \brief Structural equality comparison.
 */
#ifndef TVM_NODE_STRUCTURAL_EQUAL_H_
#define TVM_NODE_STRUCTURAL_EQUAL_H_

#include <tvm/node/functor.h>
#include <tvm/node/object_path.h>
#include <tvm/runtime/container/array.h>
#include <tvm/runtime/data_type.h>

#include <string>

namespace tvm {

/*!
 * \brief Equality definition of base value class.
 *
 *  Provides value-equality for the plain attribute types (numbers, strings,
 *  DataType, enums) that appear as fields of IR nodes.
 */
class BaseValueEqual {
 public:
  bool operator()(const double& lhs, const double& rhs) const {
    // fuzzy float pt comparison: exact match, or within an absolute
    // tolerance of 1e-9, counts as equal.
    constexpr double atol = 1e-9;
    if (lhs == rhs) return true;
    double diff = lhs - rhs;
    return diff > -atol && diff < atol;
  }

  bool operator()(const int64_t& lhs, const int64_t& rhs) const { return lhs == rhs; }
  bool operator()(const uint64_t& lhs, const uint64_t& rhs) const { return lhs == rhs; }
  bool operator()(const int& lhs, const int& rhs) const { return lhs == rhs; }
  bool operator()(const bool& lhs, const bool& rhs) const { return lhs == rhs; }
  bool operator()(const std::string& lhs, const std::string& rhs) const { return lhs == rhs; }
  bool operator()(const DataType& lhs, const DataType& rhs) const { return lhs == rhs; }
  // Any enum type compares by value; enabled only for enums via SFINAE.
  template <typename ENum, typename = typename std::enable_if<std::is_enum<ENum>::value>::type>
  bool operator()(const ENum& lhs, const ENum& rhs) const {
    return lhs == rhs;
  }
};

/*!
 * \brief Pair of `ObjectPath`s, one for each object being tested for structural equality.
 */
class ObjectPathPairNode : public Object {
 public:
  /*! \brief Path into the left-hand-side object. */
  ObjectPath lhs_path;
  /*! \brief Path into the right-hand-side object. */
  ObjectPath rhs_path;

  ObjectPathPairNode(ObjectPath lhs_path, ObjectPath rhs_path);

  static constexpr const char* _type_key = "ObjectPathPair";
  TVM_DECLARE_FINAL_OBJECT_INFO(ObjectPathPairNode, Object);
};

/*! \brief Managed reference to ObjectPathPairNode. */
class ObjectPathPair : public ObjectRef {
 public:
  ObjectPathPair(ObjectPath lhs_path, ObjectPath rhs_path);

  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(ObjectPathPair, ObjectRef, ObjectPathPairNode);
};

/*!
 * \brief Content-aware structural equality comparator for objects.
 *
 *  The structural equality is recursively defined in the DAG of IR nodes via SEqual.
 *  There are two kinds of nodes:
 *
 *  - Graph node: a graph node in lhs can only be mapped as equal to
 *    one and only one graph node in rhs.
 *  - Normal node: equality is recursively defined without the restriction
 *    of graph nodes.
 *
 *  Vars(tir::Var, TypeVar) and non-constant relay expression nodes are graph nodes.
 *  For example, it means that `%1 = %x + %y; %1 + %1` is not structurally equal
 *  to `%1 = %x + %y; %2 = %x + %y; %1 + %2` in relay.
 *
 *  A var-type node(e.g. tir::Var, TypeVar) can be mapped as equal to another var
 *  with the same type if one of the following condition holds:
 *
 *  - They appear in a same definition point(e.g. function argument).
 *  - They points to the same VarNode via the same_as relation.
 *  - They appear in a same usage point, and map_free_vars is set to be True.
 */
class StructuralEqual : public BaseValueEqual {
 public:
  // inherit operator() overloads for plain attribute values
  using BaseValueEqual::operator();
  /*!
   * \brief Compare objects via structural equal.
   * \param lhs The left operand.
   * \param rhs The right operand.
   * \return The comparison result.
   */
  TVM_DLL bool operator()(const ObjectRef& lhs, const ObjectRef& rhs) const;
};

/*!
 * \brief A Reducer class to reduce the structural equality result of two objects.
 *
 *  The reducer will call the SEqualReduce function of each object recursively.
 *  Importantly, the reducer may not directly use recursive calls to resolve the
 *  equality checking. Instead, it can store the necessary equality conditions
 *  and check later via an internally managed stack.
 */
class SEqualReducer {
 private:
  // Opaque state used when path tracing is enabled; defined in the .cc file.
  struct PathTracingData;

 public:
  /*! \brief Internal handler that defines custom behaviors. */
  class Handler {
   public:
    /*!
     * \brief Reduce condition to equality of lhs and rhs.
     *
     * \param lhs The left operand.
     * \param rhs The right operand.
     * \param map_free_vars Whether do we allow remap variables if possible.
     * \param current_paths Optional paths to `lhs` and `rhs` objects, for error traceability.
     *
     * \return false if there is an immediate failure, true otherwise.
     * \note This function may save the equality condition of (lhs == rhs) in an internal
     *       stack and try to resolve later.
     */
    virtual bool SEqualReduce(const ObjectRef& lhs, const ObjectRef& rhs, bool map_free_vars,
                              const Optional<ObjectPathPair>& current_paths) = 0;

    /*!
     * \brief Mark the comparison as failed, but don't fail immediately.
     *
     * This is useful for producing better error messages when comparing containers.
     * For example, if two array sizes mismatch, it's better to mark the comparison as failed
     * but compare array elements anyway, so that we could find the true first mismatch.
     *
     * \param mismatch_paths Paths to the mismatched objects.
     */
    virtual void DeferFail(const ObjectPathPair& mismatch_paths) = 0;

    /*!
     * \brief Lookup the graph node equal map for vars that are already mapped.
     *
     *  This is an auxiliary method to check the Map<Var, Value> equality.
     * \param lhs an lhs value.
     *
     * \return The corresponding rhs value if any, nullptr if not available.
     */
    virtual ObjectRef MapLhsToRhs(const ObjectRef& lhs) = 0;
    /*!
     * \brief Mark current comparison as graph node equal comparison.
     */
    virtual void MarkGraphNode() = 0;

   protected:
    using PathTracingData = SEqualReducer::PathTracingData;
  };

  /*! \brief default constructor */
  SEqualReducer() = default;
  /*!
   * \brief Constructor with a specific handler.
   * \param handler The equal handler for objects.
   * \param tracing_data Optional pointer to the path tracing data.
   * \param map_free_vars Whether or not to map free variables.
   */
  explicit SEqualReducer(Handler* handler, const PathTracingData* tracing_data, bool map_free_vars)
      : handler_(handler), tracing_data_(tracing_data), map_free_vars_(map_free_vars) {}

  /*!
   * \brief Reduce condition to comparison of two attribute values.
   * \param lhs The left operand.
   * \param rhs The right operand.
   * \return the immediate check result.
   */
  bool operator()(const double& lhs, const double& rhs) const;
  bool operator()(const int64_t& lhs, const int64_t& rhs) const;
  bool operator()(const uint64_t& lhs, const uint64_t& rhs) const;
  bool operator()(const int& lhs, const int& rhs) const;
  bool operator()(const bool& lhs, const bool& rhs) const;
  bool operator()(const std::string& lhs, const std::string& rhs) const;
  bool operator()(const DataType& lhs, const DataType& rhs) const;

  // Enum attributes are compared via their int representation so that the
  // mismatch can be recorded with the attribute addresses when tracing.
  template <typename ENum, typename = typename std::enable_if<std::is_enum<ENum>::value>::type>
  bool operator()(const ENum& lhs, const ENum& rhs) const {
    using Underlying = typename std::underlying_type<ENum>::type;
    static_assert(std::is_same<Underlying, int>::value,
                  "Enum must have `int` as the underlying type");
    return EnumAttrsEqual(static_cast<int>(lhs), static_cast<int>(rhs), &lhs, &rhs);
  }

  /*!
   * \brief Reduce condition to comparison of two objects.
   * \param lhs The left operand.
   * \param rhs The right operand.
   * \return the immediate check result.
   */
  bool operator()(const ObjectRef& lhs, const ObjectRef& rhs) const;

  /*!
   * \brief Reduce condition to comparison of two objects.
   *
   * Like `operator()`, but with an additional `paths` parameter that specifies explicit object
   * paths for `lhs` and `rhs`. This is useful for implementing SEqualReduce() methods for
   * container objects like Array and Map, or other custom objects that store nested objects
   * that are not simply attributes.
   *
   * Can only be called when `IsPathTracingEnabled()` is `true`.
   *
   * \param lhs The left operand.
   * \param rhs The right operand.
   * \param paths Object paths for `lhs` and `rhs`.
   * \return the immediate check result.
   */
  bool operator()(const ObjectRef& lhs, const ObjectRef& rhs, const ObjectPathPair& paths) const {
    ICHECK(IsPathTracingEnabled()) << "Path tracing must be enabled when calling this function";
    return ObjectAttrsEqual(lhs, rhs, map_free_vars_, &paths);
  }

  /*!
   * \brief Reduce condition to comparison of two definitions,
   *        where free vars can be mapped.
   *
   *  Call this function to compare definition points such as function params
   *  and var in a let-binding.
   *
   * \param lhs The left operand.
   * \param rhs The right operand.
   * \return the immediate check result.
   */
  bool DefEqual(const ObjectRef& lhs, const ObjectRef& rhs);

  /*!
   * \brief Reduce condition to comparison of two arrays.
   * \param lhs The left operand.
   * \param rhs The right operand.
   * \return the immediate check result.
   */
  template <typename T>
  bool operator()(const Array<T>& lhs, const Array<T>& rhs) const {
    if (tracing_data_ == nullptr) {
      // quick specialization for Array to reduce amount of recursion
      // depth as array comparison is pretty common.
      if (lhs.size() != rhs.size()) return false;
      for (size_t i = 0; i < lhs.size(); ++i) {
        if (!(operator()(lhs[i], rhs[i]))) return false;
      }
      return true;
    }

    // If tracing is enabled, fall back to the regular path
    const ObjectRef& lhs_obj = lhs;
    const ObjectRef& rhs_obj = rhs;
    return (*this)(lhs_obj, rhs_obj);
  }

  /*!
   * \brief Implementation for equality rule of var type objects(e.g. TypeVar, tir::Var).
   * \param lhs The left operand.
   * \param rhs The right operand.
   * \return the result.
   */
  bool FreeVarEqualImpl(const runtime::Object* lhs, const runtime::Object* rhs) const {
    // var need to be remapped, so it belongs to graph node.
    handler_->MarkGraphNode();
    // We only map free vars if they corresponds to the same address
    // or map free_var option is set to be true.
    return lhs == rhs || map_free_vars_;
  }

  /*! \return Get the internal handler. */
  Handler* operator->() const { return handler_; }

  /*! \brief Check if this reducer is tracing paths to the first mismatch. */
  bool IsPathTracingEnabled() const { return tracing_data_ != nullptr; }

  /*!
   * \brief Get the paths of the currently compared objects.
   *
   * Can only be called when `IsPathTracingEnabled()` is true.
   */
  const ObjectPathPair& GetCurrentObjectPaths() const;

  /*!
   * \brief Specify the object paths of a detected mismatch.
   *
   * Can only be called when `IsPathTracingEnabled()` is true.
   */
  void RecordMismatchPaths(const ObjectPathPair& paths) const;

 private:
  bool EnumAttrsEqual(int lhs, int rhs, const void* lhs_address, const void* rhs_address) const;

  bool ObjectAttrsEqual(const ObjectRef& lhs, const ObjectRef& rhs, bool map_free_vars,
                        const ObjectPathPair* paths) const;

  static void GetPathsFromAttrAddressesAndStoreMismatch(const void* lhs_address,
                                                        const void* rhs_address,
                                                        const PathTracingData* tracing_data);

  template <typename T>
  static bool CompareAttributeValues(const T& lhs, const T& rhs,
                                     const PathTracingData* tracing_data);

  /*! \brief Internal class pointer. */
  Handler* handler_ = nullptr;
  /*! \brief Pointer to the current path tracing context, or nullptr if path tracing is disabled.
   */
  const PathTracingData* tracing_data_ = nullptr;
  /*! \brief Whether or not to map free vars. */
  bool map_free_vars_ = false;
};

/*! \brief The default handler for equality testing.
 *
 * Users can derive from this class and override the DispatchSEqualReduce method,
 * to customize equality testing.
 */
class SEqualHandlerDefault : public SEqualReducer::Handler {
 public:
  SEqualHandlerDefault(bool assert_mode, Optional<ObjectPathPair>* first_mismatch);
  virtual ~SEqualHandlerDefault();

  bool SEqualReduce(const ObjectRef& lhs, const ObjectRef& rhs, bool map_free_vars,
                    const Optional<ObjectPathPair>& current_paths) override;
  void DeferFail(const ObjectPathPair& mismatch_paths) override;
  ObjectRef MapLhsToRhs(const ObjectRef& lhs) override;
  void MarkGraphNode() override;

  /*!
   * \brief The entry point for equality testing
   * \param lhs The left operand.
   * \param rhs The right operand.
   * \param map_free_vars Whether or not to remap variables if possible.
   * \return The equality result.
   */
  virtual bool Equal(const ObjectRef& lhs, const ObjectRef& rhs, bool map_free_vars);

 protected:
  /*!
   * \brief The dispatcher for equality testing of intermediate objects
   * \param lhs The left operand.
   * \param rhs The right operand.
   * \param map_free_vars Whether or not to remap variables if possible.
   * \param current_paths Optional paths to `lhs` and `rhs` objects, for error traceability.
   * \return The equality result.
   */
  virtual bool DispatchSEqualReduce(const ObjectRef& lhs, const ObjectRef& rhs, bool map_free_vars,
                                    const Optional<ObjectPathPair>& current_paths);

 private:
  // pimpl: hides the mutable comparison state from the header.
  class Impl;
  Impl* impl;
};

}  // namespace tvm
#endif  // TVM_NODE_STRUCTURAL_EQUAL_H_
https://github.com/zk-ml/tachikoma
include/tvm/node/structural_hash.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/node/structural_equal.h * \brief Structural hash class. */ #ifndef TVM_NODE_STRUCTURAL_HASH_H_ #define TVM_NODE_STRUCTURAL_HASH_H_ #include <tvm/node/functor.h> #include <tvm/runtime/data_type.h> #include <tvm/runtime/ndarray.h> #include <functional> #include <string> namespace tvm { /*! * \brief Hash definition of base value classes. 
*/ class BaseValueHash { public: size_t operator()(const double& key) const { return std::hash<double>()(key); } size_t operator()(const int64_t& key) const { return std::hash<int64_t>()(key); } size_t operator()(const uint64_t& key) const { return std::hash<uint64_t>()(key); } size_t operator()(const int& key) const { return std::hash<int>()(key); } size_t operator()(const bool& key) const { return std::hash<bool>()(key); } size_t operator()(const std::string& key) const { return std::hash<std::string>()(key); } size_t operator()(const runtime::DataType& key) const { return std::hash<int32_t>()(static_cast<int32_t>(key.code()) | (static_cast<int32_t>(key.bits()) << 8) | (static_cast<int32_t>(key.lanes()) << 16)); } template <typename ENum, typename = typename std::enable_if<std::is_enum<ENum>::value>::type> bool operator()(const ENum& key) const { return std::hash<size_t>()(static_cast<size_t>(key)); } }; /*! * \brief Content-aware structural hasing. * * The structural hash value is recursively defined in the DAG of IRNodes. * There are two kinds of nodes: * * - Normal node: the hash value is defined by its content and type only. * - Graph node: each graph node will be assigned a unique index ordered by the * first occurence during the visit. The hash value of a graph node is * combined from the hash values of its contents and the index. */ class StructuralHash : public BaseValueHash { public: // inheritate operator() using BaseValueHash::operator(); /*! * \brief Compute structural hashing value for an object. * \param key The left operand. * \return The hash value. */ TVM_DLL size_t operator()(const ObjectRef& key) const; }; /*! * \brief A Reducer class to reduce the structural hash value. * * The reducer will call the SEqualHash function of each objects recursively. 
* * A SEqualHash function will make a sequence of calls to the reducer to * indicate a sequence of child hash values that the reducer need to combine * inorder to obtain the hash value of the hash value of the parent object. * * Importantly, the reducer may not directly use recursive calls * to compute the hash values of child objects directly. * * Instead, it can store the necessary hash computing task into a stack * and reduce the result later. */ class SHashReducer { public: /*! \brief Internal handler that defines custom behaviors. */ class Handler { public: /*! * \brief Append hashed_value to the current sequence of hashes. * * \param hashed_value The hashed value */ virtual void SHashReduceHashedValue(size_t hashed_value) = 0; /*! * \brief Append hash value of key to the current sequence of hashes. * * \param key The object to compute hash from. * \param map_free_vars Whether to map free variables by their occurence number. */ virtual void SHashReduce(const ObjectRef& key, bool map_free_vars) = 0; /*! * \brief Apppend a hash value of free variable to the current sequence of hashes. * * \param var The var of interest. * \param map_free_vars Whether to map free variables by their occurence number. * * \note If map_free_vars is set to be true, * internally the handler can maintain a counter to encode free variables * by their order of occurence. This helps to resolve variable * mapping of function parameters and let binding variables. * * If map_free_vars is set to be false, the address of the variable will be used. */ virtual void SHashReduceFreeVar(const runtime::Object* var, bool map_free_vars) = 0; /*! * \brief Lookup a hash value for key * * \param key The hash key. * \param hashed_value the result hash value * * \return Whether there is already a pre-computed hash value. */ virtual bool LookupHashedValue(const ObjectRef& key, size_t* hashed_value) = 0; /*! * \brief Mark current comparison as graph node in hashing. 
* Graph node hash will depends on the graph structure. */ virtual void MarkGraphNode() = 0; }; /*! \brief default constructor */ SHashReducer() = default; /*! * \brief Constructor with a specific handler. * \param handler The equal handler for objects. * \param map_free_vars Whether to map free variables. */ explicit SHashReducer(Handler* handler, bool map_free_vars) : handler_(handler), map_free_vars_(map_free_vars) {} /*! * \brief Push hash of key to the current sequence of hash values. * \param key The key to be hashed. */ template <typename T, typename = typename std::enable_if<!std::is_base_of<ObjectRef, T>::value>::type> void operator()(const T& key) const { // handle normal values. handler_->SHashReduceHashedValue(BaseValueHash()(key)); } /*! * \brief Push hash of key to the current sequence of hash values. * \param key The key to be hashed. */ void operator()(const ObjectRef& key) const { return handler_->SHashReduce(key, map_free_vars_); } /*! * \brief Push hash of key to the current sequence of hash values. * \param key The key to be hashed. * \note This function indicate key could contain var defintions. */ void DefHash(const ObjectRef& key) const { return handler_->SHashReduce(key, true); } /*! * \brief Implementation for hash for a free var. * \param var The variable. * \return the result. */ void FreeVarHashImpl(const runtime::Object* var) const { handler_->SHashReduceFreeVar(var, map_free_vars_); } /*! \return Get the internal handler. */ Handler* operator->() const { return handler_; } private: /*! \brief Internal class pointer. */ Handler* handler_; /*! * \brief Whether or not to map free variables by their occurence * If the flag is false, then free variables will be mapped * by their in-memory address. */ bool map_free_vars_; }; /*! \brief The default handler for hash key computation * * Users can derive from this class and override the DispatchSHash method, * to customize hashing. 
*/ class SHashHandlerDefault : public SHashReducer::Handler { public: SHashHandlerDefault(); virtual ~SHashHandlerDefault(); void SHashReduceHashedValue(size_t hashed_value) override; void SHashReduce(const ObjectRef& key, bool map_free_vars) override; void SHashReduceFreeVar(const runtime::Object* var, bool map_free_vars) override; bool LookupHashedValue(const ObjectRef& key, size_t* hashed_value) override; void MarkGraphNode() override; /*! * \brief The entry point for hashing * \param object The object to be hashed. * \param map_free_vars Whether or not to remap variables if possible. * \return The hash result. */ virtual size_t Hash(const ObjectRef& object, bool map_free_vars); protected: /*! * \brief The dispatcher for hashing of intermediate objects * \param object An intermediate object to be hashed. * \param map_free_vars Whether or not to remap variables if possible. * \return The hash result. */ virtual void DispatchSHash(const ObjectRef& object, bool map_free_vars); private: class Impl; Impl* impl; }; class SEqualReducer; struct NDArrayContainerTrait { static constexpr const std::nullptr_t VisitAttrs = nullptr; static void SHashReduce(const runtime::NDArray::Container* key, SHashReducer hash_reduce); static bool SEqualReduce(const runtime::NDArray::Container* lhs, const runtime::NDArray::Container* rhs, SEqualReducer equal); }; } // namespace tvm #endif // TVM_NODE_STRUCTURAL_HASH_H_
https://github.com/zk-ml/tachikoma
include/tvm/parser/parser.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
#ifndef TVM_PARSER_PARSER_H_
#define TVM_PARSER_PARSER_H_
/*!
 * \file include/tvm/parser/parser.h
 * \brief A parser for TVM IR.
 */
#include <tvm/ir/module.h>
#include <tvm/ir/transform.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/registry.h>

#include <fstream>
#include <string>

namespace tvm {
namespace parser {

/*! \brief Table of out-of-band metadata referenced from the parsed text by name. */
using MetaTable = Map<String, Array<ObjectRef>>;

/*!
 * \brief Parse TVM IR text into an IRModule.
 * \param file_name The name used for spans/diagnostics; not opened for reading.
 * \param file_content The IR text to parse.
 * \param init_module Optional module to extend with the parsed definitions.
 * \param init_meta_table Optional metadata table available to the parsed text.
 * \return The parsed module.
 */
IRModule ParseModule(const std::string& file_name, const std::string& file_content,
                     const Optional<IRModule>& init_module = Optional<IRModule>(),
                     const MetaTable& init_meta_table = MetaTable());

/*!
 * \brief This pass pretty-prints mod then parses it back so as to establish spans and sources
 * for all Relay sub-expressions. This improves error and debugging diagnostics downstream for
 * modules constructed programmatically rather than textually.
 */
transform::Pass AnnotateSpans();

}  // namespace parser
}  // namespace tvm

#endif  // TVM_PARSER_PARSER_H_
https://github.com/zk-ml/tachikoma
include/tvm/parser/source_map.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*!
 * \file source_map.h
 * \brief A map from source names to source code.
 */
#ifndef TVM_PARSER_SOURCE_MAP_H_
#define TVM_PARSER_SOURCE_MAP_H_

#include <tvm/ir/span.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/registry.h>

#include <fstream>
#include <string>
#include <utility>
#include <vector>

namespace tvm {
namespace parser {

/*! \brief A program source in any language.
 *
 * Could represent the source from an ML framework or a source
 * representing a tvm::IRModule.
 */
class Source;

class SourceNode : public Object {
 public:
  /*! \brief The source name. */
  SourceName source_name;

  /*! \brief The raw source. */
  String source;

  /*! \brief A mapping of line breaks into the raw source. */
  std::vector<std::pair<int, int>> line_map;

  // override attr visitor
  // NOTE(review): line_map is intentionally not visited here — it looks like a
  // derived cache rebuilt by the Source constructor; confirm before adding it.
  void VisitAttrs(AttrVisitor* v) {
    v->Visit("source_name", &source_name);
    v->Visit("source", &source);
  }

  static constexpr const char* _type_key = "Source";
  TVM_DECLARE_FINAL_OBJECT_INFO(SourceNode, Object);
};

/*! \brief Managed reference to SourceNode. */
class Source : public ObjectRef {
 public:
  /*!
   * \brief Construct a source, computing the line map from the raw text.
   * \param src_name The name identifying this source.
   * \param source The raw source text.
   */
  TVM_DLL Source(SourceName src_name, std::string source);
  /*!
   * \brief Get a single line of the source text.
   * \param line The line number to fetch.
   * \return The text of that line.
   */
  TVM_DLL tvm::String GetLine(int line);

  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(Source, ObjectRef, SourceNode);
};

/*!
 * \brief A mapping from a unique source name to source fragment.
 */
class SourceMap;
/*!
 * \brief Stores locations in frontend source that generated a node.
 */
class SourceMapNode : public Object {
 public:
  /*! \brief The source mapping. */
  Map<SourceName, Source> source_map;

  // override attr visitor
  void VisitAttrs(AttrVisitor* v) { v->Visit("source_map", &source_map); }

  bool SEqualReduce(const SourceMapNode* other, SEqualReducer equal) const {
    return equal(source_map, other->source_map);
  }

  static constexpr const char* _type_key = "SourceMap";
  TVM_DECLARE_FINAL_OBJECT_INFO(SourceMapNode, Object);
};

/*! \brief Managed reference to SourceMapNode. */
class SourceMap : public ObjectRef {
 public:
  TVM_DLL SourceMap(Map<SourceName, Source> source_map);

  TVM_DLL SourceMap(std::initializer_list<std::pair<SourceName, Source>> source_map)
      : SourceMap(Map<SourceName, Source>(source_map)) {}

  /*! \brief Construct an empty source map. */
  TVM_DLL SourceMap() : SourceMap(Map<SourceName, Source>()) {}

  /*!
   * \brief Add a source to the map, keyed by its source name.
   * \param source The source to add.
   */
  void Add(const Source& source);

  SourceMapNode* operator->() {
    ICHECK(get() != nullptr);
    return static_cast<SourceMapNode*>(get_mutable());
  }

  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(SourceMap, ObjectRef, SourceMapNode);
};

}  // namespace parser
}  // namespace tvm
#endif  // TVM_PARSER_SOURCE_MAP_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/adt.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/relay/adt.h * \brief Algebraic data types for Relay */ #ifndef TVM_RELAY_ADT_H_ #define TVM_RELAY_ADT_H_ #include <tvm/ir/adt.h> #include <tvm/ir/attrs.h> #include <tvm/relay/base.h> #include <tvm/relay/expr.h> #include <tvm/relay/type.h> #include <functional> #include <string> #include <utility> namespace tvm { namespace relay { using Constructor = tvm::Constructor; using ConstructorNode = tvm::ConstructorNode; using TypeData = tvm::TypeData; using TypeDataNode = tvm::TypeDataNode; /*! \brief Base type for declaring relay pattern. */ class PatternNode : public RelayNode { public: static constexpr const char* _type_key = "relay.Pattern"; static constexpr const bool _type_has_method_sequal_reduce = true; static constexpr const bool _type_has_method_shash_reduce = true; TVM_DECLARE_BASE_OBJECT_INFO(PatternNode, Object); }; /*! * \brief Pattern is the base type for an ADT match pattern in Relay. * * Given an ADT value, a pattern might accept it and bind the pattern variable to some value * (typically a subnode of the input or the input). Otherwise, the pattern rejects the value. * * ADT pattern matching thus takes a list of values and binds to the first that accepts the value. 
 */
class Pattern : public ObjectRef {
 public:
  Pattern() {}
  explicit Pattern(ObjectPtr<tvm::Object> p) : ObjectRef(p) {}

  using ContainerType = PatternNode;
};

/*! \brief A wildcard pattern: Accepts all input and binds nothing. */
class PatternWildcard;
/*! \brief PatternWildcard container node */
class PatternWildcardNode : public PatternNode {
 public:
  void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("span", &span); }

  // All wildcards are structurally equal; there is no state to compare.
  bool SEqualReduce(const PatternNode* other, SEqualReducer equal) const { return true; }

  // Stateless node: nothing to fold into the hash.
  void SHashReduce(SHashReducer hash_reduce) const {}

  static constexpr const char* _type_key = "relay.PatternWildcard";
  TVM_DECLARE_FINAL_OBJECT_INFO(PatternWildcardNode, PatternNode);
};

class PatternWildcard : public Pattern {
 public:
  /* \brief Overload the default constructors. */
  TVM_DLL PatternWildcard();
  explicit PatternWildcard(ObjectPtr<Object> n) : Pattern(n) {}
  /* \brief Copy constructor. */
  // NOTE(review): copy/move special members are written out (rather than defaulted),
  // delegating through the ObjectPtr constructor above — confirm before simplifying.
  PatternWildcard(const PatternWildcard& pat) : PatternWildcard(pat.data_) {}
  /* \brief Move constructor. */
  PatternWildcard(PatternWildcard&& pat) : PatternWildcard(std::move(pat.data_)) {}
  /* \brief Copy assignment. */
  PatternWildcard& operator=(const PatternWildcard& other) {
    (*this).data_ = other.data_;
    return *this;
  }
  /* \brief Move assignment. */
  PatternWildcard& operator=(PatternWildcard&& other) {
    (*this).data_ = std::move(other.data_);
    return *this;
  }

  const PatternWildcardNode* operator->() const {
    return static_cast<const PatternWildcardNode*>(get());
  }

  using ContainerType = PatternWildcardNode;
};

/*! \brief A var pattern. Accept all input and bind to a var. */
class PatternVar;
/*! \brief PatternVar container node */
class PatternVarNode : public PatternNode {
 public:
  /*! \brief Variable that stores the matched value. */
  tvm::relay::Var var;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("var", &var);
    v->Visit("span", &span);
  }

  // The var is a binder, so use DefEqual/DefHash (definition-site equality)
  // rather than plain reference equality.
  bool SEqualReduce(const PatternVarNode* other, SEqualReducer equal) const {
    return equal.DefEqual(var, other->var);
  }

  void SHashReduce(SHashReducer hash_reduce) const { hash_reduce.DefHash(var); }

  static constexpr const char* _type_key = "relay.PatternVar";
  TVM_DECLARE_FINAL_OBJECT_INFO(PatternVarNode, PatternNode);
};

class PatternVar : public Pattern {
 public:
  /*!
   * \brief Constructor
   * \param var The var to construct a pattern
   */
  TVM_DLL explicit PatternVar(tvm::relay::Var var);

  TVM_DEFINE_OBJECT_REF_METHODS(PatternVar, Pattern, PatternVarNode);
};

/*! \brief A constructor pattern. Matches a value with the given constructor, binds recursively. */
class PatternConstructor;
/*! \brief PatternConstructor container node */
class PatternConstructorNode : public PatternNode {
 public:
  /*! Constructor matched by the pattern. */
  Constructor constructor;
  /*! Sub-patterns to match against each input to the constructor. */
  tvm::Array<Pattern> patterns;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("constructor", &constructor);
    v->Visit("patterns", &patterns);
    v->Visit("span", &span);
  }

  bool SEqualReduce(const PatternConstructorNode* other, SEqualReducer equal) const {
    return equal(constructor, other->constructor) && equal(patterns, other->patterns);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce(constructor);
    hash_reduce(patterns);
  }

  static constexpr const char* _type_key = "relay.PatternConstructor";
  TVM_DECLARE_FINAL_OBJECT_INFO(PatternConstructorNode, PatternNode);
};

class PatternConstructor : public Pattern {
 public:
  /*!
   * \brief Constructor
   * \param constructor The constructor of a pattern
   * \param patterns The sub-patterns for matching
   */
  TVM_DLL PatternConstructor(Constructor constructor, tvm::Array<Pattern> patterns);

  TVM_DEFINE_OBJECT_REF_METHODS(PatternConstructor, Pattern, PatternConstructorNode);
};

/*! \brief A tuple pattern. Matches a tuple, binds recursively. */
class PatternTuple;
/*! \brief PatternTuple container node */
class PatternTupleNode : public PatternNode {
 public:
  /* TODO(@jroesch): rename to field_pats */
  /*! Sub-patterns to match against each value of the tuple. */
  tvm::Array<Pattern> patterns;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("patterns", &patterns);
    v->Visit("span", &span);
  }

  bool SEqualReduce(const PatternTupleNode* other, SEqualReducer equal) const {
    return equal(patterns, other->patterns);
  }

  void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(patterns); }

  static constexpr const char* _type_key = "relay.PatternTuple";
  TVM_DECLARE_FINAL_OBJECT_INFO(PatternTupleNode, PatternNode);
};

class PatternTuple : public Pattern {
 public:
  /*!
   * \brief Constructor
   * \param patterns The sub-patterns to match against each value of the tuple
   */
  TVM_DLL explicit PatternTuple(tvm::Array<Pattern> patterns);

  TVM_DEFINE_OBJECT_REF_METHODS(PatternTuple, Pattern, PatternTupleNode);
};

/*! \brief A clause in a match expression. */
class Clause;
/*! \brief Clause container node. */
class ClauseNode : public Object {
 public:
  /*! \brief The pattern the clause matches. */
  Pattern lhs;
  /*! \brief The resulting value. */
  Expr rhs;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("lhs", &lhs);
    v->Visit("rhs", &rhs);
  }

  bool SEqualReduce(const ClauseNode* other, SEqualReducer equal) const {
    return equal(lhs, other->lhs) && equal(rhs, other->rhs);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce(lhs);
    hash_reduce(rhs);
  }

  static constexpr const char* _type_key = "relay.Clause";
  static constexpr const bool _type_has_method_sequal_reduce = true;
  static constexpr const bool _type_has_method_shash_reduce = true;
  TVM_DECLARE_FINAL_OBJECT_INFO(ClauseNode, Object);
};

class Clause : public ObjectRef {
 public:
  /*!
   * \brief Constructor
   * \param lhs The pattern matched by the clause.
   * \param rhs The resulting value
   */
  TVM_DLL explicit Clause(Pattern lhs, Expr rhs);

  TVM_DEFINE_OBJECT_REF_METHODS(Clause, ObjectRef, ClauseNode);
  TVM_DEFINE_OBJECT_REF_COW_METHOD(ClauseNode);
};

/*!
 * \brief Returns \p clause with the given properties. A null property denotes 'no change'.
 * Returns \p clause if all properties are unchanged. Otherwise, returns a copy with the new
 * fields.
 */
Clause WithFields(Clause clause, Optional<Pattern> opt_lhs = Optional<Pattern>(),
                  Optional<Expr> opt_rhs = Optional<Expr>());

/*! \brief ADT pattern matching expression. */
class Match;
/*! \brief Match container node. */
class MatchNode : public ExprNode {
 public:
  /*! \brief The input being deconstructed. */
  Expr data;

  /*! \brief The match node clauses. */
  tvm::Array<Clause> clauses;

  /*! \brief Should this match be complete (cover all cases)?
   *  If yes, the type checker will generate an error if there are any missing cases.
   */
  bool complete;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("data", &data);
    v->Visit("clauses", &clauses);
    v->Visit("complete", &complete);
    v->Visit("virtual_device_", &virtual_device_);
    v->Visit("span", &span);
    v->Visit("_checked_type_", &checked_type_);
  }

  bool SEqualReduce(const MatchNode* other, SEqualReducer equal) const {
    // Marked as a graph node so structural equality respects sharing.
    equal->MarkGraphNode();
    return equal(data, other->data) && equal(clauses, other->clauses) &&
           equal(complete, other->complete);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce->MarkGraphNode();
    hash_reduce(data);
    hash_reduce(clauses);
    hash_reduce(complete);
  }

  static constexpr const char* _type_key = "relay.Match";
  TVM_DECLARE_FINAL_OBJECT_INFO(MatchNode, ExprNode);
};

class Match : public Expr {
 public:
  /*!
   * \brief Constructor
   * \param data the input being deconstructed.
   * \param clauses The clauses for matching.
   * \param complete Indicate if this match is complete.
   * \param span The span of the expression.
   */
  TVM_DLL Match(Expr data, tvm::Array<Clause> clauses, bool complete = true, Span span = Span());

  TVM_DEFINE_OBJECT_REF_METHODS(Match, RelayExpr, MatchNode);
  TVM_DEFINE_OBJECT_REF_COW_METHOD(MatchNode);
};

/*!
 * \brief Returns \p match with the given properties. A null property denotes 'no change'.
 * Returns \p match if all properties are unchanged. Otherwise, returns a copy with the new
 * fields.
 */
Match WithFields(Match match, Optional<Expr> opt_data = Optional<Expr>(),
                 Optional<Array<Clause>> opt_clauses = Optional<Array<Clause>>(),
                 Optional<Bool> opt_complete = Optional<Bool>(),
                 Optional<Span> opt_span = Optional<Span>());

}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_ADT_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/analysis.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/analysis.h
 * \brief The set of Relay analysis passes written in C++.
 */
#ifndef TVM_RELAY_ANALYSIS_H_
#define TVM_RELAY_ANALYSIS_H_

#include <tvm/ir/module.h>
#include <tvm/relay/adt.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/function.h>
#include <tvm/relay/type.h>
#include <tvm/runtime/logging.h>

#include <string>
#include <unordered_map>

namespace tvm {
namespace relay {

/*!
 * \brief Check that types are well kinded by applying "kinding rules".
 *
 * This pass ensures we do not do things that violate the design of the
 * type system when writing down types.
 *
 * For example tensors are not allowed to contain functions in Relay.
 *
 * We check this by ensuring the `dtype` field of a Tensor always contains
 * a data type such as `int`, `float`, `uint`.
 *
 * \param t The type to check.
 * \param mod The global module.
 * \param diag_ctx The Diagnostic context.
 *
 * \return The kind of the passed type.
 */
TVM_DLL Kind KindCheck(const Type& t, const IRModule& mod,
                       Optional<DiagnosticContext> diag_ctx = Optional<DiagnosticContext>());

/*!
 * \brief Check whether an expression is constant.
 *
 * If the inputs of an expression are all constant, it means the expression
 * itself is constant also.
 *
 * \param e the expression.
 *
 * \return whether the expression is constant.
 */
TVM_DLL bool ConstantCheck(const Expr& e);

/*!
 * \brief Check whether an expression is in the basic block normal form.
 *
 * \param e the expression.
 *
 * \return whether the expression is in the basic block normal form.
 */
TVM_DLL bool BasicBlockNormalFormCheck(const Expr& e);

/*!
 * \brief Check that each Var is only bound once.
 *
 * For example, the expression `let x = 1 in let x = 2 in 3` bound x twice.
 *
 * `let f = (x -> x) in let g = (x -> x + 1) in f(g(2))` also bound x twice,
 * although x is not shadowed.
 *
 * \param expr the expression to check.
 * \param diag_ctx the diagnostic context
 *
 * \return true iff all Var in expr is bound at most once.
 */
TVM_DLL bool WellFormed(const Expr& expr,
                        Optional<DiagnosticContext> diag_ctx = Optional<DiagnosticContext>());

/*!
 * \brief Get all bound variables from expression expr.
 *
 * Bound variables are all variables that are declared in the expr.
 * They only have meaning inside that expr, and can only be used in it.
 *
 * \param expr the expression.
 *
 * \return List of bound vars, in the PostDFS order in the expression.
 */
TVM_DLL tvm::Array<Var> BoundVars(const Expr& expr);

/*!
 * \brief Get all bound variables from pattern pat.
 *
 * Bound variables are all variables that got bound by the pat.
 * They only have meaning inside that expr, and can only be used in it.
 *
 * \param pat the Pattern.
 *
 * \return List of bound vars, in the PostDFS order in the expression.
 */
TVM_DLL tvm::Array<Var> BoundVars(const Pattern& pat);

/*!
 * \brief Get free variables from expression expr.
 *
 * Free variables are variables that are not bound by a
 * let or a function parameter in the context.
 *
 * \param expr the expression.
 *
 * \return List of free vars, in the PostDFS order in the expression.
 */
TVM_DLL tvm::Array<Var> FreeVars(const Expr& expr);

/*!
 * \brief Get all variables from expression expr.
 *
 * \param expr the expression.
 *
 * \return List of all vars, in the PostDFS order in the expression.
 */
TVM_DLL tvm::Array<Var> AllVars(const Expr& expr);

/*!
 * \brief Get free TypeVars from expression expr.
 *
 * Free type parameters are type parameters that are not bound by a function
 * type in the context.
 *
 * \param expr the expression.
 * \param mod the module.
 *
 * \return List of free vars, in the PostDFS order visited by expr.
 */
TVM_DLL tvm::Array<TypeVar> FreeTypeVars(const Expr& expr, const IRModule& mod);

/*!
 * \brief Get free TypeVars from type t.
 *
 * Free type parameters are type parameters that are not bound by a function
 * type in the context.
 *
 * \param t the type.
 * \param mod the module.
 *
 * \return List of free type vars, in the PostDFS order visited by type.
 */
TVM_DLL tvm::Array<TypeVar> FreeTypeVars(const Type& t, const IRModule& mod);

/*!
 * \brief Get all bound type variables from expression expr.
 *
 * Bound variables are all type variables that are declared in the expr.
 * They only have meaning inside that expr, and can only be used in it.
 *
 * \param expr the expression.
 * \param mod the module.
 *
 * \return List of bound type vars, in the PostDFS order in the expression.
 */
TVM_DLL tvm::Array<TypeVar> BoundTypeVars(const Expr& expr, const IRModule& mod);

/*!
 * \brief Get all bound type variables from type t.
 *
 * Bound variables are all type variables that are declared in the type.
 * They only have meaning inside that type, and can only be used in it.
 *
 * \param t the type
 * \param mod the module.
 *
 * \return List of bound type vars, in the PostDFS order visited by type.
 */
TVM_DLL tvm::Array<TypeVar> BoundTypeVars(const Type& t, const IRModule& mod);

/*!
 * \brief Get all type variables in expression expr.
 *
 * \param expr the expression.
 * \param mod the module.
 *
 * \return List of type vars, in the PostDFS order in the expression.
 */
TVM_DLL tvm::Array<TypeVar> AllTypeVars(const Expr& expr, const IRModule& mod);

/*!
 * \brief Get all type variables in type t.
 *
 * \param t the type.
 * \param mod the module.
 *
 * \return List of type vars, in the PostDFS order visited by type.
 */
TVM_DLL tvm::Array<TypeVar> AllTypeVars(const Type& t, const IRModule& mod);

/*!
 * \brief Finds cases that the given match expression does not catch, if any.
 *
 * \param match the match expression to test
 *
 * \param mod The module used for accessing global type var definitions, can be None.
 *
 * \return Returns a list of cases (as patterns) that are not handled by the match
 * expression.
 */
TVM_DLL Array<Pattern> UnmatchedCases(const Match& match, const IRModule& mod);

/*!
 * \brief Get reference counter of each internal ExprNode in body.
 *
 * \param body The body expression.
 *
 * \return The reference count mapping.
 */
TVM_DLL std::unordered_map<const Object*, size_t> GetExprRefCount(const Expr& body);

/*!
 * \brief Get the updated module for collecting calibration data.
 *
 * \param mod The module to be updated.
 *
 * \return The updated module.
 */
TVM_DLL IRModule GetCalibrateModule(IRModule mod);

/*!
 * \brief Get the output map between subgraphs and their inputs/outputs.
 *
 * \param mod The module for running calibration.
 *
 * \return The mapping between a subgraph name and its position in the output tuple.
 */
TVM_DLL Map<GlobalVar, Array<Integer>> GetCalibrateOutputMap(const IRModule& mod);

}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_ANALYSIS_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/attrs/algorithm.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/attrs/algorithm.h
 * \brief Auxiliary attributes for algorithm (sorting/searching) operators.
 */
#ifndef TVM_RELAY_ATTRS_ALGORITHM_H_
#define TVM_RELAY_ATTRS_ALGORITHM_H_

#include <tvm/ir/attrs.h>
#include <tvm/relay/base.h>
#include <tvm/relay/expr.h>

#include <string>

namespace tvm {
namespace relay {

/*! \brief Attributes used in argsort operators */
struct ArgsortAttrs : public tvm::AttrsNode<ArgsortAttrs> {
  int axis;
  bool is_ascend;
  DataType dtype;

  TVM_DECLARE_ATTRS(ArgsortAttrs, "relay.attrs.ArgsortAttrs") {
    TVM_ATTR_FIELD(axis).set_default(-1).describe(
        "Axis along which to sort the input tensor."
        "If not given, the flattened array is used.");
    TVM_ATTR_FIELD(is_ascend).set_default(true).describe(
        "Whether to sort in ascending or descending order."
        "By default, sort in ascending order");
    TVM_ATTR_FIELD(dtype)
        .set_default(NullValue<DataType>())
        .describe("DType of the output indices.");
  }
};

/*! \brief Attributes used in topk operators */
struct TopKAttrs : public tvm::AttrsNode<TopKAttrs> {
  Optional<Integer> k;
  int axis;
  bool is_ascend;
  std::string ret_type;
  DataType dtype;

  // NOTE: the registered type key intentionally stays "relay.attrs.TopkAttrs"
  // (lowercase 'k'); changing it would break the attrs registry.
  TVM_DECLARE_ATTRS(TopKAttrs, "relay.attrs.TopkAttrs") {
    TVM_ATTR_FIELD(k).describe("Number of top elements to select");
    TVM_ATTR_FIELD(axis).set_default(-1).describe("Axis along which to sort the input tensor.");
    TVM_ATTR_FIELD(ret_type).set_default("both").describe(
        "The return type [both, values, indices]."
        "both - return both top k data and indices."
        "values - return top k data only."
        "indices - return top k indices only.");
    TVM_ATTR_FIELD(is_ascend).set_default(false).describe(
        "Whether to sort in ascending or descending order."
        "By default, sort in descending order");
    TVM_ATTR_FIELD(dtype)
        .set_default(NullValue<DataType>())
        .describe("Data type of the output indices.");
  }
};

/*! \brief Attributes used in searchsorted operators */
struct SearchSortedAttrs : public tvm::AttrsNode<SearchSortedAttrs> {
  bool right;
  DataType dtype;

  TVM_DECLARE_ATTRS(SearchSortedAttrs, "relay.attrs.SearchSortedAttrs") {
    TVM_ATTR_FIELD(right).set_default(false).describe(
        "Controls which index is returned if a value lands exactly on one of sorted values. If "
        " false, the index of the first suitable location found is given. If true, return the "
        "last such index. If there is no suitable index, return either 0 or N (where N is the "
        "size of the innermost dimension).");
    TVM_ATTR_FIELD(dtype)
        .set_default(DataType::Int(32))
        .describe("Data type of the output indices.");
  }
};

}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_ATTRS_ALGORITHM_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/attrs/annotation.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/relay/attrs/annotation.h * \brief Attribute for annotation operators. */ #ifndef TVM_RELAY_ATTRS_ANNOTATION_H_ #define TVM_RELAY_ATTRS_ANNOTATION_H_ #include <tvm/ir/attrs.h> #include <string> namespace tvm { namespace relay { /*! * \brief Annotate an expression to be cast into specific data type. */ struct CastHintAttrs : public tvm::AttrsNode<CastHintAttrs> { DataType dtype; TVM_DECLARE_ATTRS(CastHintAttrs, "relay.attrs.CastHintAttrs") { TVM_ATTR_FIELD(dtype).describe("The data type denoted to be cast."); } }; /*! * \brief Options for the operators used to annotate a compiler. */ struct CompilerAttrs : public tvm::AttrsNode<CompilerAttrs> { /*! \brief A 3rd party compiler for code generation. */ std::string compiler; TVM_DECLARE_ATTRS(CompilerAttrs, "relay.attrs.CompilerAttrs") { TVM_ATTR_FIELD(compiler).describe("A 3rd party compiler used for code generation."); } }; } // namespace relay } // namespace tvm #endif // TVM_RELAY_ATTRS_ANNOTATION_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/attrs/bitserial.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/relay/attrs/bitserial.h * \brief Auxiliary attributes for bitserial operators. */ #ifndef TVM_RELAY_ATTRS_BITSERIAL_H_ #define TVM_RELAY_ATTRS_BITSERIAL_H_ #include <tvm/ir/attrs.h> #include <tvm/relay/base.h> #include <string> namespace tvm { namespace relay { /*! \brief Attributes used in bitpack operators */ struct BitPackAttrs : public tvm::AttrsNode<BitPackAttrs> { int bits; int pack_axis; int bit_axis; DataType pack_type; std::string name; TVM_DECLARE_ATTRS(BitPackAttrs, "relay.attrs.BitPackAttrs") { TVM_ATTR_FIELD(bits).set_default(1).describe("Number of bits to quantize with."); TVM_ATTR_FIELD(pack_axis).set_default(1).describe( "Axis that should be compressed, typically channels."); TVM_ATTR_FIELD(bit_axis).set_default(-1).describe("New axis for packed bits."); TVM_ATTR_FIELD(pack_type) .set_default(NullValue<DataType>()) .describe("Type of int to pack bits into."); TVM_ATTR_FIELD(name).set_default("BitPack").describe("Name of operation."); } }; /*! 
\brief Attribues used in bitserial convolution operators */ struct BinaryConv2DAttrs : public tvm::AttrsNode<BinaryConv2DAttrs> { Array<IndexExpr> strides; Array<IndexExpr> padding; IndexExpr channels; Array<IndexExpr> kernel_size; int activation_bits; int weight_bits; std::string data_layout; std::string kernel_layout; DataType pack_dtype; DataType out_dtype; bool unipolar; TVM_DECLARE_ATTRS(BinaryConv2DAttrs, "relay.attrs.BinaryConv2DAttrs") { TVM_ATTR_FIELD(strides) .set_default(Array<IndexExpr>({1, 1})) .describe("Specifies the strides of the convolution."); TVM_ATTR_FIELD(padding) .set_default(Array<IndexExpr>({0, 0})) .describe( "If padding is non-zero the input is implicitly zero-padded" "on both sides for padding number of points."); TVM_ATTR_FIELD(kernel_size) .set_default(Array<IndexExpr>({3, 3})) .describe("Specifies the dimensions of the convolution window."); TVM_ATTR_FIELD(channels) .set_default(NullValue<IndexExpr>()) .describe("Number of output channels, needed for shape inference."); TVM_ATTR_FIELD(activation_bits) .set_default(1) .describe("Number of bits activation should be packed with."); TVM_ATTR_FIELD(weight_bits) .set_default(1) .describe("Number of bits kernel should be packed with."); TVM_ATTR_FIELD(data_layout) .set_default("NCHW") .describe("Dimension ordering of input data, can be 'NCHW' or NHWC'."); TVM_ATTR_FIELD(kernel_layout) .set_default("OIHW") .describe("Dimension ordering of kernel data, can be 'OIHW' or HWIO'."); TVM_ATTR_FIELD(pack_dtype) .set_default(NullValue<DataType>()) .describe("Datatype to pack bits into."); TVM_ATTR_FIELD(out_dtype).set_default(NullValue<DataType>()).describe("Output datatype."); TVM_ATTR_FIELD(unipolar).set_default(true).describe( "Whether to use unipolar or bipolar quantization."); } }; /*~ \brief Attributes for bitserial dense operator */ struct BinaryDenseAttrs : public tvm::AttrsNode<BinaryDenseAttrs> { IndexExpr units; int data_bits; int weight_bits; DataType pack_dtype; DataType out_dtype; bool 
unipolar; TVM_DECLARE_ATTRS(BinaryDenseAttrs, "relay.attrs.BinaryDenseAttrs") { TVM_ATTR_FIELD(units).describe("Number of hidden units of the dense transformation."); TVM_ATTR_FIELD(data_bits).set_default(1).describe( "Number of bits to pack for incoming tensor."); TVM_ATTR_FIELD(weight_bits) .set_default(1) .describe("Number of bits to pack for weight tensor."); TVM_ATTR_FIELD(pack_dtype) .set_default(NullValue<DataType>()) .describe("Datatype to pack bits into before computation."); TVM_ATTR_FIELD(out_dtype).set_default(NullValue<DataType>()).describe("Output data type."); TVM_ATTR_FIELD(unipolar).set_default(true).describe( "Whether to use unipolar or bipolar quantization for inputs."); } }; } // namespace relay } // namespace tvm #endif // TVM_RELAY_ATTRS_BITSERIAL_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/attrs/call.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/relay/attrs/call.h * \brief Attribute for call_lowered operator. */ #ifndef TVM_RELAY_ATTRS_CALL_H_ #define TVM_RELAY_ATTRS_CALL_H_ #include <tvm/ir/attrs.h> #include <string> namespace tvm { namespace relay { /*! * \brief Metadata for calls to TIR functions, useful for program analysis crossing Relay and TIR. */ struct CallLoweredAttrs : public tvm::AttrsNode<CallLoweredAttrs> { /*! \brief Additional metadata attached to the call node. Should be replaced by explict fields. */ Map<String, ObjectRef> metadata; TVM_DECLARE_ATTRS(CallLoweredAttrs, "relay.attrs.CallLoweredAttrs") { TVM_ATTR_FIELD(metadata) .describe("Metadata attached to the lowered function call.") .set_default(Map<String, ObjectRef>()); } }; } // namespace relay } // namespace tvm #endif // TVM_RELAY_ATTRS_CALL_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/attrs/debug.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/relay/attrs/debug.h * \brief Auxiliary attributes for debug operators. */ #ifndef TVM_RELAY_ATTRS_DEBUG_H_ #define TVM_RELAY_ATTRS_DEBUG_H_ #include <tvm/ir/attrs.h> #include <tvm/ir/env_func.h> #include <string> namespace tvm { namespace relay { /*! * \brief Options for the debug operators. */ struct DebugAttrs : public tvm::AttrsNode<DebugAttrs> { EnvFunc debug_func; TVM_DECLARE_ATTRS(DebugAttrs, "relay.attrs.DebugAttrs") { TVM_ATTR_FIELD(debug_func).describe("The function to use when debugging."); } }; } // namespace relay } // namespace tvm #endif // TVM_RELAY_ATTRS_DEBUG_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/attrs/device_copy.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/relay/attrs/device_copy.h * \brief Attribute for the device copy operator. */ #ifndef TVM_RELAY_ATTRS_DEVICE_COPY_H_ #define TVM_RELAY_ATTRS_DEVICE_COPY_H_ #include <tvm/ir/attrs.h> #include <tvm/target/virtual_device.h> #include <string> namespace tvm { namespace relay { /*! * \brief Options for the device copy operators. */ struct DeviceCopyAttrs : public tvm::AttrsNode<DeviceCopyAttrs> { VirtualDevice src_virtual_device = VirtualDevice::FullyUnconstrained(); VirtualDevice dst_virtual_device = VirtualDevice::FullyUnconstrained(); TVM_DECLARE_ATTRS(DeviceCopyAttrs, "relay.attrs.DeviceCopyAttrs") { TVM_ATTR_FIELD(src_virtual_device) .describe("The (virtual) device and scope where the op copies data from."); TVM_ATTR_FIELD(dst_virtual_device) .describe("The (virtual) device and scope where the op copies data to."); } }; } // namespace relay } // namespace tvm #endif // TVM_RELAY_ATTRS_DEVICE_COPY_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/attrs/image.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/relay/attrs/image.h * \brief Auxiliary attributes for image operators. */ #ifndef TVM_RELAY_ATTRS_IMAGE_H_ #define TVM_RELAY_ATTRS_IMAGE_H_ #include <tvm/ir/attrs.h> #include <tvm/relay/base.h> #include <string> namespace tvm { namespace relay { /*! \brief Attributes used in image resize1d operator */ struct Resize1DAttrs : public tvm::AttrsNode<Resize1DAttrs> { Array<IndexExpr> size; Array<FloatImm> roi; std::string layout; std::string method; std::string coordinate_transformation_mode; std::string rounding_method; double cubic_alpha; int cubic_exclude; double extrapolation_value; DataType out_dtype; TVM_DECLARE_ATTRS(Resize1DAttrs, "relay.attrs.Resize1DAttrs") { TVM_ATTR_FIELD(size).set_default(NullValue<Array<IndexExpr>>()).describe("Output Size."); TVM_ATTR_FIELD(roi) .set_default(NullValue<Array<FloatImm>>()) .describe("Region of Interest for coordinate transformation mode 'tf_crop_and_resize'"); TVM_ATTR_FIELD(layout).set_default("NCW").describe( "Dimension ordering of input data. Can be 'NCW', 'NWC', etc." "'N', 'C', 'W' stands for batch, channel and width" "dimensions respectively. 
Resize is applied on the" "'W' dimension."); TVM_ATTR_FIELD(method).set_default("linear").describe( "Specify the mode to use for scaling." "nearest_neighbor - Nearest Neighbor" "linear - Linear Interpolation" "cubic - Cubic Interpolation"); TVM_ATTR_FIELD(coordinate_transformation_mode) .set_default("half_pixel") .describe( "Describes how to transform the coordinate in the resized tensor" "to the coordinate in the original tensor." "Refer to the ONNX Resize operator specification for details" "Available options are half_pixel, align_corners and asymmetric"); TVM_ATTR_FIELD(rounding_method) .set_default("round") .describe( "indicates how to find the \"nearest\" pixel in nearest_neighbor method" "Available options are round, floor, and ceil."); TVM_ATTR_FIELD(cubic_alpha) .set_default(-0.5) .describe("Spline Coefficient for cubic interpolation"); TVM_ATTR_FIELD(cubic_exclude) .set_default(0) .describe("Flag to exclude exterior of the image during cubic interpolation"); TVM_ATTR_FIELD(extrapolation_value) .set_default(0.0) .describe("Value to return when roi is outside of the image"); TVM_ATTR_FIELD(out_dtype).set_default(NullValue<DataType>()).describe("Output data type."); } }; /*! \brief Attributes used in image resize2d operator */ struct Resize2DAttrs : public tvm::AttrsNode<Resize2DAttrs> { Array<IndexExpr> size; Array<FloatImm> roi; std::string layout; std::string method; std::string coordinate_transformation_mode; std::string rounding_method; double cubic_alpha; int cubic_exclude; double extrapolation_value; DataType out_dtype; TVM_DECLARE_ATTRS(Resize2DAttrs, "relay.attrs.Resize2DAttrs") { TVM_ATTR_FIELD(size).set_default(NullValue<Array<IndexExpr>>()).describe("Output Size."); TVM_ATTR_FIELD(roi) .set_default(NullValue<Array<FloatImm>>()) .describe("Region of Interest for coordinate transformation mode 'tf_crop_and_resize'"); TVM_ATTR_FIELD(layout).set_default("NCHW").describe( "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc." 
"'N', 'C', 'H', 'W' stands for batch, channel, height, and width" "dimensions respectively. Resize is applied on the 'H' and" "'W' dimensions."); TVM_ATTR_FIELD(method).set_default("linear").describe( "Specify the mode to use for scaling." "nearest_neighbor - Nearest Neighbor" "linear - Bilinear Interpolation" "cubic - Bicubic Interpolation"); TVM_ATTR_FIELD(coordinate_transformation_mode) .set_default("half_pixel") .describe( "Describes how to transform the coordinate in the resized tensor" "to the coordinate in the original tensor." "Refer to the ONNX Resize operator specification for details" "Available options are half_pixel, align_corners and asymmetric"); TVM_ATTR_FIELD(rounding_method) .set_default("round") .describe( "indicates how to find the \"nearest\" pixel in nearest_neighbor method" "Available options are round, floor, and ceil."); TVM_ATTR_FIELD(cubic_alpha) .set_default(-0.5) .describe("Spline Coefficient for Bicubic Interpolation"); TVM_ATTR_FIELD(cubic_exclude) .set_default(0) .describe("Flag to exclude exterior of the image during bicubic interpolation"); TVM_ATTR_FIELD(extrapolation_value) .set_default(0.0) .describe("Value to return when roi is outside of the image"); TVM_ATTR_FIELD(out_dtype).set_default(NullValue<DataType>()).describe("Output data type."); } }; /*! 
\brief Attributes used in image resize3d operator */
// NOTE(review): throughout these structs, adjacent describe() string literals
// concatenate with no separating space, so the rendered help text runs words
// together (e.g. "etc.'N', 'C'"). Cosmetic only; fixing would change runtime
// strings.
struct Resize3DAttrs : public tvm::AttrsNode<Resize3DAttrs> {
  Array<IndexExpr> size;   // target output spatial size (D, H, W)
  Array<FloatImm> roi;     // only used by 'tf_crop_and_resize' coordinate mode
  std::string layout;
  std::string method;
  std::string coordinate_transformation_mode;
  std::string rounding_method;
  double cubic_alpha;      // spline coefficient for cubic interpolation
  int cubic_exclude;
  double extrapolation_value;
  DataType out_dtype;

  TVM_DECLARE_ATTRS(Resize3DAttrs, "relay.attrs.Resize3DAttrs") {
    TVM_ATTR_FIELD(size).set_default(NullValue<Array<IndexExpr>>()).describe("Output Size.");
    TVM_ATTR_FIELD(roi)
        .set_default(NullValue<Array<FloatImm>>())
        .describe("Region of Interest for coordinate transformation mode 'tf_crop_and_resize'");
    TVM_ATTR_FIELD(layout).set_default("NCDHW").describe(
        "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc."
        "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
        "dimensions respectively. Resize3d is applied on the 'D', 'H' and"
        "'W' dimensions.");
    TVM_ATTR_FIELD(method).set_default("linear").describe(
        "Specify the mode to use for scaling."
        "nearest_neighbor - Nearest Neighbor"
        "linear - Trilinear Interpolation"
        "cubic - Tricubic Interpolation");
    TVM_ATTR_FIELD(coordinate_transformation_mode)
        .set_default("half_pixel")
        .describe(
            "Describes how to transform the coordinate in the resized tensor"
            "to the coordinate in the original tensor."
            "Refer to the ONNX Resize operator specification for details"
            "Available options are half_pixel, align_corners and asymmetric");
    TVM_ATTR_FIELD(rounding_method)
        .set_default("round")
        .describe(
            "indicates how to find the \"nearest\" pixel in nearest_neighbor method"
            "Available options are round, floor, and ceil.");
    // -0.5 is the common bicubic spline default — presumably chosen to match
    // ONNX/PyTorch; confirm against the resize consumers.
    TVM_ATTR_FIELD(cubic_alpha)
        .set_default(-0.5)
        .describe("Spline Coefficient for Tricubic Interpolation");
    TVM_ATTR_FIELD(cubic_exclude)
        .set_default(0)
        .describe("Flag to exclude exterior of the image during tricubic interpolation");
    TVM_ATTR_FIELD(extrapolation_value)
        .set_default(0.0)
        .describe("Value to return when roi is outside of the image");
    TVM_ATTR_FIELD(out_dtype).set_default(NullValue<DataType>()).describe("Output data type.");
  }
};

/*! \brief Attributes used in image crop_and_resize operator */
struct CropAndResizeAttrs : public tvm::AttrsNode<CropAndResizeAttrs> {
  Array<IndexExpr> crop_size;  // output (H, W) of each crop
  std::string layout;
  std::string method;
  double extrapolation_value;
  DataType out_dtype;

  TVM_DECLARE_ATTRS(CropAndResizeAttrs, "relay.attrs.CropAndResizeAttrs") {
    TVM_ATTR_FIELD(crop_size).set_default(NullValue<Array<IndexExpr>>()).describe("Target Size.");
    TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
        "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
        "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
        "dimensions respectively. Resize is applied on the 'H' and"
        "'W' dimensions.");
    // Unlike Resize2D/3D ("linear"/"cubic"), this op names its methods
    // "bilinear"/"nearest_neighbor" — naming is intentionally TF-style here.
    TVM_ATTR_FIELD(method)
        .set_default("bilinear")
        .describe(
            "Specify the mode to use for scaling."
            "nearest_neighbor - Nearest Neighbor"
            "bilinear - Bilinear Interpolation");
    TVM_ATTR_FIELD(extrapolation_value)
        .set_default(0.0)
        .describe("Specify value for extrapolation.");
    TVM_ATTR_FIELD(out_dtype).set_default(NullValue<DataType>()).describe("Output data type.");
  }
};

/*!
\brief Attributes used in dilation operators */ struct Dilation2DAttrs : public tvm::AttrsNode<Dilation2DAttrs> { Array<IndexExpr> strides; Array<IndexExpr> padding; Array<IndexExpr> dilations; std::string data_layout; std::string kernel_layout; DataType out_dtype; TVM_DECLARE_ATTRS(Dilation2DAttrs, "relay.attrs.Dilation2DAttrs") { TVM_ATTR_FIELD(strides) .set_default(Array<IndexExpr>({1, 1})) .describe("Specifies the strides of the sliding window. [stride_height, stride_width]."); TVM_ATTR_FIELD(padding) .set_default(Array<IndexExpr>({0, 0})) .describe( "If padding is non-zero, then the input is implicitly zero-padded" "Padding support both symmetric and asymmetric as" "one int : same padding used on all sides" "two int : bottom, right will use same padding as top, left" "four int : padding width in the order of (top, left, bottom, right)"); TVM_ATTR_FIELD(dilations) .set_default(Array<IndexExpr>({1, 1})) .describe("Specifies the dilation rate to use. [dilation_height, dilation_width]"); TVM_ATTR_FIELD(data_layout) .set_default("NCHW") .describe( "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc." "'N', 'C', 'H', 'W' stands for batch, channel, height, and width" "dimensions respectively. Convolution is applied on the 'H' and" "'W' dimensions."); TVM_ATTR_FIELD(kernel_layout) .set_default("IHW") .describe( "Dimension ordering of weight. Can be 'IHW', 'HWI', etc." "'I', 'H', 'W' stands for input_channel, height, and width" "dimensions respectively."); TVM_ATTR_FIELD(out_dtype) .set_default(NullValue<DataType>()) .describe("Output data type, set to explicit type under mixed precision setting"); } }; /*! \brief Attributes used in image affine_grid operator */ struct AffineGridAttrs : public tvm::AttrsNode<AffineGridAttrs> { Array<IndexExpr> target_shape; TVM_DECLARE_ATTRS(AffineGridAttrs, "relay.attrs.AffineGridAttrs") { TVM_ATTR_FIELD(target_shape).describe("Specifies the output shape (H, W)."); } }; /*! 
\brief Attributes used in image grid_sample operator */
// Covers both 2D (NCHW) and 3D (NCDHW) grid sampling; the method/padding_mode
// semantics mirror PyTorch's grid_sample — TODO confirm against the op impl.
struct GridSampleAttrs : public tvm::AttrsNode<GridSampleAttrs> {
  String method;        // interpolation: nearest / bilinear / bicubic
  String layout;        // input layout, selects 2D vs 3D sampling
  String padding_mode;  // out-of-range grid handling: zeros / border / reflection
  bool align_corners;

  TVM_DECLARE_ATTRS(GridSampleAttrs, "relay.attrs.GridSampleAttrs") {
    TVM_ATTR_FIELD(method)
        .set_default("bilinear")
        .describe(
            "Specify the mode to use for scaling."
            "nearest - 2D or 3D Nearest Interpolation."
            "bilinear - '2D Bilinear' or '3D Trilinear' Interpolation."
            "bicubic - 2D Bicubic Interpolation.");
    TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
        "Dimension ordering of input data. Can be 'NCHW', 'NCDHW', etc."
        "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
        "dimensions respectively."
        "2D Resize is applied on the 'H' and 'W' dimensions."
        "3D Resize is applied on the 'D' and 'H' and 'W' dimensions.");
    TVM_ATTR_FIELD(padding_mode)
        .set_default("zeros")
        .describe(
            "If :attr:'grid' has values outside the range of '[-1, 1]', the corresponding"
            "outputs are handled as defined by padding_mode. Options are"
            "padding_mode='zeros': use '0' for out-of-bound grid locations,"
            "padding_mode='border': use border values for out-of-bound grid locations"
            "padding_mode='reflection': use values at locations reflected by"
            "the border for out-of-bound grid locations. For location far away"
            "from the border, it will keep being reflected until becoming in bound,"
            "e.g., (normalized) pixel location 'x = -3.5' reflects by border '-1'"
            "and becomes 'x' = 1.5, then reflects by border '1' and becomes"
            "'x' = -0.5");
    TVM_ATTR_FIELD(align_corners)
        .set_default(true)
        .describe(
            "Geometrically, we consider the pixels of the"
            "input as squares rather than points."
            "If set to True, the extrema (-1 and 1) are considered as referring"
            "to the center points of the input's corner pixels. If set to False, they"
            "are instead considered as referring to the corner points of the input's corner"
            "pixels, making the sampling more resolution agnostic.");
  }
};

}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_ATTRS_IMAGE_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/attrs/memory.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/attrs/memory.h
 * \brief Attributes for memory operators.
 */
#ifndef TVM_RELAY_ATTRS_MEMORY_H_
#define TVM_RELAY_ATTRS_MEMORY_H_

#include <tvm/ir/attrs.h>
#include <tvm/relay/expr.h>
#include <tvm/target/virtual_device.h>

#include <string>
#include <vector>

namespace tvm {
namespace relay {

// Tuple-type flattening helpers (declarations only; definitions live in the
// corresponding .cc). Presumably used by memory planning to expand tuple
// values into their leaf tensors and back — confirm at the definition site.
std::vector<TensorType> FlattenTupleType(const Type& type);
std::vector<Expr> FromTupleType(const Type& type, const Expr& expr);
Expr ToTupleType(const Type& t, const std::vector<Expr>& exprs);

/*!
 * \brief Options for allocating storage.
 */
struct AllocStorageAttrs : public tvm::AttrsNode<AllocStorageAttrs> {
  DataType dtype;
  // Defaults to fully unconstrained, i.e. no device was specified.
  VirtualDevice virtual_device = VirtualDevice::FullyUnconstrained();

  TVM_DECLARE_ATTRS(AllocStorageAttrs, "relay.attrs.AllocStorageAttrs") {
    TVM_ATTR_FIELD(dtype)
        .describe("The dtype of the tensor to allocate.")
        .set_default(DataType::Float(32, 1));
    TVM_ATTR_FIELD(virtual_device).describe("The virtual device on which to allocate memory.");
  }
};

/*!
 * \brief Options for allocating tensors.
 */
struct AllocTensorAttrs : public tvm::AttrsNode<AllocTensorAttrs> {
  Constant const_shape;          // constant shape, aids type inference
  Array<IndexExpr> assert_shape; // shape asserted/cast onto the result type
  DataType dtype;

  TVM_DECLARE_ATTRS(AllocTensorAttrs, "relay.attrs.AllocTensorAttrs") {
    TVM_ATTR_FIELD(dtype)
        .describe("The dtype of the tensor to allocate.")
        .set_default(DataType::Float(32, 1));
    TVM_ATTR_FIELD(const_shape).describe("The shape of constant used to aid in type inference.");
    TVM_ATTR_FIELD(assert_shape)
        .describe(
            "The shape to cast the return type of the allocation to, "
            "used to specify the shape obtained via further analysis.");
  }
};

}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_ATTRS_MEMORY_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/attrs/nn.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/relay/attrs/nn.h * \brief Auxiliary attributes for nn operators. */ #ifndef TVM_RELAY_ATTRS_NN_H_ #define TVM_RELAY_ATTRS_NN_H_ #include <tvm/ir/attrs.h> #include <tvm/relay/base.h> #include <string> namespace tvm { namespace relay { /*! * \brief Add a 1D Tensor to an axis of a data. * * \note bias_add is a special add operator that is in nn * and enables automatic derivation of bias's shape. * You can directly use add for more generalized case. */ struct BiasAddAttrs : public tvm::AttrsNode<BiasAddAttrs> { int axis; TVM_DECLARE_ATTRS(BiasAddAttrs, "relay.attrs.BiasAddAttrs") { TVM_ATTR_FIELD(axis).describe("The axis to add the bias").set_default(1); } }; /*! 
\brief Attributes used in 1D convolution operators */
struct Conv1DAttrs : public tvm::AttrsNode<Conv1DAttrs> {
  Array<IndexExpr> strides;
  Array<IndexExpr> padding;  // (left, right) — 2 entries even though the op is 1D
  Array<IndexExpr> dilation;
  int groups;                // declared but documented below as currently unused
  IndexExpr channels;
  Array<IndexExpr> kernel_size;
  tvm::String data_layout;
  tvm::String kernel_layout;
  tvm::String out_layout;
  DataType out_dtype;

  TVM_DECLARE_ATTRS(Conv1DAttrs, "relay.attrs.Conv1DAttrs") {
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({
            1,
        }))
        .describe("Specifies the stride of the convolution.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded"
            "on both sides for padding number of points");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({
            1,
        }))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(groups).set_default(1).describe(
        "Currently unused but may be added in the future.");
    TVM_ATTR_FIELD(channels)
        .describe(
            "The number of output channels in the convolution."
            " If it is not set, inferred by shape of the weight.")
        .set_default(NullValue<IndexExpr>());
    TVM_ATTR_FIELD(kernel_size)
        .describe("Specifies the dimensions of the convolution window.")
        .set_default(NullValue<Array<IndexExpr>>());
    TVM_ATTR_FIELD(data_layout)
        .set_default("NCW")
        .describe(
            "Dimension ordering of input data. Can be 'NCW', 'NWC', etc."
            "'N', 'C', 'W' stands for batch, channel, and width"
            "dimensions respectively. Convolution is applied on the 'W'"
            "dimension.");
    TVM_ATTR_FIELD(kernel_layout)
        .set_default("OIW")
        .describe(
            "Dimension ordering of weight. Can be 'OIW', or 'WIO', etc."
            "'O', 'I', 'W' stands for num_filter, input_channel, and width"
            "dimensions respectively.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output. Can be 'NCW', 'NWC', etc."
            "'N', 'C', 'W' stands for batch, channel, and width"
            "dimensions respectively. Default to be same as input layout.");

    // use 0 bits to indicate none.
    TVM_ATTR_FIELD(out_dtype)
        .set_default(NullValue<DataType>())
        .describe("Output data type, set to explicit type under mixed precision setting");
  }
};

/*! \brief Attributes used in convolution operators */
struct Conv2DAttrs : public tvm::AttrsNode<Conv2DAttrs> {
  Array<IndexExpr> strides;
  Array<IndexExpr> padding;
  Array<IndexExpr> dilation;
  int groups;
  IndexExpr channels;
  Array<IndexExpr> kernel_size;
  tvm::String data_layout;
  tvm::String kernel_layout;
  tvm::String out_layout;
  tvm::String auto_scheduler_rewritten_layout;   // The layout after auto-scheduler's layout rewrite
  Array<PrimExpr> meta_schedule_original_shape;  // The original shape of the weights
  DataType out_dtype;

  TVM_DECLARE_ATTRS(Conv2DAttrs, "relay.attrs.Conv2DAttrs") {
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the strides of the convolution.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded"
            "Padding support both symmetric and asymmetric as"
            "one int : same padding used on all sides"
            "two int : bottom, right will use same padding as top, left"
            "four int : padding width in the order of (top, left, bottom, right)");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(groups).set_default(1).describe(
        "Controls the connections between inputs and outputs."
        "At groups=1, all inputs are convolved to all outputs."
        "At groups=2, the operation becomes equivalent to having two convolution"
        "layers side by side, each seeing half the input channels, and producing"
        "half the output channels, and both subsequently concatenated.");
    TVM_ATTR_FIELD(channels)
        .describe(
            "The number of output channels in the convolution."
" If it is not set, inferred by shape of the weight.") .set_default(NullValue<IndexExpr>()); TVM_ATTR_FIELD(kernel_size) .describe("Specifies the dimensions of the convolution window.") .set_default(NullValue<Array<IndexExpr>>()); TVM_ATTR_FIELD(data_layout) .set_default("NCHW") .describe( "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc." "'N', 'C', 'H', 'W' stands for batch, channel, height, and width" "dimensions respectively. Convolution is applied on the 'H' and" "'W' dimensions."); TVM_ATTR_FIELD(kernel_layout) .set_default("OIHW") .describe( "Dimension ordering of weight. Can be 'OIHW', 'OIHW16o16i', etc." "'O', 'I', 'H', 'W' stands for num_filter, input_channel, height, and width" "dimensions respectively."); TVM_ATTR_FIELD(out_layout) .set_default("") .describe( "Dimension ordering of output. Can be 'NCHW', 'NHWC', etc." "'N', 'C', 'H', 'W' stands for batch, channel, height, and width" "dimensions respectively. Default to be same as input layout."); // use 0 bits to indicate none. TVM_ATTR_FIELD(out_dtype) .set_default(NullValue<DataType>()) .describe("Output data type, set to explicit type under mixed precision setting"); } }; /*! \brief Attributes used in winograd weight transformation operators */ struct ConvWinogradWeightTransformAttrs : public tvm::AttrsNode<ConvWinogradWeightTransformAttrs> { int tile_size; TVM_DECLARE_ATTRS(ConvWinogradWeightTransformAttrs, "relay.attrs.ConvWinogradWeightTransformAttrs") { TVM_ATTR_FIELD(tile_size).describe( "Tile size of winograd. E.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3)"); } }; /*! 
\brief Attributes used in gemm weight transformation operators */ struct ConvGemmWeightTransformAttrs : public tvm::AttrsNode<ConvGemmWeightTransformAttrs> { int tile_rows; int tile_cols; TVM_DECLARE_ATTRS(ConvGemmWeightTransformAttrs, "relay.attrs.ConvGemmWeightTransformAttrs") { TVM_ATTR_FIELD(tile_rows).describe("Tile rows of the weight transformation for ConvGemm."); TVM_ATTR_FIELD(tile_cols).describe("Tile columns of the weight transformation for ConvGemm."); } }; /*! \brief Attributes used in convolution operators with winograd algorithm */ struct Conv2DWinogradAttrs : public tvm::AttrsNode<Conv2DWinogradAttrs> { int tile_size; Array<IndexExpr> strides; Array<IndexExpr> padding; Array<IndexExpr> dilation; int groups; IndexExpr channels; Array<IndexExpr> kernel_size; tvm::String data_layout; tvm::String kernel_layout; tvm::String out_layout; tvm::String auto_scheduler_rewritten_layout; // The layout after auto-scheduler's layout rewrite Array<PrimExpr> meta_schedule_original_shape; // The original shape of the weights DataType out_dtype; TVM_DECLARE_ATTRS(Conv2DWinogradAttrs, "relay.attrs.Conv2DWinogradAttrs") { TVM_ATTR_FIELD(tile_size).describe( "The tile size of winograd. E.g. 2 for F(2x2, 3x3) and 4 for F(4x4, 3x3)"); TVM_ATTR_FIELD(strides) .set_default(Array<IndexExpr>({1, 1})) .describe("Specifies the strides of the convolution."); TVM_ATTR_FIELD(padding) .set_default(Array<IndexExpr>({0, 0})) .describe( "If padding is non-zero, then the input is implicitly zero-padded" "Padding support both symmetric and asymmetric as" "one int : same padding used on all sides" "two int : bottom, right will use same padding as top, left" "four int : padding width in the order of (top, left, bottom, right)"); TVM_ATTR_FIELD(dilation) .set_default(Array<IndexExpr>({1, 1})) .describe("Specifies the dilation rate to use for dilated convolution."); TVM_ATTR_FIELD(groups).set_default(1).describe( "Controls the connections between inputs and outputs." 
        "At groups=1, all inputs are convolved to all outputs."
        "At groups=2, the operation becomes equivalent to having two convolution"
        "layers side by side, each seeing half the input channels, and producing"
        "half the output channels, and both subsequently concatenated.");
    TVM_ATTR_FIELD(channels)
        .describe(
            "The number of output channels in the convolution."
            " If it is not set, inferred by shape of the weight.")
        .set_default(NullValue<IndexExpr>());
    TVM_ATTR_FIELD(kernel_size)
        .describe("Specifies the dimensions of the convolution window.")
        .set_default(NullValue<Array<IndexExpr>>());
    TVM_ATTR_FIELD(data_layout)
        .set_default("NCHW")
        .describe(
            "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
            "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
            "dimensions respectively. Convolution is applied on the 'H' and"
            "'W' dimensions.");
    TVM_ATTR_FIELD(kernel_layout)
        .set_default("OIHW")
        .describe(
            "Dimension ordering of weight. Can be 'OIHW', 'OIHW16o16i', etc."
            "'O', 'I', 'H', 'W' stands for num_filter, input_channel, height, and width"
            "dimensions respectively.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output. Can be 'NCHW', 'NHWC', etc."
            "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
            "dimensions respectively. Default to be same as input layout.");

    // use 0 bits to indicate none.
    TVM_ATTR_FIELD(out_dtype)
        .set_default(NullValue<DataType>())
        .describe("Output data type, set to explicit type under mixed precision setting");
  }
};

/*! \brief Attributes used in winograd weight transformation operators */
// NNPACK-specific variant: the algorithm is selected by an integer enum value
// (see tvm.contrib.nnpack.ConvolutionAlgorithm) rather than a tile size.
struct Conv2DWinogradNNPACKWeightTransformAttrs
    : public tvm::AttrsNode<Conv2DWinogradNNPACKWeightTransformAttrs> {
  int convolution_algorithm;
  DataType out_dtype;

  TVM_DECLARE_ATTRS(Conv2DWinogradNNPACKWeightTransformAttrs,
                    "relay.attrs.Conv2DWinogradNNPACKWeightTransformAttrs") {
    TVM_ATTR_FIELD(convolution_algorithm)
        .describe(
            "The convolution algorithm for Winograd NNPACK. "
            "E.g. tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8 for WT_8x8, "
            "tvm.contrib.nnpack.ConvolutionAlgorithm.WT_8x8_FP16 for WT_8x8_FP16");
    TVM_ATTR_FIELD(out_dtype)
        .set_default(NullValue<DataType>())
        .describe("Output data type, set to explicit type under mixed precision setting");
  }
};

/*! \brief Attributes used in convolution operators */
// 3D analogue of Conv2DAttrs; padding accepts 1, 3, or 6 values (see describe).
struct Conv3DAttrs : public tvm::AttrsNode<Conv3DAttrs> {
  Array<IndexExpr> strides;
  Array<IndexExpr> padding;
  Array<IndexExpr> dilation;
  int groups;
  IndexExpr channels;
  Array<IndexExpr> kernel_size;
  tvm::String data_layout;
  tvm::String kernel_layout;
  tvm::String out_layout;
  tvm::String auto_scheduler_rewritten_layout;   // The layout after auto-scheduler's layout rewrite
  Array<PrimExpr> meta_schedule_original_shape;  // The original shape of the weights
  DataType out_dtype;

  TVM_DECLARE_ATTRS(Conv3DAttrs, "relay.attrs.Conv3DAttrs") {
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1, 1, 1}))
        .describe("Specifies the strides of the convolution.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded"
            "Padding support both symmetric and asymmetric as"
            "one int : same padding used on all sides"
            "three int : back, bottom, right will use same padding as front, top, left"
            "six int : padding width in the order of (front, top, left, back, bottom,"
            "right)");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1, 1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(groups).set_default(1).describe(
        "Controls the connections between inputs and outputs."
        "At groups=1, all inputs are convolved to all outputs."
        "At groups=2, the operation becomes equivalent to having two convolution"
        "layers side by side, each seeing half the input channels, and producing"
        "half the output channels, and both subsequently concatenated.");
    TVM_ATTR_FIELD(channels)
        .describe(
            "The number of output channels in the convolution."
            " If it is not set, inferred by shape of the weight.")
        .set_default(NullValue<IndexExpr>());
    TVM_ATTR_FIELD(kernel_size)
        .describe("Specifies the dimensions of the convolution window.")
        .set_default(NullValue<Array<IndexExpr>>());
    TVM_ATTR_FIELD(data_layout)
        .set_default("NCDHW")
        .describe(
            "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc."
            "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
            "dimensions respectively. Convolution is applied on the 'D', 'H' and"
            "'W' dimensions.");
    TVM_ATTR_FIELD(kernel_layout)
        .set_default("OIDHW")
        .describe(
            "Dimension ordering of weight. Can be 'OIDHW', 'OIDHW16o16i', etc."
            "'O', 'I', 'D', 'H', 'W' stands for num_filter, input_channel, depth, height,"
            "and width dimensions respectively.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output. Can be 'NCDHW', 'NDHWC', etc."
            "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
            "dimensions respectively. Default to be same as input layout.");

    // use 0 bits to indicate none.
    TVM_ATTR_FIELD(out_dtype)
        .set_default(NullValue<DataType>())
        .describe("Output data type, set to explicit type under mixed precision setting");
  }
};

/*! \brief Attributes used in transposed convolution operator */
// Adds output_padding on top of the regular conv fields to disambiguate the
// output spatial size of the transpose (deconvolution).
struct Conv3DTransposeAttrs : public tvm::AttrsNode<Conv3DTransposeAttrs> {
  IndexExpr channels;
  Array<IndexExpr> kernel_size;
  Array<IndexExpr> strides;
  Array<IndexExpr> padding;
  Array<IndexExpr> output_padding;
  Array<IndexExpr> dilation;
  int groups;
  tvm::String data_layout;
  tvm::String kernel_layout;
  tvm::String out_layout;
  DataType out_dtype;

  TVM_DECLARE_ATTRS(Conv3DTransposeAttrs, "relay.attrs.Conv3DTransposeAttrs") {
    TVM_ATTR_FIELD(channels)
        .set_default(NullValue<IndexExpr>())
        .describe(
            "The dimensionality of the output space"
            "i.e. the number of output channels in the convolution.");
    TVM_ATTR_FIELD(kernel_size)
        .describe("The dimensions of the convolution window.")
        .set_default(NullValue<Array<IndexExpr>>());
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1, 1, 1}))
        .describe("The strides of the convolution.");
    TVM_ATTR_FIELD(output_padding)
        .set_default(Array<IndexExpr>({0, 0, 0}))
        .describe(
            "Zero-padding added to one side of the output."
            "Padding support both symmetric and asymmetric as"
            "one int : same padding used on all sides"
            "three int : front, bottom, right will use same padding as back, top, left"
            "six int : padding width in the order of (front, top, left, back, bottom, right)");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded"
            "Padding support both symmetric and asymmetric as"
            "one int : same padding used on all sides"
            "three int : front, bottom, right will use same padding as back, top, left"
            "six int : padding width in the order of (front, top, left, back, bottom, right)");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1, 1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(groups).set_default(1).describe(
        "Controls the connections between inputs and outputs."
        "At groups=1, all inputs are convolved to all outputs."
        "At groups=2, the operation becomes equivalent to having two convolution"
        "layers side by side, each seeing half the input channels, and producing"
        "half the output channels, and both subsequently concatenated.");
    TVM_ATTR_FIELD(data_layout)
        .set_default("NCDHW")
        .describe(
            "Dimension ordering of data. Can be 'NCDHW', 'NDHWC', etc."
            "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
            "dimensions respectively. Convolution is applied on the 'D', 'H' and"
            "'W' dimensions.");
    TVM_ATTR_FIELD(kernel_layout)
        .set_default("OIDHW")
        .describe(
            "Dimension ordering of data and weight. Can be 'OIDHW', 'OIDHW16o16i', etc."
            "'O', 'I', 'D', 'H', 'W' stands for num_filter, input_channel, depth, height, and width"
            "dimensions respectively.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output. Can be 'NCDHW', 'NDHWC', etc."
            "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
            "dimensions respectively. Default to be same as input layout.");
    TVM_ATTR_FIELD(out_dtype)
        .set_default(NullValue<DataType>())
        .describe("Output data type, set to explicit type under mixed precision setting");
  }
};

/*! \brief Attributes used in 3d winograd convolution operators */
// NOTE(review): unlike Conv2DWinogradAttrs, the layout fields here are
// std::string rather than tvm::String — inconsistent but behaviorally benign.
struct Conv3DWinogradAttrs : public tvm::AttrsNode<Conv3DWinogradAttrs> {
  int tile_size;
  Array<IndexExpr> strides;
  Array<IndexExpr> padding;
  Array<IndexExpr> dilation;
  int groups;
  IndexExpr channels;
  Array<IndexExpr> kernel_size;
  std::string data_layout;
  std::string kernel_layout;
  std::string out_layout;
  DataType out_dtype;

  TVM_DECLARE_ATTRS(Conv3DWinogradAttrs, "relay.attrs.Conv3DWinogradAttrs") {
    TVM_ATTR_FIELD(tile_size).describe(
        "The tile size of winograd. E.g. 2 for F(2x2x2, 3x3x3) and 4 for F(4x4x4, 3x3x3)");
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1, 1, 1}))
        .describe("Specifies the strides of the convolution.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded"
            "Padding support both symmetric and asymmetric as"
            "one int : same padding used on all sides"
            "three int : back, bottom, right will use same padding as front, top, left"
            "six int : padding width in the order of (front, top, left, back, bottom,"
            "right)");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1, 1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(groups).set_default(1).describe(
        "Controls the connections between inputs and outputs."
        "At groups=1, all inputs are convolved to all outputs."
"At groups=2, the operation becomes equivalent to having two convolution" "layers side by side, each seeing half the input channels, and producing" "half the output channels, and both subsequently concatenated."); TVM_ATTR_FIELD(channels) .describe( "The number of output channels in the convolution." " If it is not set, inferred by shape of the weight.") .set_default(NullValue<IndexExpr>()); TVM_ATTR_FIELD(kernel_size) .describe("Specifies the dimensions of the convolution window.") .set_default(NullValue<Array<IndexExpr>>()); TVM_ATTR_FIELD(data_layout) .set_default("NCDHW") .describe( "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc." "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width" "dimensions respectively. Convolution is applied on the 'D', 'H' and" "'W' dimensions."); TVM_ATTR_FIELD(kernel_layout) .set_default("OIDHW") .describe( "Dimension ordering of weight. Can be 'OIDHW', 'OIDHW16o16i', etc." "'O', 'I', 'D', 'H', 'W' stands for num_filter, input_channel, depth, height," "and width dimensions respectively."); TVM_ATTR_FIELD(out_layout) .set_default("") .describe( "Dimension ordering of output. Can be 'NCDHW', 'NDHWC', etc." "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width" "dimensions respectively. Default to be same as input layout."); // use 0 bits to indicate none. TVM_ATTR_FIELD(out_dtype) .set_default(NullValue<DataType>()) .describe("Output data type, set to explicit type under mixed precision setting"); } }; /*! \brief Attributes used in softmax operators */ struct SoftmaxAttrs : public tvm::AttrsNode<SoftmaxAttrs> { int axis; TVM_DECLARE_ATTRS(SoftmaxAttrs, "relay.attrs.SoftmaxAttrs") { TVM_ATTR_FIELD(axis).set_default(-1).describe("The axis to sum over when computing softmax."); } }; /*! 
\brief Attributes used in transposed convolution operator */
// 2D transposed convolution (deconvolution); output_padding resolves the
// output-size ambiguity inherent to strided transpose convs.
struct Conv2DTransposeAttrs : public tvm::AttrsNode<Conv2DTransposeAttrs> {
  IndexExpr channels;
  Array<IndexExpr> kernel_size;
  Array<IndexExpr> strides;
  Array<IndexExpr> padding;
  Array<IndexExpr> output_padding;
  Array<IndexExpr> dilation;
  int groups;
  std::string data_layout;
  std::string kernel_layout;
  std::string out_layout;
  DataType out_dtype;

  TVM_DECLARE_ATTRS(Conv2DTransposeAttrs, "relay.attrs.Conv2DTransposeAttrs") {
    TVM_ATTR_FIELD(channels)
        .set_default(NullValue<IndexExpr>())
        .describe(
            "The dimensionality of the output space"
            "i.e. the number of output channels in the convolution.");
    TVM_ATTR_FIELD(kernel_size)
        .describe("The dimensions of the convolution window.")
        .set_default(NullValue<Array<IndexExpr>>());
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("The strides of the convolution.");
    TVM_ATTR_FIELD(output_padding)
        .set_default(Array<IndexExpr>({0, 0}))
        .describe(
            "Zero-padding added to one side of the output."
            "Padding support both symmetric and asymmetric as"
            "one int : same padding used on all sides"
            "two int : bottom, right will use same padding as top, left"
            "four int : padding width in the order of (top, left, bottom, right)");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded"
            "Padding support both symmetric and asymmetric as"
            "one int : same padding used on all sides"
            "two int : bottom, right will use same padding as top, left"
            "four int : padding width in the order of (top, left, bottom, right)");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(groups).set_default(1).describe(
        "Controls the connections between inputs and outputs."
        "At groups=1, all inputs are convolved to all outputs."
        "At groups=2, the operation becomes equivalent to having two convolution"
        "layers side by side, each seeing half the input channels, and producing"
        "half the output channels, and both subsequently concatenated.");
    TVM_ATTR_FIELD(data_layout)
        .set_default("NCHW")
        .describe(
            "Dimension ordering of data. Can be 'NCHW', 'NHWC', etc."
            "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
            "dimensions respectively. Convolution is applied on the 'H' and"
            "'W' dimensions.");
    TVM_ATTR_FIELD(kernel_layout)
        .set_default("OIHW")
        .describe(
            "Dimension ordering of data and weight. Can be 'OIHW', 'OIHW16o16i', etc."
            "'O', 'I', 'H', 'W' stands for num_filter, input_channel, height, and width"
            "dimensions respectively.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output. Can be 'NCHW', 'NHWC', etc."
            "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
            "dimensions respectively. Default to be same as input layout.");
    TVM_ATTR_FIELD(out_dtype)
        .set_default(NullValue<DataType>())
        .describe("Output data type, set to explicit type under mixed precision setting");
  }
};

/*! \brief Attributes used in dilate operator */
// nn.dilate: inserts (stride - 1) copies of dilation_value between input
// elements along each dimension.
struct DilateAttrs : public tvm::AttrsNode<DilateAttrs> {
  Array<IndexExpr> strides;
  double dilation_value;

  TVM_DECLARE_ATTRS(DilateAttrs, "relay.attrs.DilateAttrs") {
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Dilation stride on each dimension, 1 means no dilation.");
    TVM_ATTR_FIELD(dilation_value).set_default(0.0).describe("Value used to dilate the input.");
  }
};

/*!
\brief Attributes used in 1D transposed convolution operator */
struct Conv1DTransposeAttrs : public tvm::AttrsNode<Conv1DTransposeAttrs> {
  IndexExpr channels;               // number of output channels; unset => inferred from weight shape
  Array<IndexExpr> kernel_size;     // width of the convolution window
  Array<IndexExpr> strides;
  Array<IndexExpr> padding;
  Array<IndexExpr> output_padding;  // extra padding on one side of the output
  Array<IndexExpr> dilation;
  int groups;                       // grouped-convolution factor; 1 = dense connectivity
  std::string data_layout;
  std::string kernel_layout;
  std::string out_layout;           // "" => same as data_layout
  DataType out_dtype;               // 0-bit dtype encodes "none" (infer from inputs)

  TVM_DECLARE_ATTRS(Conv1DTransposeAttrs, "relay.attrs.Conv1DTransposeAttrs") {
    TVM_ATTR_FIELD(channels)
        .set_default(NullValue<IndexExpr>())
        .describe(
            "The dimensionality of the output space"
            "i.e. the number of output channels in the convolution.");
    TVM_ATTR_FIELD(kernel_size)
        .describe("The dimensions of the convolution window.")
        .set_default(NullValue<Array<IndexExpr>>());
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1}))
        .describe("The strides of the convolution.");
    TVM_ATTR_FIELD(output_padding)
        .set_default(Array<IndexExpr>({0}))
        .describe("Zero-padding added to one side of the output.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0}))
        .describe(
            "Symmetric or asymmetric padding."
            "Single value: the input is implicitly zero-padded on both sides."
            "Two values: padding[0] is used for left input padding, "
            "padding[1] is used for right input padding,");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(groups).set_default(1).describe(
        "Controls the connections between inputs and outputs."
        "At groups=1, all inputs are convolved to all outputs."
        "At groups=2, the operation becomes equivalent to having two convolution"
        "layers side by side, each seeing half the input channels, and producing"
        "half the output channels, and both subsequently concatenated.");
    TVM_ATTR_FIELD(data_layout)
        .set_default("NCW")
        .describe(
            "Dimension ordering of data. Can be 'NCW', 'NWC', etc."
            "'N', 'C', 'W' stands for batch, channel, and width"
            "dimensions respectively. Convolution is applied on the"
            "'W' dimension.");
    TVM_ATTR_FIELD(kernel_layout)
        .set_default("OIW")
        .describe(
            "Dimension ordering of data and weight. Can be 'OIW', 'OIW16o16i', etc."
            "'O', 'I', 'W' stands for num_filter, input_channel, and width"
            "dimensions respectively.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output. Can be 'NCW', 'NWC', etc."
            "'N', 'C', 'W' stands for batch, channel, and width"
            "dimensions respectively. Default to be same as input layout.");
    TVM_ATTR_FIELD(out_dtype)
        .set_default(NullValue<DataType>())
        .describe("Output data type, set to explicit type under mixed precision setting");
  }
};

/*! \brief Attributes for max pool operator */
struct MaxPool2DAttrs : public tvm::AttrsNode<MaxPool2DAttrs> {
  Array<IndexExpr> pool_size;  // (H, W) extent of the pooling window
  Array<IndexExpr> strides;
  Array<IndexExpr> padding;
  Array<IndexExpr> dilation;
  tvm::String layout;
  tvm::String out_layout;      // "" => same as input layout
  bool ceil_mode;              // ceil instead of floor when computing output shape

  TVM_DECLARE_ATTRS(MaxPool2DAttrs, "relay.attrs.MaxPool2DAttrs") {
    TVM_ATTR_FIELD(pool_size).describe("Size of the pooling windows.");
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the strides of the convolution.");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the dilation of the convolution.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded"
            "Padding support both symmetric and asymmetric as"
            "one int : same padding used on all sides"
            "two int : bottom, right will use same padding as top, left"
            "four int : padding width in the order of (top, left, bottom, right)");
    TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
        "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
        "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
        "dimensions respectively. Pooling is applied on the 'H' and"
        "'W' dimensions.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCHW', 'NHWC', etc."
            "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
            "dimensions respectively. Pooling is applied on the 'H' and"
            "'W' dimensions.");
    TVM_ATTR_FIELD(ceil_mode).set_default(false).describe(
        "When true, will use ceil instead of floor to compute the output shape.");
  }
};

/*! \brief Attributes for avg pool operator */
struct AvgPool2DAttrs : public tvm::AttrsNode<AvgPool2DAttrs> {
  Array<IndexExpr> pool_size;  // (H, W) extent of the pooling window
  Array<IndexExpr> strides;
  Array<IndexExpr> padding;
  Array<IndexExpr> dilation;
  tvm::String layout;
  tvm::String out_layout;      // "" => same as input layout
  bool ceil_mode;              // ceil instead of floor when computing output shape
  bool count_include_pad;      // include padded cells in the averaging denominator

  TVM_DECLARE_ATTRS(AvgPool2DAttrs, "relay.attrs.AvgPool2DAttrs") {
    TVM_ATTR_FIELD(pool_size).describe("Size of the pooling windows.");
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the strides of the convolution.");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the dilation of the convolution.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded"
            "Padding support both symmetric and asymmetric as"
            "one int : same padding used on all sides"
            "two int : bottom, right will use same padding as top, left"
            "four int : padding width in the order of (top, left, bottom, right)");
    TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
        "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
        "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
        "dimensions respectively. Pooling is applied on the 'H' and"
        "'W' dimensions.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCHW', 'NHWC', etc."
            "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
            "dimensions respectively. Pooling is applied on the 'H' and"
            "'W' dimensions.");
    TVM_ATTR_FIELD(ceil_mode).set_default(false).describe(
        "When true, will use ceil instead of floor to compute the output shape.");
    TVM_ATTR_FIELD(count_include_pad)
        .set_default(false)
        .describe("When true, will include padding to compute the average");
  }
};

/*! \brief Attributes for global pool operator */
struct GlobalPool2DAttrs : public tvm::AttrsNode<GlobalPool2DAttrs> {
  tvm::String layout;
  tvm::String out_layout;  // "" => same as input layout

  TVM_DECLARE_ATTRS(GlobalPool2DAttrs, "relay.attrs.GlobalPool2DAttrs") {
    TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
        "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
        "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
        "dimensions respectively. Pooling is applied on the 'H' and"
        "'W' dimensions.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCHW', 'NHWC', etc."
            "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
            "dimensions respectively. Pooling is applied on the 'H' and"
            "'W' dimensions.");
  }
};

/*! \brief Attributes for 1d adaptive pool operator */
struct AdaptivePool1DAttrs : public tvm::AttrsNode<AdaptivePool1DAttrs> {
  Array<IndexExpr> output_size;  // target output width; empty => global pooling
  std::string layout;
  tvm::String out_layout;        // "" => same as input layout

  TVM_DECLARE_ATTRS(AdaptivePool1DAttrs, "relay.attrs.AdaptivePool1DAttrs") {
    TVM_ATTR_FIELD(output_size).set_default(Array<IndexExpr>({})).describe("Output width.");
    TVM_ATTR_FIELD(layout).set_default("NCW").describe(
        "Dimension ordering of input data. Can be 'NCW', 'NWC', etc."
        "'N', 'C', 'W' stands for batch, channel, and width"
        "dimensions respectively. Pooling is applied on the"
        "'W' dimension.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCW', 'NWC', etc."
            "'N', 'C', 'W' stands for batch, channel, and width"
            "dimensions respectively. Pooling is applied on the"
            "'W' dimension.");
  }
};

/*!
\brief Attributes for 2d adaptive pool operator */
struct AdaptivePool2DAttrs : public tvm::AttrsNode<AdaptivePool2DAttrs> {
  Array<IndexExpr> output_size;  // target (H, W); empty => global pooling
  std::string layout;
  tvm::String out_layout;        // "" => same as input layout

  TVM_DECLARE_ATTRS(AdaptivePool2DAttrs, "relay.attrs.AdaptivePool2DAttrs") {
    TVM_ATTR_FIELD(output_size)
        .set_default(Array<IndexExpr>({}))
        .describe("Output height and width.");
    TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
        "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
        "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
        "dimensions respectively. Pooling is applied on the 'H' and"
        "'W' dimensions.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCHW', 'NHWC', etc."
            "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
            "dimensions respectively. Pooling is applied on the 'H' and"
            "'W' dimensions.");
  }
};

/*! \brief Attributes for 3d adaptive pool operator */
struct AdaptivePool3DAttrs : public tvm::AttrsNode<AdaptivePool3DAttrs> {
  Array<IndexExpr> output_size;  // target (D, H, W); empty => global pooling
  std::string layout;
  tvm::String out_layout;        // "" => same as input layout

  TVM_DECLARE_ATTRS(AdaptivePool3DAttrs, "relay.attrs.AdaptivePool3DAttrs") {
    TVM_ATTR_FIELD(output_size)
        .set_default(Array<IndexExpr>({}))
        .describe("Output depth, height and width.");
    TVM_ATTR_FIELD(layout).set_default("NCDHW").describe(
        "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc."
        "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
        "dimensions respectively. Pooling is applied on 'D', 'H' and"
        "'W' dimensions.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCDHW', 'NDHWC', etc."
            "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
            "dimensions respectively. Pooling is applied on 'D', 'H' and"
            "'W' dimensions.");
  }
};

/*!
\brief Attributes for 1D max pool operator */
struct MaxPool1DAttrs : public tvm::AttrsNode<MaxPool1DAttrs> {
  Array<IndexExpr> pool_size;  // width of the pooling window
  Array<IndexExpr> strides;
  Array<IndexExpr> dilation;
  Array<IndexExpr> padding;
  std::string layout;
  tvm::String out_layout;      // "" => same as input layout
  bool ceil_mode;              // ceil instead of floor when computing output shape

  TVM_DECLARE_ATTRS(MaxPool1DAttrs, "relay.attrs.MaxPool1DAttrs") {
    TVM_ATTR_FIELD(pool_size).describe("Size of the pooling windows.");
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1}))
        .describe("Specifies the strides of the convolution.");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1}))
        .describe("Specifies the dilation of the convolution.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded"
            "Padding supports both symmetric and asymmetric as"
            "one int : same padding used on each side"
            "two int : indicates left padding, right padding");
    TVM_ATTR_FIELD(layout).set_default("NCW").describe(
        "Dimension ordering of input data. Can be 'NCW', 'NWC', etc."
        "'N', 'C', 'W' stands for batch, channel, and width"
        "dimensions respectively. Pooling is applied on the 'W' dimensions.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCW', 'NWC', etc."
            "'N', 'C', 'W' stands for batch, channel, and width"
            "dimensions respectively. Pooling is applied on the 'W' dimensions.");
    TVM_ATTR_FIELD(ceil_mode).set_default(false).describe(
        "When true, will use ceil instead of floor to compute the output shape.");
  }
};

/*!
\brief Attributes for 1D avg pool operator */
struct AvgPool1DAttrs : public tvm::AttrsNode<AvgPool1DAttrs> {
  Array<IndexExpr> pool_size;  // width of the pooling window
  Array<IndexExpr> strides;
  Array<IndexExpr> dilation;
  Array<IndexExpr> padding;
  std::string layout;
  tvm::String out_layout;      // "" => same as input layout
  bool ceil_mode;              // ceil instead of floor when computing output shape
  bool count_include_pad;      // include padded cells in the averaging denominator

  TVM_DECLARE_ATTRS(AvgPool1DAttrs, "relay.attrs.AvgPool1DAttrs") {
    TVM_ATTR_FIELD(pool_size).describe("Size of the pooling windows.");
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1}))
        .describe("Specifies the strides of the convolution.");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1}))
        .describe("Specifies the dilation of the convolution.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded"
            "Padding supports both symmetric and asymmetric as"
            "one int : same padding used on each side"
            "two int : indicates left padding, right padding");
    // Fixed layout-name typo: 'NHC' -> 'NWC' ('NHC' is not a valid 1D layout; the
    // field itself documents the axes as 'N', 'C', 'W', matching MaxPool1DAttrs).
    TVM_ATTR_FIELD(layout).set_default("NCW").describe(
        "Dimension ordering of input data. Can be 'NCW', 'NWC', etc."
        "'N', 'C', 'W' stands for batch, channel, and width"
        "dimensions respectively. Pooling is applied on the 'W' dimension.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCW', 'NWC', etc."
            "'N', 'C', 'W' stands for batch, channel, and width"
            "dimensions respectively. Pooling is applied on the 'W' dimension.");
    TVM_ATTR_FIELD(ceil_mode).set_default(false).describe(
        "When true, will use ceil instead of floor to compute the output shape.");
    TVM_ATTR_FIELD(count_include_pad)
        .set_default(false)
        .describe("When true, will include padding to compute the average");
  }
};

/*!
\brief Attributes for 3D max pool operator */
struct MaxPool3DAttrs : public tvm::AttrsNode<MaxPool3DAttrs> {
  Array<IndexExpr> pool_size;  // (D, H, W) extent of the pooling window
  Array<IndexExpr> strides;
  Array<IndexExpr> dilation;
  Array<IndexExpr> padding;
  std::string layout;
  tvm::String out_layout;      // "" => same as input layout
  bool ceil_mode;              // ceil instead of floor when computing output shape

  TVM_DECLARE_ATTRS(MaxPool3DAttrs, "relay.attrs.MaxPool3DAttrs") {
    TVM_ATTR_FIELD(pool_size).describe("Size of the pooling windows.");
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1, 1, 1}))
        .describe("Specifies the strides of the convolution.");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1, 1}))
        .describe("Specifies the dilation of the convolution.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded"
            "Padding support both symmetric and asymmetric as"
            "one int : same padding used on all sides"
            "three int : back, bottom, right will use same padding as front, top, left"
            "six int : padding width in the order of (front, top, left, back, bottom, right)");
    TVM_ATTR_FIELD(layout).set_default("NCDHW").describe(
        "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc."
        "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
        "dimensions respectively. Pooling is applied on the 'D', 'H' and"
        "'W' dimensions.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCDHW', 'NDHWC', etc."
            "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
            "dimensions respectively. Pooling is applied on the 'D', 'H' and"
            "'W' dimensions.");
    TVM_ATTR_FIELD(ceil_mode).set_default(false).describe(
        "When true, will use ceil instead of floor to compute the output shape.");
  }
};

/*!
\brief Attributes for 3D avg pool operator */
struct AvgPool3DAttrs : public tvm::AttrsNode<AvgPool3DAttrs> {
  Array<IndexExpr> pool_size;  // (D, H, W) extent of the pooling window
  Array<IndexExpr> strides;
  Array<IndexExpr> dilation;
  Array<IndexExpr> padding;
  std::string layout;
  tvm::String out_layout;      // "" => same as input layout
  bool ceil_mode;              // ceil instead of floor when computing output shape
  bool count_include_pad;      // include padded cells in the averaging denominator

  TVM_DECLARE_ATTRS(AvgPool3DAttrs, "relay.attrs.AvgPool3DAttrs") {
    TVM_ATTR_FIELD(pool_size).describe("Size of the pooling windows.");
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1, 1, 1}))
        .describe("Specifies the strides of the convolution.");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1, 1}))
        .describe("Specifies the dilation of the convolution.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded"
            "Padding support both symmetric and asymmetric as"
            "one int : same padding used on all sides"
            "three int : back, bottom, right will use same padding as front, top, left"
            "six int : padding width in the order of (front, top, left, back, bottom, right)");
    TVM_ATTR_FIELD(layout).set_default("NCDHW").describe(
        "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc."
        "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
        "dimensions respectively. Pooling is applied on the 'D', 'H' and"
        "'W' dimensions.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output data. Can be 'NCDHW', 'NDHWC', etc."
            "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
            "dimensions respectively. Pooling is applied on the 'D', 'H' and"
            "'W' dimensions.");
    TVM_ATTR_FIELD(ceil_mode).set_default(false).describe(
        "When true, will use ceil instead of floor to compute the output shape.");
    TVM_ATTR_FIELD(count_include_pad)
        .set_default(false)
        .describe("When true, will include padding to compute the average");
  }
};

/*!
\brief Attributes for matmul operator */
struct MatmulAttrs : public tvm::AttrsNode<MatmulAttrs> {
  IndexExpr units;     // number of hidden units (output feature dimension)
  DataType out_dtype;  // 0-bit dtype encodes "none" (infer from inputs)
  bool transpose_a;    // whether input A is stored transposed
  bool transpose_b;    // whether input B is stored transposed
  // layout of B after auto-scheduler's layout rewrite
  tvm::String auto_scheduler_rewritten_layout;
  Array<PrimExpr> meta_schedule_original_shape;  // The original shape of the weights

  TVM_DECLARE_ATTRS(MatmulAttrs, "relay.attrs.MatmulAttrs") {
    TVM_ATTR_FIELD(units).describe("Number of hidden units of the dense transformation.");
    // use 0 bits to indicate none.
    TVM_ATTR_FIELD(out_dtype)
        .set_default(NullValue<DataType>())
        .describe("Output data type, set to explicit type under mixed precision setting");
    TVM_ATTR_FIELD(transpose_a)
        .set_default(false)
        .describe("Whether the first input tensor is in transposed format.");
    TVM_ATTR_FIELD(transpose_b)
        .set_default(false)
        .describe("Whether the second input tensor is in transposed format.");
  }
};

/*! \brief Attributes for dense operator */
struct DenseAttrs : public tvm::AttrsNode<DenseAttrs> {
  IndexExpr units;  // number of hidden units (output feature dimension)
  // layout of B after auto-scheduler's layout rewrite
  tvm::String auto_scheduler_rewritten_layout;
  Array<PrimExpr> meta_schedule_original_shape;  // The original shape of the weights
  DataType out_dtype;                            // 0-bit dtype encodes "none"

  TVM_DECLARE_ATTRS(DenseAttrs, "relay.attrs.DenseAttrs") {
    TVM_ATTR_FIELD(units).describe("Number of hidden units of the dense transformation.");
    // use 0 bits to indicate none.
    TVM_ATTR_FIELD(out_dtype)
        .set_default(NullValue<DataType>())
        .describe("Output data type, set to explicit type under mixed precision setting");
  }
};

/*! \brief Attributes for dense_pack operator */
struct DensePackAttrs : public tvm::AttrsNode<DensePackAttrs> {
  IndexExpr units;           // number of hidden units (output feature dimension)
  DataType out_dtype;        // 0-bit dtype encodes "none"
  tvm::String weight_layout; // layout of the packed weight tensor (e.g. NC8n)

  TVM_DECLARE_ATTRS(DensePackAttrs, "relay.attrs.DensePackAttrs") {
    TVM_ATTR_FIELD(units).describe("Number of hidden units of the dense transformation.");
    // use 0 bits to indicate none.
    TVM_ATTR_FIELD(out_dtype)
        .set_default(NullValue<DataType>())
        .describe("Output data type, set to explicit type under mixed precision setting");
    TVM_ATTR_FIELD(weight_layout)
        .set_default("NC")
        .describe("Dimension ordering of weight. Packed layouts, such as NC8n, are possible.");
  }
};

/*! \brief Attributes for batch matmul operator. */
struct BatchMatmulAttrs : public tvm::AttrsNode<BatchMatmulAttrs> {
  DataType out_dtype;  // 0-bit dtype encodes "none" (infer from inputs)
  bool transpose_a;    // whether input A is stored transposed
  bool transpose_b;    // whether input B is stored transposed
  tvm::String auto_scheduler_rewritten_layout;   // The layout after auto-scheduler's layout rewrite
  Array<PrimExpr> meta_schedule_original_shape;  // The original shape of the weights

  TVM_DECLARE_ATTRS(BatchMatmulAttrs, "relay.attrs.BatchMatmulAttrs") {
    // use 0 bits to indicate none.
    TVM_ATTR_FIELD(out_dtype)
        .set_default(NullValue<DataType>())
        .describe("Output data type, set to explicit type under mixed precision setting");
    TVM_ATTR_FIELD(transpose_a)
        .set_default(false)
        .describe("Whether the first input tensor is in transposed format.");
    TVM_ATTR_FIELD(transpose_b)
        .set_default(false)
        .describe("Whether the second input tensor is in transposed format.");
  }
};

/*! \brief Attributes for sparse_dense operator */
struct SparseDenseAttrs : public tvm::AttrsNode<SparseDenseAttrs> {
  bool sparse_lhs;  // true => S * D^T; false => D * S^T (see describe below)

  TVM_DECLARE_ATTRS(SparseDenseAttrs, "relay.attrs.SparseDenseAttrs") {
    TVM_ATTR_FIELD(sparse_lhs)
        .set_default(false)
        .describe(
            "Indicate whether sparse matrix is multiplied on the right or the left. If true, then "
            "the operation is S * D^T (D dense, S sparse). If false, the operation is D * S^T");
  }
};

/*! \brief Attributes for sparse_transpose operator */
struct SparseTransposeAttrs : public tvm::AttrsNode<SparseTransposeAttrs> {
  // No attributes: the operator is fully determined by its inputs.
  TVM_DECLARE_ATTRS(SparseTransposeAttrs, "relay.attrs.SparseTransposeAttrs") {}
};

/*!
\brief Attributes for sparse_dense operator */
struct SparseConv2DAttrs : public tvm::AttrsNode<SparseConv2DAttrs> {
  std::string layout;            // input data layout ('NCHW' or 'NHWC')
  Array<IndexExpr> kernel_size;  // kernel extent; only 1x1 or 3x3 supported

  TVM_DECLARE_ATTRS(SparseConv2DAttrs, "relay.attrs.SparseConv2DAttrs") {
    TVM_ATTR_FIELD(layout).set_default("NHWC").describe(
        "Dimension ordering of input data. Can be 'NCHW', 'NHWC'"
        "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
        "dimensions respectively.");
    TVM_ATTR_FIELD(kernel_size)
        .set_default(Array<IndexExpr>{1, 1})
        .describe("Kernel size for SparseConv2D, 1x1 or 3x3. ");
  }
};

/*! \brief Attributes for FIFO buffer operator */
struct FIFOBufferAttrs : public tvm::AttrsNode<FIFOBufferAttrs> {
  int axis;  // axis along which the buffer is shifted/filled

  TVM_DECLARE_ATTRS(FIFOBufferAttrs, "relay.attrs.FIFOBufferAttrs") {
    TVM_ATTR_FIELD(axis).set_default(0);
  }
};

/*! \brief Attributes for upsampling operator */
struct UpSamplingAttrs : public tvm::AttrsNode<UpSamplingAttrs> {
  double scale_h;      // multiplicative scale for the height axis
  double scale_w;      // multiplicative scale for the width axis
  tvm::String layout;
  tvm::String method;  // interpolation method (see describe below)
  bool align_corners;  // preserve values at the corner pixels

  TVM_DECLARE_ATTRS(UpSamplingAttrs, "relay.attrs.UpSamplingAttrs") {
    TVM_ATTR_FIELD(scale_h).describe("The upsampling factor for height");
    TVM_ATTR_FIELD(scale_w).describe("The upsampling factor for width");
    TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
        "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
        "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
        "dimensions respectively. Upsampling is applied on the 'H' and"
        "'W' dimensions.");
    TVM_ATTR_FIELD(method)
        .set_default("nearest_neighbor")
        .describe(
            "Specify the mode to use for scaling."
            "nearest_neighbor - Nearest Neighbor"
            "bilinear - Bilinear Interpolation"
            "bicubic - Bicubic Interpolation");
    TVM_ATTR_FIELD(align_corners)
        .set_default(false)
        .describe("Should be true to preserve the values at the corner pixels");
  }
};

/*!
\brief Attributes for upsampling3d operator */
struct UpSampling3DAttrs : public tvm::AttrsNode<UpSampling3DAttrs> {
  double scale_d;      // multiplicative scale for the depth axis
  double scale_h;      // multiplicative scale for the height axis
  double scale_w;      // multiplicative scale for the width axis
  std::string layout;
  std::string method;  // interpolation method (see describe below)
  std::string coordinate_transformation_mode;  // ONNX Resize coordinate mapping mode

  TVM_DECLARE_ATTRS(UpSampling3DAttrs, "relay.attrs.UpSampling3DAttrs") {
    TVM_ATTR_FIELD(scale_d).describe("The upsampling factor for depth");
    TVM_ATTR_FIELD(scale_h).describe("The upsampling factor for height");
    TVM_ATTR_FIELD(scale_w).describe("The upsampling factor for width");
    TVM_ATTR_FIELD(layout).set_default("NCDHW").describe(
        "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc."
        "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
        "dimensions respectively. Upsampling is applied on the 'D', 'H' and"
        "'W' dimensions.");
    TVM_ATTR_FIELD(method)
        .set_default("nearest_neighbor")
        .describe(
            "Specify the mode to use for scaling."
            "nearest_neighbor - Nearest Neighbor"
            "trilinear - Trilinear Interpolation");
    TVM_ATTR_FIELD(coordinate_transformation_mode)
        .set_default("half_pixel")
        .describe(
            "Describes how to transform the coordinate in the resized tensor"
            "to the coordinate in the original tensor."
            "Refer to the ONNX Resize operator specification for details"
            "Available options are half_pixel, align_corners and asymmetric");
  }
};

/*! \brief Attributes used for the padding operator */
struct PadAttrs : public tvm::AttrsNode<PadAttrs> {
  Array<Array<Integer>> pad_width;  // per-axis (before, after) pad amounts
  tvm::String pad_mode;             // "constant", "edge", or "reflect"

  TVM_DECLARE_ATTRS(PadAttrs, "relay.attrs.PadAttrs") {
    TVM_ATTR_FIELD(pad_width).describe(
        "Number of values padded to the edges of each axis, "
        "in the format of ((before_1, after_1), ..., (before_N, after_N))");
    TVM_ATTR_FIELD(pad_mode)
        .set_default("constant")
        .describe(
            "Padding type to use. \"constant\" pads with constant_value, "
            "\"edge\" pads using the edge values of the input array, "
            "\"reflect\" pads by reflecting values with respect to the edges.");
  }
};

/*!
\brief Attributes used for the MirrorPadding operator */
struct MirrorPadAttrs : public tvm::AttrsNode<MirrorPadAttrs> {
  std::string mode;                   // mirroring mode, e.g. "SYMMETRIC" or "REFLECT"
  Array<Array<IndexExpr>> pad_width;  // per-axis (before, after) pad amounts

  TVM_DECLARE_ATTRS(MirrorPadAttrs, "relay.attrs.MirrorPadAttrs") {
    TVM_ATTR_FIELD(mode)
        .set_default("SYMMETRIC")
        .describe("Specifies how mirroring should be performed.");
    TVM_ATTR_FIELD(pad_width).describe(
        "Number of values padded to the edges of each axis, "
        "in the format of ((before_1, after_1), ..., (before_N, after_N))");
  }
};

/*! \brief Attributes for leaky relu operator */
struct LeakyReluAttrs : public tvm::AttrsNode<LeakyReluAttrs> {
  double alpha;  // slope of the negative half axis; must be >= 0

  TVM_DECLARE_ATTRS(LeakyReluAttrs, "relay.attrs.LeakyReluAttrs") {
    TVM_ATTR_FIELD(alpha).set_lower_bound(0.0).set_default(0.25).describe(
        "Slope coefficient for the negative half axis.");
  }
};

/*! \brief Attributes for prelu operator */
struct PReluAttrs : public tvm::AttrsNode<PReluAttrs> {
  int axis;  // axis holding the channel dimension (alpha is per-channel)

  TVM_DECLARE_ATTRS(PReluAttrs, "relay.attrs.PReluAttrs") {
    TVM_ATTR_FIELD(axis).set_default(1).describe(
        "Specify which shape axis the channel is specified.");
  }
};

/*! \brief Attributes used in dropout operator */
struct DropoutAttrs : public tvm::AttrsNode<DropoutAttrs> {
  double rate;  // fraction of elements dropped during training

  TVM_DECLARE_ATTRS(DropoutAttrs, "relay.attrs.DropoutAttrs") {
    TVM_ATTR_FIELD(rate)
        .describe("Fraction of the input that gets dropped out during training time")
        .set_default(0.5);
  }
};  // struct DropoutAttrs

/*! \brief Attributes used in batch_norm operator */
struct BatchNormAttrs : public tvm::AttrsNode<BatchNormAttrs> {
  int axis;        // axis holding the channel dimension
  double epsilon;  // numerical-stability term added to the variance
  bool center;     // apply the learned beta offset
  bool scale;      // apply the learned gamma scale

  TVM_DECLARE_ATTRS(BatchNormAttrs, "relay.attrs.BatchNormAttrs") {
    TVM_ATTR_FIELD(axis).describe("Specify which shape axis denotes the channel.").set_default(1);
    TVM_ATTR_FIELD(epsilon)
        .describe("Small float added to variance to avoid dividing by zero")
        .set_default(1e-5);
    TVM_ATTR_FIELD(center)
        .describe("If True, add offset of beta to normalized tensor. If False, beta is ignored")
        .set_default(true);
    TVM_ATTR_FIELD(scale)
        .describe(
            "If True, multiply by gamma. If False, gamma is not used. "
            "When the next layer is piecewise linear (also, e.g., nn.relu), "
            "this can be disabled since the scaling will be done by the next layer.")
        .set_default(true);
  }
};  // struct BatchNormAttrs

/*! \brief Attributes used in instance_norm operator */
struct InstanceNormAttrs : public tvm::AttrsNode<InstanceNormAttrs> {
  int axis;        // axis holding the channel dimension
  double epsilon;  // numerical-stability term added to the variance
  bool center;     // apply the learned beta offset
  bool scale;      // apply the learned gamma scale

  TVM_DECLARE_ATTRS(InstanceNormAttrs, "relay.attrs.InstanceNormAttrs") {
    TVM_ATTR_FIELD(axis).describe("Specify which shape axis denotes the channel.").set_default(1);
    TVM_ATTR_FIELD(epsilon)
        .describe("Small float added to variance to avoid dividing by zero")
        .set_default(1e-5);
    TVM_ATTR_FIELD(center).set_default(true).describe(
        "If true, add offset of beta to normalized tensor; "
        "otherwise, beta is ignored.");
    TVM_ATTR_FIELD(scale).set_default(true).describe(
        "If true, multiply by gamma; otherwise, gamma is ignored.");
  }
};  // struct InstanceNormAttrs

/*! \brief Attributes used in layer_norm operator */
struct LayerNormAttrs : public tvm::AttrsNode<LayerNormAttrs> {
  int axis;        // axis holding the channel dimension; -1 = last axis
  double epsilon;  // numerical-stability term added to the variance
  bool center;     // apply the learned beta offset
  bool scale;      // apply the learned gamma scale

  TVM_DECLARE_ATTRS(LayerNormAttrs, "relay.attrs.LayerNormAttrs") {
    TVM_ATTR_FIELD(axis).set_default(-1).describe("Specify which shape axis denotes the channel.");
    TVM_ATTR_FIELD(epsilon).set_default(1e-5).describe(
        "Small float added to variance to avoid dividing by zero");
    TVM_ATTR_FIELD(center).set_default(true).describe(
        "If true, add offset of beta to normalized tensor; "
        "otherwise, beta is ignored.");
    TVM_ATTR_FIELD(scale).set_default(true).describe(
        "If true, multiply by gamma; otherwise, gamma is ignored.");
  }
};  // struct LayerNormAttrs

/*!
\brief Attributes used in group_norm operator */
struct GroupNormAttrs : public tvm::AttrsNode<GroupNormAttrs> {
  int num_groups;  // number of channel groups normalized independently
  int axis;        // axis holding the channel dimension
  double epsilon;  // numerical-stability term added to the variance
  bool center;     // apply the learned beta offset
  bool scale;      // apply the learned gamma scale

  TVM_DECLARE_ATTRS(GroupNormAttrs, "relay.attrs.GroupNormAttrs") {
    TVM_ATTR_FIELD(num_groups)
        .set_default(0)
        .describe("Specify number of groups to separate the channels into.");
    TVM_ATTR_FIELD(axis).set_default(1).describe("Specify which shape axis denotes the channel.");
    TVM_ATTR_FIELD(epsilon).set_default(1e-5).describe(
        "Small float added to variance to avoid dividing by zero");
    TVM_ATTR_FIELD(center).set_default(true).describe(
        "If true, add offset of beta to normalized tensor; "
        "otherwise, beta is ignored.");
    TVM_ATTR_FIELD(scale).set_default(true).describe(
        "If true, multiply by gamma; otherwise, gamma is ignored.");
  }
};  // struct GroupNormAttrs

/*! \brief Attributes for LRN operator */
struct LRNAttrs : public tvm::AttrsNode<LRNAttrs> {
  int size;      // extent of the local normalization region
  int axis;      // channel axis of the input layout
  double bias;   // offset preventing division by zero
  double alpha;  // scaling parameter
  double beta;   // exponent parameter

  TVM_DECLARE_ATTRS(LRNAttrs, "relay.attrs.LRNAttrs") {
    TVM_ATTR_FIELD(size).set_default(5).describe(
        "The size of the local region to be considered for normalization.");
    TVM_ATTR_FIELD(axis).set_default(1).describe("Axis of input data layout channel.");
    TVM_ATTR_FIELD(bias).set_default(2).describe("The offset parameter to avoid division by 0.");
    TVM_ATTR_FIELD(alpha).set_default(0.0001).describe("The scaling parameter.");
    TVM_ATTR_FIELD(beta).set_default(0.75).describe("The exponent parameter.");
  }
};

/*! \brief Attributes for L2Normalize operator */
struct L2NormalizeAttrs : public tvm::AttrsNode<L2NormalizeAttrs> {
  double eps;           // lower bound on the norm, preventing division by zero
  Array<Integer> axis;  // axes over which the norm is computed

  TVM_DECLARE_ATTRS(L2NormalizeAttrs, "relay.attrs.L2NormalizeAttrs") {
    TVM_ATTR_FIELD(eps).describe("A lower bound value for the norm, to avoid division by 0.");
    TVM_ATTR_FIELD(axis).describe("Axis over the normalization applied.");
  }
};

/*!
\brief Attributes for DeformableConv2D operator */
struct DeformableConv2DAttrs : public tvm::AttrsNode<DeformableConv2DAttrs> {
  Array<IndexExpr> strides;
  Array<IndexExpr> padding;
  Array<IndexExpr> dilation;
  int deformable_groups;         // number of groups sharing one set of offsets
  int groups;                    // grouped-convolution factor; 1 = dense connectivity
  IndexExpr channels;            // number of output channels; unset => inferred from weight
  Array<IndexExpr> kernel_size;  // spatial (H, W) extent of the convolution window
  std::string data_layout;
  std::string kernel_layout;
  std::string out_layout;        // "" => same as data_layout
  DataType out_dtype;            // 0-bit dtype encodes "none" (infer from inputs)

  TVM_DECLARE_ATTRS(DeformableConv2DAttrs, "relay.attrs.DeformableConv2DAttrs") {
    TVM_ATTR_FIELD(strides)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the strides of the convolution.");
    TVM_ATTR_FIELD(padding)
        .set_default(Array<IndexExpr>({0, 0}))
        .describe(
            "If padding is non-zero, then the input is implicitly zero-padded"
            "Padding support both symmetric and asymmetric as"
            "one int : same padding used on all sides"
            "two int : bottom, right will use same padding as top, left"
            "four int : padding width in the order of (top, left, bottom, right)");
    TVM_ATTR_FIELD(dilation)
        .set_default(Array<IndexExpr>({1, 1}))
        .describe("Specifies the dilation rate to use for dilated convolution.");
    TVM_ATTR_FIELD(deformable_groups)
        .set_default(1)
        .describe(
            "Controls the connections between inputs and offsets."
            "Input channels are partitioned into multiple deformable groups. Offsets"
            "are shared across input channels in the same deformable group.");
    TVM_ATTR_FIELD(groups).set_default(1).describe(
        "Controls the connections between inputs and outputs."
        "At groups=1, all inputs are convolved to all outputs."
        "At groups=2, the operation becomes equivalent to having two convolution"
        "layers side by side, each seeing half the input channels, and producing"
        "half the output channels, and both subsequently concatenated.");
    TVM_ATTR_FIELD(channels)
        .describe(
            "The number of output channels in the convolution."
            " If it is not set, inferred by shape of the weight.")
        .set_default(NullValue<IndexExpr>());
    TVM_ATTR_FIELD(kernel_size)
        .describe("Specifies the dimensions of the convolution window.")
        .set_default(NullValue<Array<IndexExpr>>());
    TVM_ATTR_FIELD(data_layout)
        .set_default("NCHW")
        .describe(
            "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
            "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
            "dimensions respectively. Convolution is applied on the 'H' and"
            "'W' dimensions.");
    TVM_ATTR_FIELD(kernel_layout)
        .set_default("OIHW")
        .describe(
            "Dimension ordering of weight. Can be 'OIHW', 'OIHW16o16i', etc."
            "'O', 'I', 'H', 'W' stands for num_filter, input_channel, height, and width"
            "dimensions respectively.");
    TVM_ATTR_FIELD(out_layout)
        .set_default("")
        .describe(
            "Dimension ordering of output. Can be 'NCHW', 'NHWC', etc."
            "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
            "dimensions respectively. Default to be same as input layout.");
    // use 0 bits to indicate none.
    TVM_ATTR_FIELD(out_dtype)
        .set_default(NullValue<DataType>())
        .describe("Output data type, set to explicit type under mixed precision setting");
  }
};

/*! \brief Attributes used in subpixel operators */
struct SubPixelAttrs : public tvm::AttrsNode<SubPixelAttrs> {
  int block_size;      // edge length of the subpixel block to compose/decompose
  std::string layout;
  std::string mode;    // channel access order: "DCR" or "CDR"

  TVM_DECLARE_ATTRS(SubPixelAttrs, "relay.attrs.SubPixelAttrs") {
    TVM_ATTR_FIELD(block_size)
        .describe("The size of subpixel blocks to compose or decompose.")
        .set_default(1);
    TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
        "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
        "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
        "dimensions respectively.");
    TVM_ATTR_FIELD(mode).set_default("DCR").describe(
        "Indicates order in which channels are accessed. Must be one of"
        "DCR or CDR.");
  }
};  // struct SubPixelAttrs

/*!
\brief Attributes used in correlation operators */ struct CorrelationAttrs : public tvm::AttrsNode<CorrelationAttrs> { int kernel_size; int max_displacement; int stride1; int stride2; Array<IndexExpr> padding; bool is_multiply; String layout; TVM_DECLARE_ATTRS(CorrelationAttrs, "relay.attrs.CorrelationAttrs") { TVM_ATTR_FIELD(kernel_size) .describe("Kernel size for correlation, must be an odd number.") .set_default(1); TVM_ATTR_FIELD(max_displacement).describe("Max displacement of Correlation.").set_default(1); TVM_ATTR_FIELD(stride1).describe("Stride for data1.").set_default(1); TVM_ATTR_FIELD(stride2).describe("Stride for data2.").set_default(1); TVM_ATTR_FIELD(padding) .describe("Padding for data1 and data2.") .set_default(Array<IndexExpr>{0, 0}); TVM_ATTR_FIELD(is_multiply) .describe("Operation type is either multiplication or substraction.") .set_default(true); TVM_ATTR_FIELD(layout).set_default("NCHW").describe( "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc." "'N', 'C', 'H', 'W' stands for batch, channel, height, and width" "dimensions respectively."); } }; // struct CorrelationAttrs /*! \brief Attributes used in SpaceToBatchND operator */ struct SpaceToBatchNDAttrs : public tvm::AttrsNode<SpaceToBatchNDAttrs> { Array<Integer> block_shape; Array<Array<IndexExpr>> paddings; double pad_value; TVM_DECLARE_ATTRS(SpaceToBatchNDAttrs, "relay.attrs.SpaceToBatchNDAttrs") { TVM_ATTR_FIELD(block_shape) .set_default(Array<Integer>({1, 1})) .describe("1-D containing block size for each spatial dimension."); TVM_ATTR_FIELD(paddings).describe("2-D containing paddings for each spatial dimension."); TVM_ATTR_FIELD(pad_value).set_default(0.0).describe("The value used for padding."); } }; // struct SpaceToBatchNDAttrs /*! 
\brief Attributes used in BatchToSpaceND operator */ struct BatchToSpaceNDAttrs : public tvm::AttrsNode<BatchToSpaceNDAttrs> { Array<Integer> block_shape; Array<Array<IndexExpr>> crops; TVM_DECLARE_ATTRS(BatchToSpaceNDAttrs, "relay.attrs.BatchToSpaceNDAttrs") { TVM_ATTR_FIELD(block_shape) .set_default(Array<Integer>({1, 1})) .describe("1-D containing block size for each spatial dimension."); TVM_ATTR_FIELD(crops).describe("2-D containing amount to crop from spatial dimension."); } }; // struct BatchToSpaceNDAttrs /*! \brief Attributes used in NLLLoss operator */ struct NLLLossAttrs : public tvm::AttrsNode<NLLLossAttrs> { std::string reduction; int ignore_index; TVM_DECLARE_ATTRS(NLLLossAttrs, "relay.attrs.NLLLossAttrs") { TVM_ATTR_FIELD(reduction).set_default("mean").describe( "The reduction method to apply to the output. Can be" "'none', 'mean' or 'sum'."); TVM_ATTR_FIELD(ignore_index).describe("The target value to ignore."); } }; // struct NLLLossAttrs } // namespace relay } // namespace tvm #endif // TVM_RELAY_ATTRS_NN_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/attrs/on_device.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/relay/attrs/on_device.h * \brief Attribute for the "on_device" annotation (ie operator). */ #ifndef TVM_RELAY_ATTRS_ON_DEVICE_H_ #define TVM_RELAY_ATTRS_ON_DEVICE_H_ #include <tvm/ir/attrs.h> #include <tvm/target/virtual_device.h> #include <string> namespace tvm { namespace relay { /*! * \brief Attributes for the "on_device" annotation (ie operator). * * The Relay call: * \code * on_device(sub_expr, virtual_device=S) * \endcode * constrains \p sub_expr to execute and store its result on the \p VirtualDevice \p S. * However the annotation itself may appear in an expression to be executed and stored on a * different \p VirtualDevice. If so the compiler will automatically insert a "device_copy" call to * mediate the transition between \p VirtualDevices. * * E.g.: Assuming %x and %y reside on the GPU and %z on the CPU then: * \code * multiply(on_device(add(%x, %y), virtual_device=GPU), %z) * \endcode * indicates the \p add should execute on the GPU but the \p multiply should execute on the CPU. 
* The compiler will rewrite this to: * \code * multiply(device_copy(add(%x, %y), src_virtual_device=GPU, dst_virtual_device=CPU), %z) * \endcode * * The \p constraint_body (default true) and \p constraint_result (default false) fields can be * used by passes for finer-grained control over how the \p VirtualDevice constraint should be * applied. */ struct OnDeviceAttrs : public tvm::AttrsNode<OnDeviceAttrs> { /*! * \brief The \p VirtualDevice to constraint to apply to the body, result, or both body and result * of the "on_device" call. */ VirtualDevice virtual_device = VirtualDevice::FullyUnconstrained(); /*! * \brief If false (the default), the result of the "on_device" call is not constrained to be * \p virtual_device. */ bool constrain_result = false; /*! * \brief If true (the default), the body of the "on_device" call is constrained to be \p * virtual_device. */ bool constrain_body = true; /*! * \brief Returns true if both the body and result are constrained. */ bool is_fixed() const { return constrain_result && constrain_body; } /*! * \brief Returns true only the body is constrained (the 'normal' case). */ bool is_normal() const { return !constrain_result && constrain_body; } TVM_DECLARE_ATTRS(OnDeviceAttrs, "relay.attrs.OnDeviceAttrs") { TVM_ATTR_FIELD(virtual_device) .describe("The (virtual) device to constrain to.") .set_default(VirtualDevice::FullyUnconstrained()); TVM_ATTR_FIELD(constrain_result) .describe("Whether the constraint applies to the overall expression") .set_default(false); TVM_ATTR_FIELD(constrain_body) .describe("Whether the constraint applies to the body sub-expression.") .set_default(true); } }; } // namespace relay } // namespace tvm #endif // TVM_RELAY_ATTRS_ON_DEVICE_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/attrs/random.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/relay/attrs/vision.h * \brief Auxiliary attributes for random operators. */ #ifndef TVM_RELAY_ATTRS_RANDOM_H_ #define TVM_RELAY_ATTRS_RANDOM_H_ #include <tvm/ir/attrs.h> namespace tvm { namespace relay { struct ThreefryGenerateAttrs : public tvm::AttrsNode<ThreefryGenerateAttrs> { Array<Integer> out_shape; TVM_DECLARE_ATTRS(ThreefryGenerateAttrs, "relay.attrs.ThreefryGenerateAttrs") { TVM_ATTR_FIELD(out_shape).describe("Shape of random numbers to generate"); } }; struct UniformAttrs : public tvm::AttrsNode<UniformAttrs> { Array<Integer> out_shape; DataType out_dtype; TVM_DECLARE_ATTRS(UniformAttrs, "relay.attrs.UniformAttrs") { TVM_ATTR_FIELD(out_shape).describe("Shape of random numbers to generate"); TVM_ATTR_FIELD(out_dtype) .set_default(NullValue<DataType>()) .describe("Data type of the generated numbers"); } }; struct NormalAttrs : public tvm::AttrsNode<NormalAttrs> { Array<Integer> out_shape; DataType out_dtype; TVM_DECLARE_ATTRS(NormalAttrs, "relay.attrs.NormalAttrs") { TVM_ATTR_FIELD(out_shape).describe("Shape of random numbers to generate"); TVM_ATTR_FIELD(out_dtype) .set_default(NullValue<DataType>()) .describe("Data type of the generated numbers"); } }; struct 
MultinomialAttrs : public tvm::AttrsNode<MultinomialAttrs> { Integer num_samples; TVM_DECLARE_ATTRS(MultinomialAttrs, "relay.attrs.MultinomialAttrs") { TVM_ATTR_FIELD(num_samples) .set_default(1) .describe("Number of samples to draw from the distribution."); } }; } // namespace relay } // namespace tvm #endif // TVM_RELAY_ATTRS_RANDOM_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/attrs/reduce.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/attrs/reduce.h
 * \brief Auxiliary attributes for reduce operators.
 */
#ifndef TVM_RELAY_ATTRS_REDUCE_H_
#define TVM_RELAY_ATTRS_REDUCE_H_

#include <tvm/ir/attrs.h>

#include <string>

namespace tvm {
namespace relay {

/*! \brief Attributes for Reduce operators */
struct ReduceAttrs : public tvm::AttrsNode<ReduceAttrs> {
  Array<Integer> axis;  // axes to reduce over; unset means reduce over all axes
  bool keepdims;        // keep reduced axes as size-1 dimensions
  bool exclude;         // reduce over the complement of `axis` instead

  TVM_DECLARE_ATTRS(ReduceAttrs, "relay.attrs.ReduceAttrs") {
    TVM_ATTR_FIELD(axis)
        .set_default(NullValue<Array<Integer>>())
        .describe(R"code(The axis or axes along which to perform the reduction.

      The default, `axis=()`, will compute over all elements into a
      scalar array with shape `(1,)`.

      If `axis` is int, a reduction is performed on a particular axis.

      If `axis` is a tuple of ints, a reduction is performed on all the axes
      specified in the tuple.

      If `exclude` is true, reduction will be performed on the axes that are
      NOT in axis instead.)code");
    TVM_ATTR_FIELD(keepdims).set_default(false).describe(
        "If this is set to `True`, the reduced axes are left "
        "in the result as dimension with size one.");
    TVM_ATTR_FIELD(exclude).set_default(false).describe(
        "Whether to perform reduction on axis that are NOT in axis instead.");
  }
};

/*! \brief Attributes for Reduce operators which reduce by finding
    a single element. E.g. argmin */
struct ArgReduceAttrs : public tvm::AttrsNode<ArgReduceAttrs> {
  Array<Integer> axis;    // axes to reduce over; unset means reduce over all axes
  bool keepdims;          // keep reduced axes as size-1 dimensions
  bool select_last_index; // break ties toward the last occurrence instead of the first
  bool exclude;           // reduce over the complement of `axis` instead

  TVM_DECLARE_ATTRS(ArgReduceAttrs, "relay.attrs.ArgReduceAttrs") {
    TVM_ATTR_FIELD(axis)
        .set_default(NullValue<Array<Integer>>())
        .describe(R"code(The axis or axes along which to perform the reduction.

      The default, `axis=()`, will compute over all elements into a
      scalar array with shape `(1,)`.

      If `axis` is int, a reduction is performed on a particular axis.

      If `axis` is a tuple of ints, a reduction is performed on all the axes
      specified in the tuple.

      If `exclude` is true, reduction will be performed on the axes that are
      NOT in axis instead.)code");
    TVM_ATTR_FIELD(keepdims).set_default(false).describe(
        "If this is set to `True`, the reduced axes are left "
        "in the result as dimension with size one.");
    TVM_ATTR_FIELD(select_last_index)
        .set_default(false)
        .describe(
            "Whether to select the last index if the target element appears multiple times, else "
            "select the first index which the target element appears");
    TVM_ATTR_FIELD(exclude).set_default(false).describe(
        "Whether to perform reduction on axis that are NOT in axis instead.");
  }
};

/*! \brief Attributes for the variance operator */
struct VarianceAttrs : public tvm::AttrsNode<VarianceAttrs> {
  Array<Integer> axis;  // axes to reduce over; unset means reduce over all axes
  bool keepdims;        // keep reduced axes as size-1 dimensions
  bool exclude;         // reduce over the complement of `axis` instead
  bool unbiased;        // use the unbiased (N-1 denominator) estimator

  TVM_DECLARE_ATTRS(VarianceAttrs, "relay.attrs.VarianceAttrs") {
    TVM_ATTR_FIELD(axis)
        .set_default(NullValue<Array<Integer>>())
        .describe(R"code(The axis or axes along which to perform the reduction.

      The default, `axis=()`, will compute over all elements into a
      scalar array with shape `(1,)`.

      If `axis` is int, a reduction is performed on a particular axis.

      If `axis` is a tuple of ints, a reduction is performed on all the axes
      specified in the tuple.

      If `exclude` is true, reduction will be performed on the axes that are
      NOT in axis instead.)code");
    TVM_ATTR_FIELD(keepdims).set_default(false).describe(
        "If this is set to `True`, the reduced axes are left "
        "in the result as dimension with size one.");
    TVM_ATTR_FIELD(exclude).set_default(false).describe(
        "Whether to perform reduction on axis that are NOT in axis instead.");
    TVM_ATTR_FIELD(unbiased).set_default(false).describe("Whether to use the unbiased estimation.");
  }
};

}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_ATTRS_REDUCE_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/attrs/transform.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/relay/attrs/transform.h * \brief Transform operators. */ #ifndef TVM_RELAY_ATTRS_TRANSFORM_H_ #define TVM_RELAY_ATTRS_TRANSFORM_H_ #include <tvm/ir/attrs.h> #include <tvm/relay/base.h> #include <tvm/relay/expr.h> #include <tvm/tir/index_map.h> #include <string> namespace tvm { namespace relay { /*! \brief Attributes used for the sliding_window operator */ struct SlidingWindowAttrs : public tvm::AttrsNode<SlidingWindowAttrs> { int axis; Array<Integer> window_shape; Array<Integer> strides; TVM_DECLARE_ATTRS(SlidingWindowAttrs, "relay.attrs.SlidingWindowAttrs") { TVM_ATTR_FIELD(axis).describe( "What axis the sliding window begin forming over." "Window will be slid over this axis and all following axes." "The axis value determines the window shape (and thus, the" "number of strides):" "window shape and strides must both be of length" "`data.ndim-axis`."); TVM_ATTR_FIELD(window_shape) .describe( "The window shape to form over the input." "Window shape must be of length `data.ndim-axis`."); TVM_ATTR_FIELD(strides).describe( "How to stride the window along each dimension." "Strides must be of length `data.ndim-axis`."); } }; // struct SlidingWindowAttrs /*! 
\brief data type cast */ struct CastAttrs : public tvm::AttrsNode<CastAttrs> { DataType dtype; TVM_DECLARE_ATTRS(CastAttrs, "relay.attrs.CastAttrs") { TVM_ATTR_FIELD(dtype).describe("Target data type"); } }; // struct CastAttrs. /*! \brief Attributes used in expand_dims operators */ struct ExpandDimsAttrs : public tvm::AttrsNode<ExpandDimsAttrs> { int axis; int num_newaxis; TVM_DECLARE_ATTRS(ExpandDimsAttrs, "relay.attrs.ExpandDimsAttrs") { TVM_ATTR_FIELD(axis).describe( "The axis at which the input array is expanded." "Should lie in range `[-data.ndim - 1, data.ndim]`." "If `axis < 0`, it is the first axis inserted;" "If `axis >= 0`, it is the last axis inserted in Python's negative indexing."); TVM_ATTR_FIELD(num_newaxis) .describe("Number of axes to be inserted. Should be >= 0.") .set_lower_bound(0) .set_default(1); } }; // struct ExpandDimsAttrs /*! \brief Attributes used in dynamic expand_dims operators */ struct DynExpandDimsAttrs : public tvm::AttrsNode<DynExpandDimsAttrs> { int num_newaxis; TVM_DECLARE_ATTRS(DynExpandDimsAttrs, "relay.attrs.DynExpandDimsAttrs") { TVM_ATTR_FIELD(num_newaxis) .describe("Number of axes to be inserted. Should be >= 0.") .set_lower_bound(0) .set_default(1); } }; // struct ExpandDimsAttrs /*! \brief Attributes used in concatenate operators */ struct ConcatenateAttrs : public tvm::AttrsNode<ConcatenateAttrs> { int axis; TVM_DECLARE_ATTRS(ConcatenateAttrs, "relay.attrs.ConcatenateAttrs") { TVM_ATTR_FIELD(axis) .describe( "The axis at which the input arrays are concatenated." "Should lie in range `[-ndim, ndim)`.") .set_default(0); } }; // struct ConcatenateAttrs /*! \brief Attributes used in transpose operators */ struct TransposeAttrs : public tvm::AttrsNode<TransposeAttrs> { Array<Integer> axes; TVM_DECLARE_ATTRS(TransposeAttrs, "relay.attrs.TransposeAttrs") { TVM_ATTR_FIELD(axes).describe("The target axes order, reverse order if not specified."); } }; // struct TransposeAttrs /*! 
\brief Attributes used in reshape operators */ struct ReshapeAttrs : public tvm::AttrsNode<ReshapeAttrs> { Array<Integer> newshape; bool allowzero; TVM_DECLARE_ATTRS(ReshapeAttrs, "relay.attrs.ReshapeAttrs") { TVM_ATTR_FIELD(newshape).describe( "The new shape. Should be compatible with the original shape."); TVM_ATTR_FIELD(allowzero).set_default(0).describe( "Whether to honor the value of zero in newshape."); } }; // struct ReshapeAttrs /*! \brief Attributes used in MXNet-style reshape_like operators */ struct ReshapeLikeAttrs : public tvm::AttrsNode<ReshapeLikeAttrs> { int lhs_begin; Integer lhs_end; // can be None int rhs_begin; Integer rhs_end; // can be None TVM_DECLARE_ATTRS(ReshapeLikeAttrs, "relay.attrs.ReshapeLikeAttrs") { TVM_ATTR_FIELD(lhs_begin).set_default(0).describe( "The axis of the input where reshaping should begin."); TVM_ATTR_FIELD(lhs_end) .set_default(NullValue<Integer>()) .describe("The axis of the input where reshaping should end, exclusive."); TVM_ATTR_FIELD(rhs_begin).set_default(0).describe( "The axis of the shape_like tensor to begin taking dimensions from."); TVM_ATTR_FIELD(rhs_end) .set_default(NullValue<Integer>()) .describe("The axis of the shape_like tensor to end taking dimensions from, exclusive."); } }; // struct ReshapeLikeAttrs struct ScatterAttrs : public tvm::AttrsNode<ScatterAttrs> { Integer axis; TVM_DECLARE_ATTRS(ScatterAttrs, "relay.attrs.ScatterAttrs") { TVM_ATTR_FIELD(axis).set_default(0).describe("The axis over which to select values."); } }; struct ScatterAddAttrs : public tvm::AttrsNode<ScatterAddAttrs> { Integer axis; TVM_DECLARE_ATTRS(ScatterAddAttrs, "relay.attrs.ScatterAddAttrs") { TVM_ATTR_FIELD(axis).set_default(0).describe("The axis over which to select values."); } }; struct ScatterNDAttrs : public tvm::AttrsNode<ScatterNDAttrs> { String mode; TVM_DECLARE_ATTRS(ScatterNDAttrs, "relay.attrs.ScatterNDAttrs") { TVM_ATTR_FIELD(mode).describe( "Accumulation mode of the scatter, either \"update\" or \"add\"."); } }; 
struct GatherAttrs : public tvm::AttrsNode<GatherAttrs> { Integer axis; TVM_DECLARE_ATTRS(GatherAttrs, "relay.attrs.GatherAttrs") { TVM_ATTR_FIELD(axis) .set_default(NullValue<Integer>()) .describe("The axis over which to select values."); } }; struct GatherNDAttrs : public tvm::AttrsNode<GatherNDAttrs> { Integer batch_dims; Optional<Integer> index_rank; TVM_DECLARE_ATTRS(GatherNDAttrs, "relay.attrs.GatherNDAttrs") { TVM_ATTR_FIELD(batch_dims).set_default(Integer(0)).describe("The number of batch dimensions."); TVM_ATTR_FIELD(index_rank) .set_default(NullValue<Integer>()) .describe( "The size of an indexing tuple, which is a fixed value. Only needed when the number of " "indexting tuples is dynamic."); } }; struct TakeAttrs : public tvm::AttrsNode<TakeAttrs> { Integer batch_dims; Integer axis; tvm::String mode; TVM_DECLARE_ATTRS(TakeAttrs, "relay.attrs.TakeAttrs") { TVM_ATTR_FIELD(batch_dims) .set_default(0) .describe("The batch_dims over which to select values."); TVM_ATTR_FIELD(axis) .set_default(NullValue<Integer>()) .describe("The axis over which to select values."); TVM_ATTR_FIELD(mode).set_default("clip").describe( "Specify how out-of-bound indices will behave." "clip - clip to the range (default)" "wrap - wrap around the indices" "fast - no clip or wrap around (user must make sure indices are in-bound)"); } }; /*! \brief Attributes that specify a tensor */ struct InitOpAttrs : public tvm::AttrsNode<InitOpAttrs> { Optional<Array<Integer>> shape; DataType dtype; TVM_DECLARE_ATTRS(InitOpAttrs, "relay.attrs.InitOpAttrs") { TVM_ATTR_FIELD(shape).describe("Target shape."); TVM_ATTR_FIELD(dtype).describe("Target data type.").set_default(NullValue<DataType>()); } }; // struct InitOpAttrs /*! \brief Attributes used in arange operators */ struct ArangeAttrs : public tvm::AttrsNode<ArangeAttrs> { Expr start; Expr stop; Expr step; DataType dtype; TVM_DECLARE_ATTRS(ArangeAttrs, "relay.attrs.ArangeAttrs") { TVM_ATTR_FIELD(start).describe("Start of interval. 
The interval includes this value."); TVM_ATTR_FIELD(stop).describe("Stop of interval. The interval does not include this value."); TVM_ATTR_FIELD(step).describe("Spacing between values."); TVM_ATTR_FIELD(dtype).describe("Target data type."); } }; // struct ArangeAttrs /*! \brief Attributes used in meshgrid operators */ struct MeshgridAttrs : public tvm::AttrsNode<MeshgridAttrs> { std::string indexing; TVM_DECLARE_ATTRS(MeshgridAttrs, "relay.attrs.MeshgridAttrs") { TVM_ATTR_FIELD(indexing) .describe( "Indexing mode, either \"ij\" for matrix or \"xy\" for cartesian in which first two" "dimensions are swapped.") .set_default("ij"); } }; // struct MeshgridAttrs /*! \brief Attributes used in stack operators */ struct StackAttrs : public tvm::AttrsNode<StackAttrs> { Integer axis; TVM_DECLARE_ATTRS(StackAttrs, "relay.attrs.StackAttrs") { TVM_ATTR_FIELD(axis).set_default(0).describe( "The axis in the result array along which the input arrays are stacked."); } }; // struct StackAttrs /*! \brief Attributes used in repeat operators */ struct RepeatAttrs : public tvm::AttrsNode<RepeatAttrs> { Integer repeats; Integer axis; TVM_DECLARE_ATTRS(RepeatAttrs, "relay.attrs.RepeatAttrs") { TVM_ATTR_FIELD(repeats).describe("The number of repetitions for each element."); TVM_ATTR_FIELD(axis) .set_default(NullValue<Integer>()) .describe(" The axis along which to repeat values."); } }; // struct RepeatAttrs /*! \brief Attributes used in tile operators */ struct TileAttrs : public tvm::AttrsNode<TileAttrs> { Array<Integer> reps; TVM_DECLARE_ATTRS(TileAttrs, "relay.attrs.TileAttrs") { TVM_ATTR_FIELD(reps).describe( "The number of times for repeating the tensor a." "Each dim sizeof reps must be a positive integer."); } }; // struct TileAttrs /*! 
\brief Attributes used in reverse operators */ struct ReverseAttrs : public tvm::AttrsNode<ReverseAttrs> { Integer axis; TVM_DECLARE_ATTRS(ReverseAttrs, "relay.attrs.ReverseAttrs") { TVM_ATTR_FIELD(axis) .set_default(NullValue<Integer>()) .describe("The axis along which to reverse elements."); } }; // struct ReverseAttrs /*! \brief Attributes used in reverse_sequence operators */ struct ReverseSequenceAttrs : public tvm::AttrsNode<ReverseSequenceAttrs> { Integer seq_axis; Integer batch_axis; TVM_DECLARE_ATTRS(ReverseSequenceAttrs, "relay.attrs.ReverseSequenceAttrs") { TVM_ATTR_FIELD(seq_axis).set_default(1).describe( "The seq axis along which to reverse elements."); TVM_ATTR_FIELD(batch_axis) .set_default(0) .describe("The batch axis along which to slice the tensor."); } }; // struct ReverseSequenceAttrs /*! \brief Attributes used in squeeze operators */ struct SqueezeAttrs : public tvm::AttrsNode<SqueezeAttrs> { // use axis to make the name numpy compatible. Array<Integer> axis; TVM_DECLARE_ATTRS(SqueezeAttrs, "relay.attrs.SqueezeAttrs") { TVM_ATTR_FIELD(axis) .describe( "The axis to squeeze in the input tensor." "If `axis = None`, all axis of dimension 1 get squeezed;" "Else, the dimension in axes get squeezed." "It is an error if an axis does not has dimension 1.") .set_default(NullValue<Array<Integer>>()); } }; // struct SqueezeAttrs struct SplitAttrs : public tvm::AttrsNode<SplitAttrs> { ObjectRef indices_or_sections; int axis; TVM_DECLARE_ATTRS(SplitAttrs, "relay.attrs.SplitAttrs") { TVM_ATTR_FIELD(indices_or_sections) .describe( "Indices or sections to split into. Accepts an int or a tuple" "If indices_or_sections is an integer, the input will be divided equally" "along given axis. If such a split is not possible, an error is raised." "If indices_or_sections is a tuple of sorted integers," "the entries indicate where along axis the array is split."); TVM_ATTR_FIELD(axis).set_default(0).describe("the axis to be splitted."); } }; /*! 
\brief Attributes for StridedSlice operator */ struct StridedSliceAttrs : public tvm::AttrsNode<StridedSliceAttrs> { Optional<Array<Integer>> begin; Optional<Array<Integer>> end; Optional<Array<Integer>> strides; tvm::String slice_mode; Optional<Array<Integer>> axes; TVM_DECLARE_ATTRS(StridedSliceAttrs, "relay.attrs.StridedSliceAttrs") { TVM_ATTR_FIELD(begin).describe("Indices for begin of slice, begin index is also inclusive"); TVM_ATTR_FIELD(end).describe("Indices for end of slice, end index is exclusive"); TVM_ATTR_FIELD(strides).describe( "Stride values of the slice, a stride can be negative, which causes a reverse slice."); TVM_ATTR_FIELD(slice_mode) .set_default("end") .describe( "The slice mode [end, size]." "end - The default slice mode, ending indices for the slice." "size - The input strides will be ignored, input end in this mode indicates the size" "of a slice starting at the location specified by begin. If end[i] is -1," "all remaining elements in that dimension are included in the slice"); TVM_ATTR_FIELD(axes).describe( "Axes along which slicing is applied. When it is specified, the length of begin, end, " "strides, and axes must be equal."); } }; struct SliceLikeAttrs : public tvm::AttrsNode<SliceLikeAttrs> { Array<Integer> axes; TVM_DECLARE_ATTRS(SliceLikeAttrs, "relay.attrs.SliceLikeAttrs") { TVM_ATTR_FIELD(axes).describe( "List of axes on which input data will be sliced according to the " "corresponding size of the second input. By default will slice " "on all axes. Negative axes mean counting in reverse."); } }; /*! \brief Attributes for Clip operator */ struct ClipAttrs : public tvm::AttrsNode<ClipAttrs> { double a_min; double a_max; TVM_DECLARE_ATTRS(ClipAttrs, "relay.attrs.ClipAttrs") { TVM_ATTR_FIELD(a_min).describe("The minimum clip value."); TVM_ATTR_FIELD(a_max).describe("The maximum clip value."); } }; /*! 
\brief Attributes for FixedPointMultiply operator */ struct FixedPointMultiplyAttrs : public tvm::AttrsNode<FixedPointMultiplyAttrs> { int32_t multiplier; int32_t shift; TVM_DECLARE_ATTRS(FixedPointMultiplyAttrs, "relay.attrs.FixedPointMultiplyAttrs") { TVM_ATTR_FIELD(multiplier) .describe("Multiplier of a fixed floating point number described as multiplier*2^(shift)"); TVM_ATTR_FIELD(shift).describe( "Shift of a fixed floating point number described as multiplier*2^(shift)"); } }; /*! \brief Attributes for per channel/per axes FixedPointMultiply operator */ struct FixedPointMultiplyPerAxisAttrs : public tvm::AttrsNode<FixedPointMultiplyPerAxisAttrs> { bool is_lshift_required; bool is_rshift_required; Array<Integer> axes; TVM_DECLARE_ATTRS(FixedPointMultiplyPerAxisAttrs, "relay.attrs.FixedPointMultiplyPerAxisAttrs") { TVM_ATTR_FIELD(is_lshift_required) .describe("Whether left shift is required in fixed point multiplication.") .set_default(false); TVM_ATTR_FIELD(is_rshift_required) .describe("Whether right shift is required in fixed point multiplication.") .set_default(false); TVM_ATTR_FIELD(axes).describe("List of axes on which input data was quantized."); } }; /*! \brief Attributes for LayoutTransform operator */ struct LayoutTransformAttrs : public tvm::AttrsNode<LayoutTransformAttrs> { std::string src_layout; std::string dst_layout; TVM_DECLARE_ATTRS(LayoutTransformAttrs, "relay.attrs.LayoutTransformAttrs") { TVM_ATTR_FIELD(src_layout).describe("The source layout of the tensor. (e.g. NCHW)"); TVM_ATTR_FIELD(dst_layout).describe("The destination layout of the tensor. (e.g. NCHW16c)"); } }; /*! 
\brief Attributes for AutoSchedulerLayoutTransform operator */
struct AutoSchedulerLayoutTransformAttrs
    : public tvm::AttrsNode<AutoSchedulerLayoutTransformAttrs> {
  // Layout strings use the auto-scheduler convention, e.g. "1N32C112H112W".
  std::string src_layout;
  std::string dst_layout;

  TVM_DECLARE_ATTRS(AutoSchedulerLayoutTransformAttrs,
                    "relay.attrs.AutoSchedulerLayoutTransformAttrs") {
    TVM_ATTR_FIELD(src_layout).describe("The source layout of the tensor. (e.g. 1N32C112H112W)");
    TVM_ATTR_FIELD(dst_layout)
        .describe("The destination layout of the tensor. (e.g. 1N2C112H112W16c)");
  }
};

/*! \brief Attributes for MetaScheduleLayoutTransform operator */
struct MetaScheduleLayoutTransformAttrs : public tvm::AttrsNode<MetaScheduleLayoutTransformAttrs> {
  // Index mapping applied to the buffer layout; see the field description below
  // for a worked example of how `extents` and `reorder` are applied.
  tir::IndexMap index_map;

  TVM_DECLARE_ATTRS(MetaScheduleLayoutTransformAttrs,
                    "relay.attrs.MetaScheduleLayoutTransformAttrs") {
    TVM_ATTR_FIELD(index_map).describe(
        "The order of the extents, for example, "
        "let extents = [2, 3, 4], reorder = [0, 2, 1], and the shape of buffer A is (4, 6)"
        "then A[i, j] will be first rewritten to "
        "A[(6 * i + j) / 12, (6 * i + j) / 4 % 3 , (6 * i + j) % 4] according to the `extents`,"
        "and then reordered to A[(6 * i + j) / 12, (6 * i + j) % 4 , (6 * i + j) / 4 % 3]"
        "according to `reorder`");
  }
};

/*! \brief Attributes for ShapeOf operator */
struct ShapeOfAttrs : public tvm::AttrsNode<ShapeOfAttrs> {
  DataType dtype;  // output dtype of the shape tensor; NullValue means "use default"

  TVM_DECLARE_ATTRS(ShapeOfAttrs, "relay.attrs.ShapeOfAttrs") {
    TVM_ATTR_FIELD(dtype).describe("Target data type").set_default(NullValue<DataType>());
  }
};

/*! \brief Attributes used in sequence_mask operator */
struct SequenceMaskAttrs : public tvm::AttrsNode<SequenceMaskAttrs> {
  double mask_value;  // value written into masked-out positions
  int axis;           // length dimension; only 0 or 1 is supported

  TVM_DECLARE_ATTRS(SequenceMaskAttrs, "relay.attrs.SequenceMaskAttrs") {
    TVM_ATTR_FIELD(mask_value).set_default(0).describe("The masking value.");
    TVM_ATTR_FIELD(axis).set_default(0).describe(
        "The axis of the length dimension. Can only be 0 or 1.");
  }
};  // struct SequenceMaskAttrs.

/*!
\brief Attributes used in sparse_to_dense operator */
struct SparseToDenseAttrs : public tvm::AttrsNode<SparseToDenseAttrs> {
  Array<Integer> output_shape;

  TVM_DECLARE_ATTRS(SparseToDenseAttrs, "relay.attrs.SparseToDenseAttrs") {
    TVM_ATTR_FIELD(output_shape).describe("Shape of the dense output tensor");
  }
};  // struct SparseToDenseAttrs

/*! \brief Attributes for ndarray_size operator */
struct NdarraySizeAttrs : public tvm::AttrsNode<NdarraySizeAttrs> {
  DataType dtype;  // dtype of the returned scalar size

  TVM_DECLARE_ATTRS(NdarraySizeAttrs, "relay.attrs.NdarraySizeAttrs") {
    TVM_ATTR_FIELD(dtype).describe("Target data type").set_default(NullValue<DataType>());
  }
};

/*! \brief Attributes used in one-hot operator */
struct OneHotAttrs : public tvm::AttrsNode<OneHotAttrs> {
  int depth;       // size of the new one-hot axis
  int axis;        // where to insert the one-hot axis; -1 means last
  DataType dtype;  // output dtype

  TVM_DECLARE_ATTRS(OneHotAttrs, "relay.attrs.OneHotAttrs") {
    TVM_ATTR_FIELD(depth).set_default(1).describe("Depth of the one hot dimension.");
    TVM_ATTR_FIELD(axis).set_default(-1).describe("Axis to fill.");
    TVM_ATTR_FIELD(dtype).set_default(NullValue<DataType>()).describe("Output data type.");
  }
};  // struct OneHotAttrs

/*! \brief Attributes used in matrix_set_diag operator */
struct MatrixSetDiagAttrs : public tvm::AttrsNode<MatrixSetDiagAttrs> {
  // Diagonal band [k1, k2] relative to the main diagonal (both inclusive).
  int k1;
  int k2;
  bool super_diag_right_align;
  bool sub_diag_right_align;

  TVM_DECLARE_ATTRS(MatrixSetDiagAttrs, "relay.attrs.MatrixSetDiagAttrs") {
    TVM_ATTR_FIELD(k1).set_default(0).describe("Lower limit (included) of the range of diagonals.");
    TVM_ATTR_FIELD(k2).set_default(0).describe("Upper limit (included) of the range of diagonals.");
    TVM_ATTR_FIELD(super_diag_right_align)
        .set_default(true)
        .describe("Bool, true iff super-diagonal is right aligned (left-padded).");
    TVM_ATTR_FIELD(sub_diag_right_align)
        .set_default(false)
        .describe("Bool, true iff sub-diagonal is right aligned (left-padded).");
  }
};  // struct MatrixSetDiagAttrs

/*!
\brief Attributes used in cumsum and cumprod operator */
struct ScanopAttrs : public tvm::AttrsNode<ScanopAttrs> {
  Integer axis;    // axis to scan along; NullValue means "flatten first"
  DataType dtype;  // output dtype; NullValue means "same as input"
  Bool exclusive = Bool(false);

  TVM_DECLARE_ATTRS(ScanopAttrs, "relay.attrs.ScanopAttrs") {
    TVM_ATTR_FIELD(axis).describe("The axis to operate over").set_default(NullValue<Integer>());
    TVM_ATTR_FIELD(dtype).describe("Output data type").set_default(NullValue<DataType>());
    // Default is 0 which is "false"
    TVM_ATTR_FIELD(exclusive)
        .describe("The first element is not included")
        .set_default(Bool(false));
  }
};  // struct ScanopAttrs

/*! \brief Attributes used in unique operator */
struct UniqueAttrs : public tvm::AttrsNode<UniqueAttrs> {
  bool sorted;
  bool return_counts;

  TVM_DECLARE_ATTRS(UniqueAttrs, "relay.attrs.UniqueAttrs") {
    TVM_ATTR_FIELD(sorted).describe("Whether the unique elements are sorted").set_default(true);
    TVM_ATTR_FIELD(return_counts)
        .describe("Whether to return an additional tensor with counts of each unique elements")
        .set_default(false);
  }
};  // struct UniqueAttrs

/*! \brief Attributes used in einsum operator */
struct EinsumAttrs : public tvm::AttrsNode<EinsumAttrs> {
  String equation;  // einsum subscript specification, e.g. "ij,jk->ik"

  TVM_DECLARE_ATTRS(EinsumAttrs, "relay.attrs.EinsumAttrs") {
    TVM_ATTR_FIELD(equation).describe("The einsum expression string");
  }
};  // struct EinsumAttrs

/*!
\brief Attributes used in stft operator */
struct StftAttrs : public tvm::AttrsNode<StftAttrs> {
  // -1 defaults below mean "derive from input / other fields" at type inference.
  int n_fft;
  int hop_length;
  int win_length;
  bool normalized;
  bool onesided;

  TVM_DECLARE_ATTRS(StftAttrs, "relay.attrs.StftAttrs") {
    TVM_ATTR_FIELD(n_fft).set_default(-1).describe("The size of Fourier transform");
    TVM_ATTR_FIELD(hop_length)
        .set_default(-1)
        .describe("The distance between neighboring sliding window frames");
    TVM_ATTR_FIELD(win_length).set_default(-1).describe("The size of window frame and STFT filter");
    TVM_ATTR_FIELD(normalized)
        .set_default(false)
        .describe("Whether to return the normalized STFT results");
    TVM_ATTR_FIELD(onesided).set_default(true).describe(
        "Whether to return onesided result or fill with conjugate symmetry");
  }
};  // struct StftAttrs

/*! \brief Attributes used in trilu operator */
struct TriluAttrs : public tvm::AttrsNode<TriluAttrs> {
  bool upper;

  TVM_DECLARE_ATTRS(TriluAttrs, "relay.attrs.TriluAttrs") {
    TVM_ATTR_FIELD(upper).set_default(true).describe(
        "Whether to keep the upper or lower half of the diagonal.");
  }
};  // struct TriluAttrs

}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_ATTRS_TRANSFORM_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/attrs/vision.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/relay/attrs/vision.h * \brief Auxiliary attributes for vision operators. */ #ifndef TVM_RELAY_ATTRS_VISION_H_ #define TVM_RELAY_ATTRS_VISION_H_ #include <tvm/ir/attrs.h> #include <tvm/relay/base.h> #include <string> namespace tvm { namespace relay { /*! 
\brief Attributes used in multibox_prior operators */ struct MultiBoxPriorAttrs : public tvm::AttrsNode<MultiBoxPriorAttrs> { Array<IndexExpr> sizes; Array<IndexExpr> ratios; Array<IndexExpr> steps; Array<IndexExpr> offsets; bool clip; TVM_DECLARE_ATTRS(MultiBoxPriorAttrs, "relay.attrs.MultiBoxPriorAttrs") { TVM_ATTR_FIELD(sizes) .set_default(Array<IndexExpr>({static_cast<float>(1.0)})) .describe("List of sizes of generated MultiBoxPriores."); TVM_ATTR_FIELD(ratios) .set_default(Array<IndexExpr>({static_cast<float>(1.0)})) .describe("List of aspect ratios of generated MultiBoxPriores."); TVM_ATTR_FIELD(steps) .set_default(Array<IndexExpr>({static_cast<float>(-1.0), static_cast<float>(-1.0)})) .describe("Priorbox step across y and x, -1 for auto calculation."); TVM_ATTR_FIELD(offsets) .set_default(Array<IndexExpr>({static_cast<float>(0.5), static_cast<float>(0.5)})) .describe("Priorbox center offsets, y and x respectively."); TVM_ATTR_FIELD(clip).set_default(false).describe("Whether to clip out-of-boundary boxes."); } }; struct MultiBoxTransformLocAttrs : public tvm::AttrsNode<MultiBoxTransformLocAttrs> { bool clip; double threshold; Array<IndexExpr> variances; TVM_DECLARE_ATTRS(MultiBoxTransformLocAttrs, "relay.attrs.MultiBoxTransformLocAttrs") { TVM_ATTR_FIELD(clip).set_default(true).describe("Clip out-of-boundary boxes."); TVM_ATTR_FIELD(threshold).set_default(0.01).describe("Threshold to be a positive prediction."); TVM_ATTR_FIELD(variances) .set_default(Array<IndexExpr>({0.1f, 0.1f, 0.2f, 0.2f})) .describe("Variances to be decoded from box regression output."); } }; /*! 
\brief Attributes used in get_valid_counts operator */ struct GetValidCountsAttrs : public tvm::AttrsNode<GetValidCountsAttrs> { Optional<FloatImm> score_threshold; int id_index; int score_index; TVM_DECLARE_ATTRS(GetValidCountsAttrs, "relay.attrs.GetValidCountsAttrs") { TVM_ATTR_FIELD(score_threshold).describe("Lower limit of score for valid bounding boxes."); TVM_ATTR_FIELD(id_index).set_default(0).describe("Axis index of id."); TVM_ATTR_FIELD(score_index).set_default(1).describe("Index of the scores/confidence of boxes."); } }; /*! \brief Attributes used in non_maximum_suppression operator */ struct NonMaximumSuppressionAttrs : public tvm::AttrsNode<NonMaximumSuppressionAttrs> { bool force_suppress; int top_k; int coord_start; int score_index; int id_index; bool return_indices; bool invalid_to_bottom; TVM_DECLARE_ATTRS(NonMaximumSuppressionAttrs, "relay.attrs.NonMaximumSuppressionAttrs") { TVM_ATTR_FIELD(force_suppress) .set_default(false) .describe("Suppress all detections regardless of class_id."); TVM_ATTR_FIELD(top_k).set_default(-1).describe( "Keep maximum top k detections before nms, -1 for no limit."); TVM_ATTR_FIELD(coord_start) .set_default(2) .describe("Start index of the consecutive 4 coordinates."); TVM_ATTR_FIELD(score_index).set_default(1).describe("Index of the scores/confidence of boxes."); TVM_ATTR_FIELD(id_index).set_default(0).describe("Axis index of id."); TVM_ATTR_FIELD(return_indices) .set_default(true) .describe("Whether to return box indices in input data."); TVM_ATTR_FIELD(invalid_to_bottom) .set_default(false) .describe("Whether to move all invalid bounding boxes to the bottom."); } }; /*! 
\brief Attributes used in all_class_non_maximum_suppression operator */ struct AllClassNonMaximumSuppressionAttrs : public tvm::AttrsNode<AllClassNonMaximumSuppressionAttrs> { std::string output_format; TVM_DECLARE_ATTRS(AllClassNonMaximumSuppressionAttrs, "relay.attrs.AllClassNonMaximumSuppressionAttrs") { TVM_ATTR_FIELD(output_format) .set_default("onnx") .describe( "Output format, onnx or tensorflow. Returns outputs in a way that can be easily " "consumed by each frontend."); } }; /*! \brief Attributes used in roi_align operators */ struct ROIAlignAttrs : public tvm::AttrsNode<ROIAlignAttrs> { Array<IndexExpr> pooled_size; double spatial_scale; int sample_ratio; std::string layout; std::string mode; TVM_DECLARE_ATTRS(ROIAlignAttrs, "relay.attrs.ROIAlignAttrs") { TVM_ATTR_FIELD(pooled_size).describe("Output size of roi align."); TVM_ATTR_FIELD(spatial_scale) .describe( "Ratio of input feature map height (or w) to raw image height (or w). " "Equals the reciprocal of total stride in convolutional layers, which should be " "in range (0.0, 1.0]"); TVM_ATTR_FIELD(sample_ratio) .set_default(-1) .describe("Optional sampling ratio of ROI align, using adaptive size by default."); TVM_ATTR_FIELD(layout).set_default("NCHW").describe( "Dimension ordering of data and weight. Can be 'NCHW', 'NHWC', etc." "'N', 'C', 'H', 'W' stands for batch, channel, height, and width" "dimensions respectively. Convolution is applied on the 'H' and" "'W' dimensions."); TVM_ATTR_FIELD(mode).set_default("avg").describe( "Mode for ROI Align. Can be 'avg' or 'max'. The default mode is 'avg'."); } }; /*! 
\brief Attributes used in roi_pool operators */ struct ROIPoolAttrs : public tvm::AttrsNode<ROIPoolAttrs> { Array<IndexExpr> pooled_size; double spatial_scale; std::string layout; TVM_DECLARE_ATTRS(ROIPoolAttrs, "relay.attrs.ROIPoolAttrs") { TVM_ATTR_FIELD(pooled_size).describe("Output size of roi pool."); TVM_ATTR_FIELD(spatial_scale) .describe( "Ratio of input feature map height (or w) to raw image height (or w). " "Equals the reciprocal of total stride in convolutional layers, which should be " "in range (0.0, 1.0]"); TVM_ATTR_FIELD(layout).set_default("NCHW").describe( "Dimension ordering of data and weight. Can be 'NCHW', 'NHWC', etc." "'N', 'C', 'H', 'W' stands for batch, channel, height, and width" "dimensions respectively. Convolution is applied on the 'H' and" "'W' dimensions."); } }; /*! \brief Attributes used in yolo reorg operators */ struct YoloReorgAttrs : public tvm::AttrsNode<YoloReorgAttrs> { Integer stride; TVM_DECLARE_ATTRS(YoloReorgAttrs, "relay.attrs.YoloReorgAttrs") { TVM_ATTR_FIELD(stride).set_default(1).describe("Stride value for yolo reorg"); } }; /*! 
\brief Attributes used in proposal operators */ struct ProposalAttrs : public tvm::AttrsNode<ProposalAttrs> { Array<IndexExpr> scales; Array<IndexExpr> ratios; int feature_stride; double threshold; int rpn_pre_nms_top_n; int rpn_post_nms_top_n; int rpn_min_size; bool iou_loss; TVM_DECLARE_ATTRS(ProposalAttrs, "relay.attrs.ProposalAttrs") { TVM_ATTR_FIELD(scales) .set_default(Array<IndexExpr>({4.0f, 8.0f, 16.0f, 32.0f})) .describe("Used to generate anchor windows by enumerating scales"); TVM_ATTR_FIELD(ratios) .set_default(Array<IndexExpr>({0.5f, 1.0f, 2.0f})) .describe("Used to generate anchor windows by enumerating ratios"); TVM_ATTR_FIELD(feature_stride) .set_default(16) .describe( "The size of the receptive field each unit in the convolution layer of the rpn," "for example the product of all stride's prior to this layer."); TVM_ATTR_FIELD(threshold).set_default(0.7).describe( "IoU threshold of non-maximum suppresion (suppress boxes with IoU >= this threshold)"); TVM_ATTR_FIELD(rpn_pre_nms_top_n) .set_default(6000) .describe("Number of top scoring boxes to apply NMS. -1 to use all boxes"); TVM_ATTR_FIELD(rpn_post_nms_top_n) .set_default(300) .describe("Number of top scoring boxes to keep after applying NMS to RPN proposals"); TVM_ATTR_FIELD(rpn_min_size).set_default(16).describe("Minimum height or width in proposal"); TVM_ATTR_FIELD(iou_loss).set_default(false).describe("Usage of IoU Loss"); } }; } // namespace relay } // namespace tvm #endif // TVM_RELAY_ATTRS_VISION_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/attrs/vm.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/attrs/vm.h
 * \brief Attributes for Relay vm operators.
 */
#ifndef TVM_RELAY_ATTRS_VM_H_
#define TVM_RELAY_ATTRS_VM_H_

#include <tvm/ir/attrs.h>

namespace tvm {
namespace relay {

/*!
 * \brief Options for the shape function operator.
 */
struct ShapeFuncAttrs : public tvm::AttrsNode<ShapeFuncAttrs> {
  // One flag per argument position; see the describe() text below.
  Array<Integer> is_input;

  TVM_DECLARE_ATTRS(ShapeFuncAttrs, "relay.attrs.ShapeFuncAttrs") {
    TVM_ATTR_FIELD(is_input).describe(
        "A bool indicating whether the shape function should"
        "expect shape or input in each position.");
  }
};

/*!
 * \brief Attributes for VM reshape_tensor operator.
 */
struct ReshapeTensorAttrs : public tvm::AttrsNode<ReshapeTensorAttrs> {
  Array<PrimExpr> newshape;

  TVM_DECLARE_ATTRS(ReshapeTensorAttrs, "relay.attrs.ReshapeTensorAttrs") {
    TVM_ATTR_FIELD(newshape).describe("The new shape of output tensor");
  }
};

}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_ATTRS_VM_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/base.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/base.h
 * \brief Base classes for the Relay IR.
 */
#ifndef TVM_RELAY_BASE_H_
#define TVM_RELAY_BASE_H_

#include <tvm/ir/span.h>
#include <tvm/node/node.h>
#include <tvm/tir/expr.h>

#include <string>
#include <vector>

namespace tvm {
/*!
 * \brief Relay: a high level functional IR for TVM.
 *
 * This namespace contains the abstract syntax tree, and other
 * essential data structures for the Relay IR.
 *
 * You can find more about Relay by reading the language reference.
 */
namespace relay {

// Invokes the Python-side debugger registered as "relay.debug";
// ICHECK-fails if the registry entry is absent.
#define RELAY_DEBUG(...)                                                \
  {                                                                     \
    auto fdebug = runtime::Registry::Get("relay.debug");                \
    ICHECK(fdebug) << "Could not find Relay Python debugger function."; \
    (*fdebug)("RELAY_DEBUG", __FILE__, __LINE__, __VA_ARGS__);          \
  }

// Same as RELAY_DEBUG but dispatches to the interpreter-specific hook
// "relay.debug_interp".
#define RELAY_DEBUG_INTERP(...)                                         \
  {                                                                     \
    auto fdebug = runtime::Registry::Get("relay.debug_interp");         \
    ICHECK(fdebug) << "Could not find Relay Python debugger function."; \
    (*fdebug)("RELAY_DEBUG", __FILE__, __LINE__, __VA_ARGS__);          \
  }

/*!
 * \brief Symbolic expression for tensor shape.
 */
using IndexExpr = ::tvm::PrimExpr;

// Re-export the shared source-location types into the relay namespace.
using SourceName = tvm::SourceName;
using Span = tvm::Span;
using SpanNode = tvm::SpanNode;

/*!
 * \brief This is the base node container of all relay structures.
 */
class RelayNode : public Object {
 public:
  /*! \brief The location of the program in a SourceFragment can be null,
   * check with span.defined() */
  mutable Span span;

  static constexpr const char* _type_key = "relay.Node";
  TVM_DECLARE_BASE_OBJECT_INFO(RelayNode, Object);
};

/*!
 * \brief The unique identifier of variables.
 *
 * Id is like name to the variables,
 * except that id is unique for each Var.
 *
 * \note Do not create Id directly, they are created in Var.
 */
class IdNode : public Object {
 public:
  /*!
   * \brief The name of the variable,
   *  this only acts as a hint to the user,
   *  and is not used for equality.
   */
  String name_hint;

  void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("name_hint", &name_hint); }

  // Ids compare by identity (free-variable semantics), not by name_hint.
  bool SEqualReduce(const IdNode* other, SEqualReducer equal) const {
    return equal.FreeVarEqualImpl(this, other);
  }

  void SHashReduce(SHashReducer hash_reduce) const { hash_reduce.FreeVarHashImpl(this); }

  static constexpr const char* _type_key = "relay.Id";
  static constexpr const bool _type_has_method_sequal_reduce = true;
  static constexpr const bool _type_has_method_shash_reduce = true;
  TVM_DECLARE_FINAL_OBJECT_INFO(IdNode, Object);
};

/*! \brief Managed reference to IdNode; see IdNode for semantics. */
class Id : public ObjectRef {
 public:
  /*!
   * \brief The constructor
   * \param name_hint The name of the variable.
   */
  TVM_DLL explicit Id(String name_hint);

  TVM_DEFINE_OBJECT_REF_METHODS(Id, ObjectRef, IdNode);
};

}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_BASE_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/dataflow_matcher.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/dataflow_matcher.h
 * \brief A pattern matcher for matching dataflow properties.
 */
#ifndef TVM_RELAY_DATAFLOW_MATCHER_H_
#define TVM_RELAY_DATAFLOW_MATCHER_H_

#include <tvm/relay/dataflow_pattern.h>
#include <tvm/relay/dataflow_pattern_functor.h>

#include <string>
#include <unordered_map>
#include <utility>

namespace tvm {
namespace relay {

class DFPatternCallback;
/*!
 * \brief Base type of all dataflow pattern callbacks.
 * \sa DFPatternCallback
 */
class DFPatternCallbackNode : public Object {
 public:
  /*! \brief Pattern this callback matches */
  DFPattern pattern;
  /*! \brief Function to call when finding a matched expression */
  PackedFunc function;
  /*! \brief Require InferType to be run before the callback */
  bool require_type;
  /*! \brief Run the callback only once */
  bool rewrite_once;

  // NOTE: `function` is a PackedFunc and is intentionally not visited here.
  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("pattern", &pattern);
    v->Visit("require_type", &require_type);
    v->Visit("rewrite_once", &rewrite_once);
  }

  static constexpr const char* _type_key = "DFPatternCallbackNode";
  TVM_DECLARE_BASE_OBJECT_INFO(DFPatternCallbackNode, Object);
};

/*!
 * \brief Managed reference to dataflow pattern callbacks.
 * \sa DFPatternCallbackNode
 */
class DFPatternCallback : public ObjectRef {
 public:
  TVM_DLL DFPatternCallback(DFPattern pattern, PackedFunc callback, bool require_type,
                            bool rewrite_once = false);

  TVM_DEFINE_OBJECT_REF_METHODS(DFPatternCallback, ObjectRef, DFPatternCallbackNode);
};

/*!
 * \brief Determine if a pattern matches an expression
 *
 * \param pattern The pattern to match
 * \param expr The expression to match
 *
 * \return Return true if the pattern and the expression match, return false otherwise.
 */
bool MatchPattern(DFPattern pattern, Expr expr);

/*!
 * \brief Rewrite an expression based on some number of DFPatternCallbacks
 *
 * \param callbacks An array of DFPatternCallback Nodes
 * \param expr The expression to rewrite
 * \param mod The module that associates with the expr
 *
 * \return Return An Expr with every match of the pattern inside the callbacks rewritten by the
 * functions inside the callbacks
 */
Expr RewritePatterns(Array<DFPatternCallback> callbacks, Expr expr, IRModule mod = IRModule());

/*!
 * \brief Partition all matches of a DFPattern inside an Expr into separate Function calls
 *
 * \param pattern The pattern to match
 * \param expr The expression to partition
 * \param attrs A set of parameter names and values to apply to the partitioned function
 * \param check A callback function for checking more complicated properties of the matched
 * expressions, returns true if the match is accepted and false otherwise
 *
 * \return Return the partitioned Expr.
 */
Expr PartitionPattern(DFPattern pattern, Expr expr, Map<String, ObjectRef> attrs, PackedFunc check);

/*!
 * \brief Infer the type of an expression.
 *
 * \param expr The expression to rewrite
 *
 * \return Return An Expr with unambiguous type information filled in, as well as its
 * checked type field populated with the result type.
 *
 */
Expr InferType(const Expr& expr);

}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_DATAFLOW_MATCHER_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/dataflow_pattern.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/dataflow_pattern.h
 * \brief A pattern language for matching dataflow properties.
 */
#ifndef TVM_RELAY_DATAFLOW_PATTERN_H_
#define TVM_RELAY_DATAFLOW_PATTERN_H_

#include <tvm/relay/expr.h>
#include <tvm/relay/type.h>

#include <string>
#include <vector>

namespace tvm {
namespace relay {

/*!
 * \brief Base type of all dataflow patterns.
 * \sa DFPattern
 */
class DFPatternNode : public Object {
 public:
  static constexpr const char* _type_key = "DFPatternNode";
  TVM_DECLARE_BASE_OBJECT_INFO(DFPatternNode, Object);
};

/*!
 * \brief Managed reference to dataflow patterns.
 * \sa DFPatternNode
 */
class DFPattern : public ObjectRef {
 public:
  /*! \brief Syntactic Sugar for creating a CallPattern */
  DFPattern operator()(const std::vector<DFPattern>& args) const;
  /*! \brief Syntactic Sugar for creating a CallPattern with an "add" op */
  DFPattern operator+(const DFPattern& other) const;
  /*! \brief Syntactic Sugar for creating a CallPattern with a "subtract" op */
  DFPattern operator-(const DFPattern& other) const;
  /*! \brief Syntactic Sugar for creating a CallPattern with a "multiply" op */
  DFPattern operator*(const DFPattern& other) const;
  /*! \brief Syntactic Sugar for creating a CallPattern with a "divide" op */
  DFPattern operator/(const DFPattern& other) const;
  /*! \brief Syntactic Sugar for creating an AltPattern */
  DFPattern operator||(const DFPattern& other) const;
  /*! \brief Syntactic Sugar for creating an Optional Pattern */
  DFPattern Optional(const std::function<DFPattern(const DFPattern&)>& func) const;
  /*! \brief Syntactic Sugar for creating an AttrPattern */
  DFPattern HasAttr(const Map<String, ObjectRef>& attrs) const;
  /*! \brief Syntactic Sugar for creating a TypePattern */
  DFPattern HasType(const Type& type) const;
  /*! \brief Syntactic Sugar for creating a DataTypePattern with a DataType */
  DFPattern HasDtype(const DataType& dtype) const;
  /*! \brief Syntactic Sugar for creating a DataTypePattern with a data type's name */
  DFPattern HasDtype(const std::string& dtype) const;
  /*! \brief Syntactic Sugar for creating a ShapePattern */
  DFPattern HasShape(const Array<PrimExpr> shape) const;

  TVM_DEFINE_OBJECT_REF_METHODS(DFPattern, ObjectRef, DFPatternNode);
};

/*!
 * \brief Pattern for Relay Expression.
 */
class ExprPatternNode : public DFPatternNode {
 public:
  /*! \brief The expression to match. */
  Expr expr;

  void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("expr", &expr); }

  static constexpr const char* _type_key = "relay.dataflow_pattern.ExprPattern";
  TVM_DECLARE_FINAL_OBJECT_INFO(ExprPatternNode, DFPatternNode);
};

/*!
 * \brief A pattern which matches a literal expression.
 *
 * \note Uses structural equality on expressions to check equality.
 *
 */
class ExprPattern : public DFPattern {
 public:
  TVM_DLL explicit ExprPattern(Expr expr);
  TVM_DEFINE_OBJECT_REF_METHODS(ExprPattern, DFPattern, ExprPatternNode);
};

/*!
 * \brief A Pattern to Match a Relay Variable
 */
class VarPattern;
/*! \brief Container for Var */
class VarPatternNode : public DFPatternNode {
 public:
  /*!
   * \brief The name of the Var (optional).
   */
  String name;
  /*!
   * \return The name hint of the variable
   */
  const String& name_hint() const { return name; }

  void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("name", &name); }

  static constexpr const char* _type_key = "relay.dataflow_pattern.VarPattern";
  TVM_DECLARE_FINAL_OBJECT_INFO(VarPatternNode, DFPatternNode);
};

/*! \brief Managed reference to VarPatternNode. */
class VarPattern : public DFPattern {
 public:
  TVM_DLL VarPattern(String name_hint);
  TVM_DEFINE_OBJECT_REF_METHODS(VarPattern, DFPattern, VarPatternNode);
};

/*!
 * \brief A Pattern to Match a Relay Constant
 */
class ConstantPattern;
/*! \brief Container for Constant */
class ConstantPatternNode : public DFPatternNode {
 public:
  void VisitAttrs(tvm::AttrVisitor* v) {}

  static constexpr const char* _type_key = "relay.dataflow_pattern.ConstantPattern";
  TVM_DECLARE_FINAL_OBJECT_INFO(ConstantPatternNode, DFPatternNode);
};

/*! \brief Managed reference to ConstantPatternNode. */
class ConstantPattern : public DFPattern {
 public:
  TVM_DEFINE_OBJECT_REF_METHODS(ConstantPattern, DFPattern, ConstantPatternNode);
};

/*!
 * \brief Call corresponds to operator invocation.
 *  Corresponds to the operator in computational graph terminology.
 */
class CallPattern;
/*! \brief CallPattern container. */
class CallPatternNode : public DFPatternNode {
 public:
  /*!
   * \brief The operator(function) being invoked
   *
   *  - It can be relay::Op which corresponds to the primitive operators.
   *  - It can also be user defined functions (Function, GlobalVar, Var).
   */
  DFPattern op;

  /*! \brief The arguments(inputs) of the call */
  tvm::Array<relay::DFPattern> args;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("op", &op);
    v->Visit("args", &args);
  }

  static constexpr const char* _type_key = "relay.dataflow_pattern.CallPattern";
  TVM_DECLARE_FINAL_OBJECT_INFO(CallPatternNode, DFPatternNode);
};

/*! \brief Managed reference to CallPatternNode. */
class CallPattern : public DFPattern {
 public:
  TVM_DLL CallPattern(DFPattern op, Array<DFPattern> args);
  TVM_DEFINE_OBJECT_REF_METHODS(CallPattern, DFPattern, CallPatternNode);
};

/*!
 * \brief Relay Function container
 * \sa Function
 */
class FunctionPatternNode : public DFPatternNode {
 public:
  /*! \brief Function parameters */
  tvm::Array<DFPattern> params;
  /*!
   * \brief
   *  The expression which represents the computation of the function,
   *  the expression may reference the parameters, and the type of it
   *  or sub-expressions may reference the type variables.
   */
  DFPattern body;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("params", &params);
    v->Visit("body", &body);
  }

  static constexpr const char* _type_key = "relay.dataflow_pattern.FunctionPattern";
  TVM_DECLARE_FINAL_OBJECT_INFO(FunctionPatternNode, DFPatternNode);
};

/*!
 * \brief Managed reference to FunctionPatternNode.
 * \sa FunctionPatternNode
 */
class FunctionPattern : public DFPattern {
 public:
  /*!
   * \brief Constructor
   * \param params The parameters of the function.
   * \param body The body of the function.
   */
  TVM_DLL FunctionPattern(tvm::Array<DFPattern> params, DFPattern body);

  TVM_DEFINE_OBJECT_REF_METHODS(FunctionPattern, DFPattern, FunctionPatternNode);
  TVM_DEFINE_OBJECT_REF_COW_METHOD(FunctionPatternNode);
};

/*! \brief A binding of a sub-network. */
class LetPatternNode : public DFPatternNode {
 public:
  /*! \brief The variable we bind to */
  DFPattern var;
  /*! \brief The value we bind var to */
  DFPattern value;
  /*! \brief The body of the let binding */
  DFPattern body;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("var", &var);
    v->Visit("value", &value);
    v->Visit("body", &body);
  }

  static constexpr const char* _type_key = "relay.dataflow_pattern.LetPattern";
  TVM_DECLARE_FINAL_OBJECT_INFO(LetPatternNode, DFPatternNode);
};

/*!
 * \brief Let binding that binds a local var
 */
class LetPattern : public DFPattern {
 public:
  /*!
   * \brief The constructor
   * \param var The variable that is bound to.
   * \param value The value used to bind to the variable.
   * \param body The body of the let binding.
   */
  TVM_DLL LetPattern(DFPattern var, DFPattern value, DFPattern body);

  TVM_DEFINE_OBJECT_REF_METHODS(LetPattern, DFPattern, LetPatternNode);
};

/*! \brief Tuple of multiple Exprs */
class TuplePattern;
/*! \brief Tuple container */
class TuplePatternNode : public DFPatternNode {
 public:
  /*! \brief the fields of the tuple */
  tvm::Array<DFPattern> fields;

  void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("fields", &fields); }

  static constexpr const char* _type_key = "relay.dataflow_pattern.TuplePattern";
  TVM_DECLARE_FINAL_OBJECT_INFO(TuplePatternNode, DFPatternNode);
};

/*! \brief Managed reference to TuplePatternNode. */
class TuplePattern : public DFPattern {
 public:
  TVM_DLL explicit TuplePattern(tvm::Array<DFPattern> fields);
  TVM_DEFINE_OBJECT_REF_METHODS(TuplePattern, DFPattern, TuplePatternNode);
};

/*! \brief Get index-th field out of a tuple. */
class TupleGetItemPattern;
class TupleGetItemPatternNode : public DFPatternNode {
 public:
  /*! \brief The tuple Expression */
  DFPattern tuple;
  /*! \brief which value to get */
  int index;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("tuple", &tuple);
    v->Visit("index", &index);
  }

  static constexpr const char* _type_key = "relay.dataflow_pattern.TupleGetItemPattern";
  TVM_DECLARE_FINAL_OBJECT_INFO(TupleGetItemPatternNode, DFPatternNode);
};

/*! \brief Container for an if/else conditional pattern. */
class IfPatternNode : public DFPatternNode {
 public:
  DFPattern cond, true_branch, false_branch;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("cond", &cond);
    v->Visit("true_branch", &true_branch);
    v->Visit("false_branch", &false_branch);
  }

  static constexpr const char* _type_key = "relay.dataflow_pattern.IfPattern";
  TVM_DECLARE_FINAL_OBJECT_INFO(IfPatternNode, DFPatternNode);
};

/*! \brief A pattern which matches an if/else conditional expression. */
class IfPattern : public DFPattern {
 public:
  TVM_DLL IfPattern(DFPattern cond, DFPattern then_clause, DFPattern else_clause);
  TVM_DEFINE_OBJECT_REF_METHODS(IfPattern, DFPattern, IfPatternNode);
};

/*! \brief Managed reference to TupleGetItemPatternNode. */
class TupleGetItemPattern : public DFPattern {
 public:
  TVM_DLL TupleGetItemPattern(DFPattern tuple, int index);
  TVM_DEFINE_OBJECT_REF_METHODS(TupleGetItemPattern, DFPattern, TupleGetItemPatternNode);
};

class AltPattern;
/*!
 * \brief Pattern for Alternate Expressions.
 */
class AltPatternNode : public DFPatternNode {
 public:
  /*! \brief The left optional pattern. */
  DFPattern left;
  /*! \brief The right optional pattern. */
  DFPattern right;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("left", &left);
    v->Visit("right", &right);
  }

  static constexpr const char* _type_key = "relay.dataflow_pattern.AltPattern";
  TVM_DECLARE_FINAL_OBJECT_INFO(AltPatternNode, DFPatternNode);
};

/*!
 * \brief A pattern which matches either of two patterns
 */
class AltPattern : public DFPattern {
 public:
  TVM_DLL AltPattern(DFPattern left, DFPattern right);
  TVM_DEFINE_OBJECT_REF_METHODS(AltPattern, DFPattern, AltPatternNode);
};

/*!
 * \brief Wildcard Pattern.
 */
class WildcardPatternNode : public DFPatternNode {
 public:
  void VisitAttrs(tvm::AttrVisitor* v) {}

  static constexpr const char* _type_key = "relay.dataflow_pattern.WildcardPattern";
  TVM_DECLARE_FINAL_OBJECT_INFO(WildcardPatternNode, DFPatternNode);
};

/*!
 * \brief A pattern which matches anything.
 */
class WildcardPattern : public DFPattern {
 public:
  TVM_DEFINE_OBJECT_REF_METHODS(WildcardPattern, DFPattern, WildcardPatternNode);
};

class TypePattern;
/*!
 * \brief Pattern for Types.
 */
class TypePatternNode : public DFPatternNode {
 public:
  /*! \brief The pattern. */
  DFPattern pattern;
  /*! \brief The type to match */
  Type type;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("pattern", &pattern);
    v->Visit("type", &type);
  }

  static constexpr const char* _type_key = "relay.dataflow_pattern.TypePattern";
  TVM_DECLARE_FINAL_OBJECT_INFO(TypePatternNode, DFPatternNode);
};

/*!
 * \brief A pattern which matches a type in another pattern
 */
class TypePattern : public DFPattern {
 public:
  TVM_DLL TypePattern(DFPattern pattern, Type type);
  TVM_DEFINE_OBJECT_REF_METHODS(TypePattern, DFPattern, TypePatternNode);
};

class ShapePattern;
/*!
 * \brief Pattern for Shapes.
 */
class ShapePatternNode : public DFPatternNode {
 public:
  /*! \brief The pattern. */
  DFPattern pattern;
  /*! \brief The shape to match */
  Array<PrimExpr> shape;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("pattern", &pattern);
    v->Visit("shape", &shape);
  }

  static constexpr const char* _type_key = "relay.dataflow_pattern.ShapePattern";
  TVM_DECLARE_FINAL_OBJECT_INFO(ShapePatternNode, DFPatternNode);
};

/*!
 * \brief A pattern which matches a shape in another pattern
 */
class ShapePattern : public DFPattern {
 public:
  TVM_DLL ShapePattern(DFPattern pattern, Array<PrimExpr> type);
  TVM_DEFINE_OBJECT_REF_METHODS(ShapePattern, DFPattern, ShapePatternNode);
};

class DataTypePattern;
/*!
 * \brief Pattern for Types.
 */
class DataTypePatternNode : public DFPatternNode {
 public:
  /*! \brief The pattern. */
  DFPattern pattern;
  /*! \brief The data type to match */
  DataType dtype;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("pattern", &pattern);
    v->Visit("dtype", &dtype);
  }

  static constexpr const char* _type_key = "relay.dataflow_pattern.DataTypePattern";
  TVM_DECLARE_FINAL_OBJECT_INFO(DataTypePatternNode, DFPatternNode);
};

/*!
 * \brief A pattern which matches a data type in another pattern
 */
class DataTypePattern : public DFPattern {
 public:
  TVM_DLL DataTypePattern(DFPattern pattern, DataType dtype);
  TVM_DEFINE_OBJECT_REF_METHODS(DataTypePattern, DFPattern, DataTypePatternNode);
};

class AttrPattern;
/*!
 * \brief Pattern for Attributes.
 */
class AttrPatternNode : public DFPatternNode {
 public:
  /*! \brief The pattern. */
  DFPattern pattern;
  /*! \brief The attribute to match */
  DictAttrs attrs;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("pattern", &pattern);
    v->Visit("attrs", &attrs);
  }

  static constexpr const char* _type_key = "relay.dataflow_pattern.AttrPattern";
  TVM_DECLARE_FINAL_OBJECT_INFO(AttrPatternNode, DFPatternNode);
};

/*!
 * \brief A pattern which matches attributes in another pattern
 */
class AttrPattern : public DFPattern {
 public:
  TVM_DLL AttrPattern(DFPattern pattern, DictAttrs attrs);
  TVM_DEFINE_OBJECT_REF_METHODS(AttrPattern, DFPattern, AttrPatternNode);
};

class DominatorPattern;
/*!
 * \brief Dominated Graph Pattern
 * Pattern for fuzzy subgraphs where all outputs of the parent are used finally by the child, and
 * every operation between the parent and the child matches the path.
 */
class DominatorPatternNode : public DFPatternNode {
 public:
  /*! \brief The parent. */
  DFPattern parent;
  /*! \brief The path. */
  DFPattern path;
  /*! \brief The child. */
  DFPattern child;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("parent", &parent);
    v->Visit("path", &path);
    v->Visit("child", &child);
  }

  static constexpr const char* _type_key = "relay.dataflow_pattern.DominatorPattern";
  TVM_DECLARE_FINAL_OBJECT_INFO(DominatorPatternNode, DFPatternNode);
};

/*!
 * \brief A pattern which matches a variable length dominator path
 */
class DominatorPattern : public DFPattern {
 public:
  TVM_DLL DominatorPattern(DFPattern parent, DFPattern path, DFPattern child);
  TVM_DEFINE_OBJECT_REF_METHODS(DominatorPattern, DFPattern, DominatorPatternNode);
};

/*! \brief Syntactic Sugar for creating a VarPattern with a name */
DFPattern IsVar(const String& name);
/*! \brief Syntactic Sugar for creating a ConstantPattern */
DFPattern IsConstant();
/*! \brief Syntactic Sugar for creating a WildcardPattern */
DFPattern IsWildcard();
/*! \brief Syntactic Sugar for creating a ExprPattern */
DFPattern IsExpr(const Expr& expr);
/*! \brief Syntactic Sugar for creating a ExprPattern base on an Op*/
DFPattern IsOp(const String& op_name);
/*! \brief Syntactic Sugar for creating a TuplePattern*/
DFPattern IsTuple(const Array<DFPattern>& fields);
/*! \brief Syntactic Sugar for creating a TupleGetItemPattern*/
DFPattern IsTupleGetItem(const DFPattern tuple, int index = -1);

}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_DATAFLOW_PATTERN_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/dataflow_pattern_functor.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/dataflow_pattern_functor.h
 * \brief A set of passes for operating on pattern graphs.
 */
#ifndef TVM_RELAY_DATAFLOW_PATTERN_FUNCTOR_H_
#define TVM_RELAY_DATAFLOW_PATTERN_FUNCTOR_H_

#include <tvm/relay/dataflow_pattern.h>

#include <unordered_set>
#include <utility>

namespace tvm {
namespace relay {

/*!
 * \brief A dynamic functor that dispatches on the first DFPattern argument.
 *
 * \tparam FType function signature
 *  This type is only defined for FType with function signature R(const DFPattern&,
 *  Args...)
 */
template <typename FType>
class DFPatternFunctor;

// functions to be overridden.
#define DFPATTERN_FUNCTOR_DEFAULT \
  { return VisitDFPatternDefault_(op, std::forward<Args>(args)...); }

#define RELAY_DFPATTERN_FUNCTOR_DISPATCH(OP)                                                    \
  vtable.template set_dispatch<OP>([](const ObjectRef& n, TSelf* self, Args... args) {          \
    return self->VisitDFPattern_(static_cast<const OP*>(n.get()), std::forward<Args>(args)...); \
  });

template <typename R, typename... Args>
class DFPatternFunctor<R(const DFPattern& n, Args...)> {
 private:
  // Self type, used by the dispatch macro to call back into the visitor.
  using TSelf = DFPatternFunctor<R(const DFPattern& n, Args...)>;
  // Per-type dispatch table keyed by the runtime type of the pattern node.
  using FType = tvm::NodeFunctor<R(const ObjectRef& n, TSelf* self, Args...)>;

 public:
  /*! \brief virtual destructor */
  virtual ~DFPatternFunctor() {}
  /*!
   * \brief Same as call.
   * \param n The expression node.
   * \param args Additional arguments.
   * \return The result of the call
   */
  R operator()(const DFPattern& n, Args... args) {
    return VisitDFPattern(n, std::forward<Args>(args)...);
  }
  /*!
   * \brief The functor call.
   * \param n The expression node.
   * \param args Additional arguments.
   * \return The result of the call
   */
  virtual R VisitDFPattern(const DFPattern& n, Args... args) {
    ICHECK(n.defined());
    // The vtable is built once, lazily, and shared by all instances.
    static FType vtable = InitVTable();
    return vtable(n, this, std::forward<Args>(args)...);
  }
  // Functions that can be overridden by subclass.
  virtual R VisitDFPattern_(const AltPatternNode* op, Args... args) DFPATTERN_FUNCTOR_DEFAULT;
  virtual R VisitDFPattern_(const AttrPatternNode* op, Args... args) DFPATTERN_FUNCTOR_DEFAULT;
  virtual R VisitDFPattern_(const CallPatternNode* op, Args... args) DFPATTERN_FUNCTOR_DEFAULT;
  virtual R VisitDFPattern_(const ConstantPatternNode* op, Args... args) DFPATTERN_FUNCTOR_DEFAULT;
  virtual R VisitDFPattern_(const DataTypePatternNode* op, Args... args) DFPATTERN_FUNCTOR_DEFAULT;
  virtual R VisitDFPattern_(const DominatorPatternNode* op, Args... args) DFPATTERN_FUNCTOR_DEFAULT;
  virtual R VisitDFPattern_(const ExprPatternNode* op, Args... args) DFPATTERN_FUNCTOR_DEFAULT;
  virtual R VisitDFPattern_(const FunctionPatternNode* op, Args... args) DFPATTERN_FUNCTOR_DEFAULT;
  virtual R VisitDFPattern_(const IfPatternNode* op, Args... args) DFPATTERN_FUNCTOR_DEFAULT;
  virtual R VisitDFPattern_(const LetPatternNode* op, Args... args) DFPATTERN_FUNCTOR_DEFAULT;
  virtual R VisitDFPattern_(const ShapePatternNode* op, Args... args) DFPATTERN_FUNCTOR_DEFAULT;
  virtual R VisitDFPattern_(const TupleGetItemPatternNode* op,
                            Args... args) DFPATTERN_FUNCTOR_DEFAULT;
  virtual R VisitDFPattern_(const TuplePatternNode* op, Args... args) DFPATTERN_FUNCTOR_DEFAULT;
  virtual R VisitDFPattern_(const TypePatternNode* op, Args... args) DFPATTERN_FUNCTOR_DEFAULT;
  virtual R VisitDFPattern_(const VarPatternNode* op, Args... args) DFPATTERN_FUNCTOR_DEFAULT;
  virtual R VisitDFPattern_(const WildcardPatternNode* op, Args... args) DFPATTERN_FUNCTOR_DEFAULT;
  // Fallback for pattern node types with no registered handler.
  virtual R VisitDFPatternDefault_(const Object* op, Args...) {
    LOG(FATAL) << "Do not have a default for " << op->GetTypeKey();
    throw;
  }

 private:
  // initialize the vtable.
  static FType InitVTable() {
    FType vtable;
    // Set dispatch
    RELAY_DFPATTERN_FUNCTOR_DISPATCH(AltPatternNode);
    RELAY_DFPATTERN_FUNCTOR_DISPATCH(AttrPatternNode);
    RELAY_DFPATTERN_FUNCTOR_DISPATCH(CallPatternNode);
    RELAY_DFPATTERN_FUNCTOR_DISPATCH(ConstantPatternNode);
    RELAY_DFPATTERN_FUNCTOR_DISPATCH(DataTypePatternNode);
    RELAY_DFPATTERN_FUNCTOR_DISPATCH(DominatorPatternNode);
    RELAY_DFPATTERN_FUNCTOR_DISPATCH(ExprPatternNode);
    RELAY_DFPATTERN_FUNCTOR_DISPATCH(FunctionPatternNode);
    RELAY_DFPATTERN_FUNCTOR_DISPATCH(IfPatternNode);
    RELAY_DFPATTERN_FUNCTOR_DISPATCH(LetPatternNode);
    RELAY_DFPATTERN_FUNCTOR_DISPATCH(ShapePatternNode);
    RELAY_DFPATTERN_FUNCTOR_DISPATCH(TupleGetItemPatternNode);
    RELAY_DFPATTERN_FUNCTOR_DISPATCH(TuplePatternNode);
    RELAY_DFPATTERN_FUNCTOR_DISPATCH(TypePatternNode);
    RELAY_DFPATTERN_FUNCTOR_DISPATCH(VarPatternNode);
    RELAY_DFPATTERN_FUNCTOR_DISPATCH(WildcardPatternNode);
    return vtable;
  }
};

/*!
 * \brief A simple visitor wrapper around DFPatternFunctor.
 *  Recursively visit the content.
 *
 *  DFPatternVisitor treats the pattern as a dataflow graph, and only visits each node once.
 */
class DFPatternVisitor : public DFPatternFunctor<void(const DFPattern&)> {
 public:
  void VisitDFPattern(const DFPattern& pattern) override;
  void VisitDFPattern_(const AltPatternNode* op) override;
  void VisitDFPattern_(const AttrPatternNode* op) override;
  void VisitDFPattern_(const CallPatternNode* op) override;
  void VisitDFPattern_(const ConstantPatternNode* op) override;
  void VisitDFPattern_(const DataTypePatternNode* op) override;
  void VisitDFPattern_(const DominatorPatternNode* op) override;
  void VisitDFPattern_(const ExprPatternNode* op) override;
  void VisitDFPattern_(const FunctionPatternNode* op) override;
  void VisitDFPattern_(const IfPatternNode* op) override;
  void VisitDFPattern_(const LetPatternNode* op) override;
  void VisitDFPattern_(const ShapePatternNode* op) override;
  void VisitDFPattern_(const TupleGetItemPatternNode* op) override;
  void VisitDFPattern_(const TuplePatternNode* op) override;
  void VisitDFPattern_(const TypePatternNode* op) override;
  void VisitDFPattern_(const VarPatternNode* op) override;
  void VisitDFPattern_(const WildcardPatternNode* op) override;

 protected:
  // set of already-visited nodes
  std::unordered_set<const Object*> visited_;
};

}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_DATAFLOW_PATTERN_FUNCTOR_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/executor.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/executor.h
 * \brief Object representation of Executor configuration and registry
 */
#ifndef TVM_RELAY_EXECUTOR_H_
#define TVM_RELAY_EXECUTOR_H_

#include <dmlc/registry.h>
#include <tvm/ir/attrs.h>
#include <tvm/ir/expr.h>
#include <tvm/ir/type.h>
#include <tvm/ir/type_relation.h>
#include <tvm/node/attr_registry_map.h>
#include <tvm/runtime/registry.h>

#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

namespace tvm {

template <typename, typename>
class AttrRegistry;

namespace relay {

/*!
 * \brief Executor information.
 *
 * This data structure stores the meta-data
 * about executors which can be used to pass around information.
 *
 * \sa Executor
 */
class ExecutorNode : public Object {
 public:
  /*! \brief name of the Executor */
  String name;
  /*! \brief Additional attributes storing meta-data about the Executor. */
  DictAttrs attrs;

  /*!
   * \brief Should Link Parameters into the module
   * \return Whether the Executor is configured to execute modules with linked parameters
   */
  Bool ShouldLinkParameters() const {
    // "aot" always links parameters; other executors opt in via "link-params".
    return name == "aot" || GetAttr<Bool>("link-params").value_or(Bool(false));
  }

  /*!
   * \brief Get an attribute.
   *
   * \param attr_key The attribute key.
   * \param default_value The default value if the key does not exist, defaults to nullptr.
   *
   * \return The result
   *
   * \tparam TObjectRef the expected object type.
   * \throw Error if the key exists but the value does not match TObjectRef
   *
   * \code
   *
   *  void GetAttrExample(const Executor& executor) {
   *    auto value = executor->GetAttr<Integer>("AttrKey", 0);
   *  }
   *
   * \endcode
   */
  template <typename TObjectRef>
  Optional<TObjectRef> GetAttr(
      const std::string& attr_key,
      Optional<TObjectRef> default_value = Optional<TObjectRef>(nullptr)) const {
    return attrs.GetAttr(attr_key, default_value);
  }
  // variant that uses TObjectRef to enable implicit conversion to default value.
  template <typename TObjectRef>
  Optional<TObjectRef> GetAttr(const std::string& attr_key, TObjectRef default_value) const {
    return GetAttr<TObjectRef>(attr_key, Optional<TObjectRef>(default_value));
  }

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("name", &name);
    v->Visit("attrs", &attrs);
  }

  bool SEqualReduce(const ExecutorNode* other, SEqualReducer equal) const {
    return name == other->name && equal.DefEqual(attrs, other->attrs);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce(name);
    hash_reduce(attrs);
  }

  static constexpr const char* _type_key = "Executor";
  static constexpr const bool _type_has_method_sequal_reduce = true;
  static constexpr const bool _type_has_method_shash_reduce = true;
  TVM_DECLARE_FINAL_OBJECT_INFO(ExecutorNode, Object);
};

/*!
 * \brief Managed reference class to ExecutorNode.
 * \sa ExecutorNode
 */
class Executor : public ObjectRef {
 public:
  /*!
   * \brief Create a new Executor object using the registry
   * \throws Error if name is not registered
   * \param name The name of the executor.
   * \param attrs Attributes for the executor.
   * \return the new Executor object.
   */
  TVM_DLL static Executor Create(String name, Map<String, ObjectRef> attrs = {});

  /*!
   * \brief List all registered Executors
   * \return the list of Executors
   */
  TVM_DLL static Array<String> ListExecutors();

  /*!
   * \brief List all options for a specific Executor
   * \param name The name of the Executor
   * \return Map of option name to type
   */
  TVM_DLL static Map<String, String> ListExecutorOptions(const String& name);

  /*! \brief specify container node */
  TVM_DEFINE_OBJECT_REF_METHODS(Executor, ObjectRef, ExecutorNode);
  TVM_DEFINE_OBJECT_REF_COW_METHOD(ExecutorNode)

 private:
  /*!
   * \brief Private Constructor
   * \param name The executor name
   * \param attrs Attributes to apply to this Executor node
   */
  TVM_DLL Executor(String name, DictAttrs attrs) {
    auto n = make_object<ExecutorNode>();
    n->name = std::move(name);
    n->attrs = std::move(attrs);
    data_ = std::move(n);
  }
};

/*!
 * \brief Helper structure to register Executors
 * \sa TVM_REGISTER_EXECUTOR
 */
class ExecutorRegEntry {
 public:
  /*!
   * \brief Register a valid configuration option and its ValueType for validation
   * \param key The configuration key
   * \tparam ValueType The value type to be registered
   */
  template <typename ValueType>
  inline ExecutorRegEntry& add_attr_option(const String& key);

  /*!
   * \brief Register a valid configuration option and its ValueType for validation
   * \param key The configuration key
   * \param default_value The default value of the key
   * \tparam ValueType The value type to be registered
   */
  template <typename ValueType>
  inline ExecutorRegEntry& add_attr_option(const String& key, ObjectRef default_value);

  /*!
   * \brief Register or get a new entry.
   * \param name The name of the operator.
   * \return the corresponding entry.
   */
  TVM_DLL static ExecutorRegEntry& RegisterOrGet(const String& name);

 private:
  /*! \brief Internal storage of value types */
  struct ValueTypeInfo {
    std::string type_key;
    uint32_t type_index;
  };
  std::unordered_map<std::string, ValueTypeInfo> key2vtype_;
  /*! \brief A hash table that stores the default value of each attr */
  std::unordered_map<String, ObjectRef> key2default_;
  /*! \brief Index used for internal lookup of attribute registry */
  uint32_t index_;
  // the name
  std::string name;

  /*! \brief Return the index stored in attr registry */
  uint32_t AttrRegistryIndex() const { return index_; }
  /*! \brief Return the name stored in attr registry */
  String AttrRegistryName() const { return name; }

  /*! \brief private constructor */
  explicit ExecutorRegEntry(uint32_t reg_index) : index_(reg_index) {}

  // friend class
  template <typename>
  friend class AttrRegistryMapContainerMap;
  template <typename, typename>
  friend class tvm::AttrRegistry;
  friend class Executor;
};

template <typename ValueType>
inline ExecutorRegEntry& ExecutorRegEntry::add_attr_option(const String& key) {
  ICHECK(!key2vtype_.count(key)) << "AttributeError: add_attr_option failed because '" << key
                                 << "' has been set once";

  using ValueNodeType = typename ValueType::ContainerType;
  // NOTE: we could further update the function later.
  uint32_t value_type_index = ValueNodeType::_GetOrAllocRuntimeTypeIndex();

  ValueTypeInfo info;
  info.type_index = value_type_index;
  info.type_key = runtime::Object::TypeIndex2Key(value_type_index);
  key2vtype_[key] = info;
  return *this;
}

template <typename ValueType>
inline ExecutorRegEntry& ExecutorRegEntry::add_attr_option(const String& key,
                                                           ObjectRef default_value) {
  add_attr_option<ValueType>(key);
  key2default_[key] = default_value;
  return *this;
}

// internal macros to make executor entries
#define TVM_EXECUTOR_REGISTER_VAR_DEF \
  static DMLC_ATTRIBUTE_UNUSED ::tvm::relay::ExecutorRegEntry& __make_##Executor

/*!
 * \def TVM_REGISTER_EXECUTOR
 * \brief Register a new executor, or set attribute of the corresponding executor.
 *
 * \param ExecutorName The name of registry
 *
 * \code
 *
 *  TVM_REGISTER_EXECUTOR("aot")
 *      .add_attr_option<String>("my_option")
 *      .add_attr_option<String>("my_option_default", String("default"));
 *
 * \endcode
 */
#define TVM_REGISTER_EXECUTOR(ExecutorName)                    \
  TVM_STR_CONCAT(TVM_EXECUTOR_REGISTER_VAR_DEF, __COUNTER__) = \
      ::tvm::relay::ExecutorRegEntry::RegisterOrGet(ExecutorName)
}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_EXECUTOR_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/expr.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/relay/expr.h * \brief Relay expression language. */ #ifndef TVM_RELAY_EXPR_H_ #define TVM_RELAY_EXPR_H_ #include <tvm/ir/attrs.h> #include <tvm/ir/expr.h> #include <tvm/ir/module.h> #include <tvm/ir/op.h> #include <tvm/target/virtual_device.h> #include <functional> #include <stack> #include <string> #include <utility> #include "./base.h" #include "./type.h" namespace tvm { /*! * \brief Returns \p global_var with the given properties. A null property denotes 'no change'. * Returns \p global_var if all properties are unchanged. Otherwise, returns a copy with the new * fields. */ GlobalVar WithFields(GlobalVar global_var, Optional<String> opt_name_hint = {}, Optional<Type> opt_type = {}, Optional<VirtualDevice> opt_virtual_device = {}, Optional<Span> opt_span = {}); namespace relay { using Expr = tvm::RelayExpr; using ExprNode = tvm::RelayExprNode; using BaseFunc = tvm::BaseFunc; using BaseFuncNode = tvm::BaseFuncNode; using GlobalVar = tvm::GlobalVar; using GlobalVarNode = tvm::GlobalVarNode; using tvm::PrettyPrint; /*! * \brief Constant tensor, backed by an NDArray on the cpu(0) device. * * \note Scalar constants are represented by rank-0 const tensor. 
* Constant folding are handled uniformly via Tensor types. */ class Constant; /*! * \brief Constant tensor type. */ class ConstantNode : public ExprNode { public: /*! \brief The data of the tensor */ runtime::NDArray data; /*! \return The corresponding tensor type of the data */ TensorType tensor_type() const; /*! \return Whether it is scalar(rank-0 tensor) */ bool is_scalar() const { return data->ndim == 0; } void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("data", &data); v->Visit("virtual_device_", &virtual_device_); v->Visit("span", &span); v->Visit("_checked_type_", &checked_type_); } bool SEqualReduce(const ConstantNode* other, SEqualReducer equal) const { return equal(data, other->data); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce(data); } static constexpr const char* _type_key = "relay.Constant"; TVM_DECLARE_FINAL_OBJECT_INFO(ConstantNode, ExprNode); }; class Constant : public Expr { public: /*! * \brief The constructor * \param data The data of the constant tensor. * \param span The source span of the expression. */ TVM_DLL explicit Constant(runtime::NDArray data, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Constant, RelayExpr, ConstantNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(ConstantNode); }; /*! * \brief Returns \p constant with the given properties. A null property denotes 'no change'. * Returns \p constant if all properties are unchanged. Otherwise, returns a copy with the new * fields. */ Constant WithFields(Constant constant, Optional<runtime::NDArray> opt_data = {}, Optional<VirtualDevice> opt_virtual_device = {}, Optional<Span> opt_span = {}); /*! \brief Tuple of multiple Exprs */ class Tuple; /*! \brief Tuple container */ class TupleNode : public ExprNode { public: /*! 
\brief the fields of the tuple */ tvm::Array<relay::Expr> fields; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("fields", &fields); v->Visit("virtual_device_", &virtual_device_); v->Visit("span", &span); v->Visit("_checked_type_", &checked_type_); } bool SEqualReduce(const TupleNode* other, SEqualReducer equal) const { // specially handle empty tuple as a constant is not a graph node. if (fields.size() == other->fields.size() && fields.size() == 0) { return true; } else { equal->MarkGraphNode(); return equal(fields, other->fields); } } void SHashReduce(SHashReducer hash_reduce) const { if (fields.size() != 0) { hash_reduce->MarkGraphNode(); hash_reduce(fields); } } static constexpr const char* _type_key = "relay.Tuple"; TVM_DECLARE_FINAL_OBJECT_INFO(TupleNode, ExprNode); }; class Tuple : public Expr { public: /*! * \brief The constructor * \param fields The fields of a tuple. * \param span The source span of the expression. */ TVM_DLL explicit Tuple(tvm::Array<relay::Expr> fields, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Tuple, RelayExpr, TupleNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(TupleNode); }; /*! * \brief Returns \p tuple with the given properties. A null property denotes 'no change'. * Returns \p tuple if all properties are unchanged. Otherwise, returns a copy with the new * fields. */ Tuple WithFields(Tuple tuple, Optional<Array<Expr>> opt_fields = Optional<Array<Expr>>(), Optional<VirtualDevice> opt_virtual_device = Optional<VirtualDevice>(), Optional<Span> opt_span = Optional<Span>()); /*! * \brief Local variables used in the let expression. * * Its semantics are similar to tvm.Var node used in TVM's low level * tensor expression language. * * \note Each Var is bind only once and is immutable. */ class Var; /*! \brief Container for Var */ class VarNode : public ExprNode { public: /*! * \brief The unique identifier of the Var. 
* * vid will be preserved for the same Var during type inference * and other rewritings, while the VarNode might be recreated * to attach additional information. * This property can be used to keep track of parameter Var * information across passes. */ Id vid; /*! * \brief type annotaion of the variable. * This field records user provided type annotation of the Var. * This field is optional and can be None. */ Type type_annotation; /*! \return The name hint of the variable */ const String& name_hint() const { return vid->name_hint; } void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("vid", &vid); v->Visit("type_annotation", &type_annotation); v->Visit("virtual_device_", &virtual_device_); v->Visit("span", &span); v->Visit("_checked_type_", &checked_type_); } bool SEqualReduce(const VarNode* other, SEqualReducer equal) const { equal->MarkGraphNode(); return equal(type_annotation, other->type_annotation) && equal(vid, other->vid) && equal(virtual_device_, other->virtual_device_); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce->MarkGraphNode(); hash_reduce(type_annotation); hash_reduce(vid); } static constexpr const char* _type_key = "relay.Var"; TVM_DECLARE_FINAL_OBJECT_INFO(VarNode, ExprNode); }; class Var : public Expr { public: /*! * \brief The constructor * \param name_hint The name hint of a variable. * \param type_annotation The type annotation of a variable. * \param span The source span of the expression. */ TVM_DLL Var(String name_hint, Type type_annotation, Span span = Span()) : Var(Id(name_hint), type_annotation, span) {} /*! * \brief The constructor * \param vid The unique id of a variable. * \param type_annotation The type annotation of a variable. * \param span The source span of the expression. */ TVM_DLL Var(Id vid, Type type_annotation, Span span = Span()); /*! * \brief Return a globally fresh name. Helps with debugging to follow the same * variable between passes and sub-expressions. * * TODO(mbs): Replace with name creation w.r.t. 
scopes once available as part of * name gen overhaul. */ static Var GenSym(Type type_annotation = {}, Span span = {}); TVM_DEFINE_OBJECT_REF_METHODS(Var, RelayExpr, VarNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(VarNode); }; /*! * \brief Returns \p vor with the given properties. A null property denotes 'no change'. * Returns \p var if all properties are unchanged. Otherwise, returns a copy with the new * fields. */ Var WithFields(Var var, Optional<Id> opt_vid = Optional<Id>(), Optional<Type> opt_type_annotation = Optional<Type>(), Optional<VirtualDevice> opt_virtual_device = Optional<VirtualDevice>(), Optional<Span> opt_span = Optional<Span>()); /*! * \brief Call corresponds to operator invocation. * Corresponds to the operator in computational graph terminology. */ class Call; /*! \brief Call container. */ class CallNode : public ExprNode { protected: // CallNode uses own deleter to indirectly call non-recursive destructor Object::FDeleter saved_deleter_; static void Deleter_(Object* ptr); public: /*! * \brief The operator(function) being invoked * * - It can be tvm::Op which corresponds to the primitive operators. * - It can also be user defined functions (Function, GlobalVar, Var). */ Expr op; /*! \brief The arguments(inputs) of the call */ tvm::Array<relay::Expr> args; /*! \brief The additional attributes */ Attrs attrs; /*! * \brief The type arguments passed to polymorphic(template) function. * * This is the advance feature that is only used when the function is * polymorphic. It is safe to be ignored in most cases. For example, in the * following code, the type_args of addone call is [int]. 
* * \code * * template<typename T> * T addone(T a) { return a + 1; } * * void main() { * int x = addone<int>(10); * } * * \endcode */ tvm::Array<Type> type_args; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("op", &op); v->Visit("args", &args); v->Visit("attrs", &attrs); v->Visit("type_args", &type_args); v->Visit("virtual_device_", &virtual_device_); v->Visit("span", &span); v->Visit("_checked_type_", &checked_type_); } bool SEqualReduce(const CallNode* other, SEqualReducer equal) const { // skip type_args check for primitive ops. equal->MarkGraphNode(); return equal(op, other->op) && equal(args, other->args) && equal(attrs, other->attrs) && (IsPrimitiveOp(op) || equal(type_args, other->type_args)); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce->MarkGraphNode(); hash_reduce(op); hash_reduce(args); hash_reduce(attrs); if (!IsPrimitiveOp(op)) { hash_reduce(type_args); } } static constexpr const char* _type_key = "relay.Call"; TVM_DECLARE_FINAL_OBJECT_INFO(CallNode, ExprNode); template <typename> friend class runtime::ObjAllocatorBase; friend class Call; }; class Call : public Expr { public: /*! * \brief The destructor */ ~Call(); /*! * \brief The constructor * \param op The operator will be invoked. * \param args The arguments of the call. * \param attrs The attributes of the call node. * \param type_args The type arguments passed to a polymorphic function. * \param span The source span of the expression. */ TVM_DLL Call(Expr op, Array<Expr> args, Attrs attrs = Attrs(), Array<Type> type_args = Array<Type>(), Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Call, RelayExpr, CallNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(CallNode); }; /*! * \brief Returns \p call with the given properties. A null property denotes 'no change'. * Returns \p call if all properties are unchanged. Otherwise, returns a copy with the new * fields. 
*/ Call WithFields(Call call, Optional<Expr> opt_op = Optional<Expr>(), Optional<Array<Expr>> opt_args = Optional<Array<Expr>>(), Optional<Attrs> opt_attrs = Optional<Attrs>(), Optional<Array<Type>> opt_type_args = Optional<Array<Type>>(), Optional<VirtualDevice> opt_virtual_device = Optional<VirtualDevice>(), Optional<Span> opt_span = Optional<Span>()); /*! * \brief Let binding that binds a local var and optionally a type annotation. * * \note Let is useful to transform the program to be A-normal form. * where each of the expression corresponds to a let binding. * * For developers who are familar with the computational graph. * Each of the let can be viewed as a operator node in the computational graph. * Traversing the list of let bindings is similar to running * PostDFS-order(topo-order) traversal on the computational graph. */ class Let; /*! \brief A binding of a sub-network. */ class LetNode : public ExprNode { protected: // LetNode uses own deleter to indirectly call non-recursive destructor Object::FDeleter saved_deleter_; static void Deleter_(Object* ptr); public: /*! \brief The variable we bind to */ Var var; /*! \brief The value we bind var to */ Expr value; /*! 
\brief The body of the let binding */ Expr body; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("var", &var); v->Visit("value", &value); v->Visit("body", &body); v->Visit("virtual_device_", &virtual_device_); v->Visit("span", &span); v->Visit("_checked_type_", &checked_type_); } bool SEqualReduce(const LetNode* other, SEqualReducer equal) const { equal->MarkGraphNode(); return equal.DefEqual(var, other->var) && equal(value, other->value) && equal(body, other->body); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce->MarkGraphNode(); hash_reduce.DefHash(var); hash_reduce(value); hash_reduce(body); } static constexpr const char* _type_key = "relay.Let"; TVM_DECLARE_FINAL_OBJECT_INFO(LetNode, ExprNode); template <typename> friend class runtime::ObjAllocatorBase; friend class Let; }; class Let : public Expr { public: /*! * \brief The destructor */ ~Let(); /*! * \brief The constructor * \param var The variable that is bound to. * \param value The value used to bind to the variable. * \param body The body of the let binding. * \param span The source span of the expression. */ TVM_DLL Let(Var var, Expr value, Expr body, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(Let, RelayExpr, LetNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(LetNode); }; /*! * \brief Returns \p let with the given properties. A null property denotes 'no change'. * Returns \p let if all properties are unchanged. Otherwise, returns a copy with the new * fields. */ Let WithFields(Let let, Optional<Var> opt_var = Optional<Var>(), Optional<Expr> opt_value = Optional<Expr>(), Optional<Expr> opt_body = Optional<Expr>(), Optional<VirtualDevice> opt_virtual_device = Optional<VirtualDevice>(), Optional<Span> opt_span = Optional<Span>()); /*! * \brief Condition expression * * Unlike traditional statement `if`s, the if evalutes * to the result of the branch taken. 
* * let x = if (true) { 1 } else { 0 }; // x is 1 * let y = if (false) { 1 } else { 0 }; // y is 0 * * \note This is similar to C's ternary operator. */ class If; /*! \brief container of If */ class IfNode : public ExprNode { public: /*! \brief The condition */ Expr cond; /*! \brief The expression evaluated when condition is true. */ Expr true_branch; /*! \brief The expression evaluated when condition is false */ Expr false_branch; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("cond", &cond); v->Visit("true_branch", &true_branch); v->Visit("false_branch", &false_branch); v->Visit("virtual_device_", &virtual_device_); v->Visit("span", &span); v->Visit("_checked_type_", &checked_type_); } bool SEqualReduce(const IfNode* other, SEqualReducer equal) const { equal->MarkGraphNode(); return equal(cond, other->cond) && equal(true_branch, other->true_branch) && equal(false_branch, other->false_branch); } void SHashReduce(SHashReducer hash_reduce) const { hash_reduce->MarkGraphNode(); hash_reduce(cond); hash_reduce(true_branch); hash_reduce(false_branch); } static constexpr const char* _type_key = "relay.If"; TVM_DECLARE_FINAL_OBJECT_INFO(IfNode, ExprNode); }; class If : public Expr { public: /*! * \brief The constructor * \param cond The condition of a if node. * \param true_branch The fall through branch * \param false_branch The branch for execution when condition is false. * \param span The source span of the expression. */ TVM_DLL If(Expr cond, Expr true_branch, Expr false_branch, Span span = Span()); TVM_DEFINE_OBJECT_REF_METHODS(If, RelayExpr, IfNode); TVM_DEFINE_OBJECT_REF_COW_METHOD(IfNode); }; /*! * \brief Returns \p if_expr with the given properties. A null property denotes 'no change'. * Returns \p if_expr if all properties are unchanged. Otherwise, returns a copy with the new * fields. 
*/
If WithFields(If if_expr, Optional<Expr> opt_cond = Optional<Expr>(),
              Optional<Expr> opt_true_branch = Optional<Expr>(),
              Optional<Expr> opt_false_branch = Optional<Expr>(),
              Optional<VirtualDevice> opt_virtual_device = Optional<VirtualDevice>(),
              Optional<Span> opt_span = Optional<Span>());

/*! \brief Get index-th field out of a tuple. */
class TupleGetItem;

class TupleGetItemNode : public ExprNode {
 public:
  /*! \brief The tuple Expression */
  Expr tuple;
  /*! \brief which value to get */
  int index;

  // Registers fields for reflection. Note that the tuple field is exposed
  // under the key "tuple_value", not "tuple".
  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("tuple_value", &tuple);
    v->Visit("index", &index);
    v->Visit("virtual_device_", &virtual_device_);
    v->Visit("span", &span);
    v->Visit("_checked_type_", &checked_type_);
  }

  // Structural equality on the source tuple and the projected index.
  bool SEqualReduce(const TupleGetItemNode* other, SEqualReducer equal) const {
    return equal(tuple, other->tuple) && equal(index, other->index);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce(tuple);
    hash_reduce(index);
  }

  static constexpr const char* _type_key = "relay.TupleGetItem";
  TVM_DECLARE_FINAL_OBJECT_INFO(TupleGetItemNode, ExprNode);
};

/*! \brief Managed reference to TupleGetItemNode. */
class TupleGetItem : public Expr {
 public:
  /*!
   * \brief The constructor
   * \param tuple The tuple to get an element from.
   * \param index The index for extracting a value in the tuple.
   * \param span The source span of the expression.
   */
  TVM_DLL TupleGetItem(Expr tuple, int index, Span span = Span());

  TVM_DEFINE_OBJECT_REF_METHODS(TupleGetItem, RelayExpr, TupleGetItemNode);
  TVM_DEFINE_OBJECT_REF_COW_METHOD(TupleGetItemNode);
};

/*!
 * \brief Returns \p tuple_get_item with the given properties. A null property denotes 'no change'.
 * Returns \p tuple_get_item if all properties are unchanged. Otherwise, returns a copy with the new
 * fields.
 */
TupleGetItem WithFields(TupleGetItem tuple_get_item, Optional<Expr> opt_tuple = Optional<Expr>(),
                        Optional<Integer> opt_index = Optional<Integer>(),
                        Optional<VirtualDevice> opt_virtual_device = Optional<VirtualDevice>(),
                        Optional<Span> opt_span = Optional<Span>());

/*!
\brief Create a new Reference out of initial value.
 */
class RefCreate;

class RefCreateNode : public ExprNode {
 public:
  /*! \brief The initial value of the Reference. */
  Expr value;

  // Registers fields for reflection.
  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("value", &value);
    v->Visit("virtual_device_", &virtual_device_);
    v->Visit("span", &span);
    v->Visit("_checked_type_", &checked_type_);
  }

  // Structural equality on the initial value; MarkGraphNode makes sharing of
  // this node significant to the structural-equality algorithm.
  bool SEqualReduce(const RefCreateNode* other, SEqualReducer equal) const {
    equal->MarkGraphNode();
    return equal(value, other->value);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce->MarkGraphNode();
    hash_reduce(value);
  }

  static constexpr const char* _type_key = "relay.RefCreate";
  TVM_DECLARE_FINAL_OBJECT_INFO(RefCreateNode, ExprNode);
};

/*! \brief Managed reference to RefCreateNode. */
class RefCreate : public Expr {
 public:
  /*!
   * \brief The constructor
   * \param value The initial value of the reference.
   * \param span The source span of the expression.
   */
  TVM_DLL explicit RefCreate(Expr value, Span span = Span());

  TVM_DEFINE_OBJECT_REF_METHODS(RefCreate, RelayExpr, RefCreateNode);
  TVM_DEFINE_OBJECT_REF_COW_METHOD(RefCreateNode);
};

/*!
 * \brief Returns \p ref_create with the given properties. A null property denotes 'no change'.
 * Returns \p ref_create if all properties are unchanged. Otherwise, returns a copy with the new
 * fields.
 */
RefCreate WithFields(RefCreate ref_create, Optional<Expr> opt_value = Optional<Expr>(),
                     Optional<VirtualDevice> opt_virtual_device = Optional<VirtualDevice>(),
                     Optional<Span> opt_span = Optional<Span>());

/*! \brief Get value out of Reference. */
class RefRead;

class RefReadNode : public ExprNode {
 public:
  /*! \brief The Reference Expression.
*/
  Expr ref;

  // Registers fields for reflection.
  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("ref", &ref);
    v->Visit("virtual_device_", &virtual_device_);
    v->Visit("span", &span);
    v->Visit("_checked_type_", &checked_type_);
  }

  // Structural equality on the source reference; MarkGraphNode makes sharing
  // of this node significant to the structural-equality algorithm.
  bool SEqualReduce(const RefReadNode* other, SEqualReducer equal) const {
    equal->MarkGraphNode();
    return equal(ref, other->ref);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce->MarkGraphNode();
    hash_reduce(ref);
  }

  static constexpr const char* _type_key = "relay.RefRead";
  TVM_DECLARE_FINAL_OBJECT_INFO(RefReadNode, ExprNode);
};

/*! \brief Managed reference to RefReadNode. */
class RefRead : public Expr {
 public:
  /*!
   * \brief The constructor
   * \param ref The reference where to read data.
   * \param span The source span of the expression.
   */
  TVM_DLL explicit RefRead(Expr ref, Span span = Span());

  TVM_DEFINE_OBJECT_REF_METHODS(RefRead, RelayExpr, RefReadNode);
  TVM_DEFINE_OBJECT_REF_COW_METHOD(RefReadNode);
};

/*!
 * \brief Returns \p ref_read with the given properties. A null property denotes 'no change'.
 * Returns \p ref_read if all properties are unchanged. Otherwise, returns a copy with the new
 * fields.
 */
RefRead WithFields(RefRead ref_read, Optional<Expr> opt_ref = Optional<Expr>(),
                   Optional<VirtualDevice> opt_virtual_device = Optional<VirtualDevice>(),
                   Optional<Span> opt_span = Optional<Span>());

/*! \brief Set value of Reference. The whole expression evaluates to an Empty Tuple. */
class RefWrite;

class RefWriteNode : public ExprNode {
 public:
  /*! \brief The Reference Expression. */
  Expr ref;
  /*! \brief The value to write into.
*/
  Expr value;

  // Registers fields for reflection.
  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("ref", &ref);
    v->Visit("value", &value);
    v->Visit("virtual_device_", &virtual_device_);
    v->Visit("span", &span);
    v->Visit("_checked_type_", &checked_type_);
  }

  // Structural equality on both the target reference and the written value;
  // MarkGraphNode makes sharing of this node significant.
  bool SEqualReduce(const RefWriteNode* other, SEqualReducer equal) const {
    equal->MarkGraphNode();
    return equal(ref, other->ref) && equal(value, other->value);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce->MarkGraphNode();
    hash_reduce(ref);
    hash_reduce(value);
  }

  static constexpr const char* _type_key = "relay.RefWrite";
  TVM_DECLARE_FINAL_OBJECT_INFO(RefWriteNode, ExprNode);
};

/*! \brief Managed reference to RefWriteNode. */
class RefWrite : public Expr {
 public:
  /*!
   * \brief The constructor
   * \param ref The reference where data is written to.
   * \param value The value to write.
   * \param span The source span of the expression.
   */
  TVM_DLL RefWrite(Expr ref, Expr value, Span span = Span());

  TVM_DEFINE_OBJECT_REF_METHODS(RefWrite, RelayExpr, RefWriteNode);
  TVM_DEFINE_OBJECT_REF_COW_METHOD(RefWriteNode);
};

/*!
 * \brief Returns \p ref_write with the given properties. A null property denotes 'no change'.
 * Returns \p ref_write if all properties are unchanged. Otherwise, returns a copy with the new
 * fields.
 */
RefWrite WithFields(RefWrite ref_write, Optional<Expr> opt_ref = Optional<Expr>(),
                    Optional<Expr> opt_value = Optional<Expr>(),
                    Optional<VirtualDevice> opt_virtual_device = Optional<VirtualDevice>(),
                    Optional<Span> opt_span = Optional<Span>());

/*!
 * \brief Base class of the temporary expression.
 *
 * TempExprs are pass specific expression that can be
 * useful to define intermediate result in the
 * rewriting pass such as layout or type transformation.
 *
 * Subclass TempExprNode allows us to pattern match on
 * specific kind of TempExpr and use them for expression rewriting.
 *
 * TempExpr should only be used within a pass.
 */
class TempExprNode : public ExprNode {
 public:
  /*! \brief virtual destructor */
  virtual ~TempExprNode() {}
  /*!
* \brief Convert the expression to a normal(non-temp) Expr.
   * \return The corresponding normal(non-temp) expression.
   */
  virtual Expr Realize() const = 0;

  static constexpr const char* _type_key = "relay.TempExpr";
  // Temporary expressions opt out of structural equality and hashing.
  static constexpr const bool _type_has_method_sequal_reduce = false;
  static constexpr const bool _type_has_method_shash_reduce = false;
  static constexpr const uint32_t _type_child_slots = 0;
  TVM_DECLARE_BASE_OBJECT_INFO(TempExprNode, ExprNode);
};

/*! \brief Managed reference to TempExprNode. */
class TempExpr : public Expr {
 public:
  TVM_DEFINE_OBJECT_REF_METHODS(TempExpr, RelayExpr, TempExprNode);
};

}  // namespace relay

namespace runtime {

// Specialization of make_object for relay::LetNode: installs the node's custom
// deleter (LetNode::Deleter_) and stashes the allocator's default deleter in
// saved_deleter_ — presumably so Deleter_ can dispatch back to it; confirm in
// relay::LetNode::Deleter_.
template <>
template <>
inline ObjectPtr<relay::LetNode>
ObjAllocatorBase<SimpleObjAllocator>::make_object<relay::LetNode>() {
  using Derived = SimpleObjAllocator;
  using T = relay::LetNode;
  using Handler = typename Derived::template Handler<T>;
  static_assert(std::is_base_of<Object, T>::value, "make can only be used to create Object");
  T* ptr = Handler::New(static_cast<Derived*>(this));
  ptr->type_index_ = T::RuntimeTypeIndex();
  ptr->saved_deleter_ = Handler::Deleter();
  ptr->deleter_ = relay::LetNode::Deleter_;
  return ObjectPtr<T>(ptr);
}

// Same specialization for relay::CallNode, mirroring the LetNode case above.
template <>
template <>
inline ObjectPtr<relay::CallNode>
ObjAllocatorBase<SimpleObjAllocator>::make_object<relay::CallNode>() {
  using Derived = SimpleObjAllocator;
  using T = relay::CallNode;
  using Handler = typename Derived::template Handler<T>;
  static_assert(std::is_base_of<Object, T>::value, "make can only be used to create Object");
  T* ptr = Handler::New(static_cast<Derived*>(this));
  ptr->type_index_ = T::RuntimeTypeIndex();
  ptr->saved_deleter_ = Handler::Deleter();
  ptr->deleter_ = relay::CallNode::Deleter_;
  return ObjectPtr<T>(ptr);
}

}  // namespace runtime
}  // namespace tvm

#endif  // TVM_RELAY_EXPR_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/expr_functor.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/expr_functor.h
 * \brief A more powerful visitor which enables defining arbitrary function
 * signatures with type based dispatch on first argument.
 */
#ifndef TVM_RELAY_EXPR_FUNCTOR_H_
#define TVM_RELAY_EXPR_FUNCTOR_H_

#include <tvm/ir/error.h>
#include <tvm/node/functor.h>
#include <tvm/relay/adt.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/function.h>
#include <tvm/relay/op.h>

#include <deque>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

namespace tvm {
namespace relay {

/*!
 * \brief A dynamical functor that dispatches on the first Expr argument.
 * You can use this as a more powerful Visitor, since it allows you to
 * define function signatures of Visit Function.
 *
 * \sa tvm/ir_functor.h
 *
 * \tparam FType function signature
 * This type is only defined for FType with function signature R(const Expr&,
 * Args...)
 */
template <typename FType>
class ExprFunctor;

// functions to be overridden.
#define EXPR_FUNCTOR_DEFAULT \
  { return VisitExprDefault_(op, std::forward<Args>(args)...); }

#define RELAY_EXPR_FUNCTOR_DISPATCH(OP)                                                    \
  vtable.template set_dispatch<OP>([](const ObjectRef& n, TSelf* self, Args... args) {     \
    return self->VisitExpr_(static_cast<const OP*>(n.get()), std::forward<Args>(args)...); \
  });

template <typename R, typename... Args>
class ExprFunctor<R(const Expr& n, Args...)> {
 private:
  // Self type used inside the dispatch table entries.
  using TSelf = ExprFunctor<R(const Expr& n, Args...)>;
  // Dispatch-table type: maps a node's runtime type to a handler.
  using FType = tvm::NodeFunctor<R(const ObjectRef& n, TSelf* self, Args...)>;

 public:
  /*! \brief the result type of this functor */
  using result_type = R;
  /*! \brief virtual destructor */
  virtual ~ExprFunctor() {}
  /*!
   * \brief Same as call.
   * \param n The expression node.
   * \param args Additional arguments.
   * \return The result of the call
   */
  R operator()(const Expr& n, Args... args) { return VisitExpr(n, std::forward<Args>(args)...); }
  /*!
   * \brief The functor call.
   * \param n The expression node.
   * \param args Additional arguments.
   * \return The result of the call
   */
  virtual R VisitExpr(const Expr& n, Args... args) {
    ICHECK(n.defined()) << "Found null pointer node while traversing AST. The previous pass may "
                           "have generated invalid data.";
    // The vtable is built once per template instantiation and shared.
    static FType vtable = InitVTable();
    return vtable(n, this, std::forward<Args>(args)...);
  }
  // Functions that can be overridden by subclass.
  virtual R VisitExpr_(const ConstantNode* op, Args... args) EXPR_FUNCTOR_DEFAULT;
  virtual R VisitExpr_(const TupleNode* op, Args... args) EXPR_FUNCTOR_DEFAULT;
  virtual R VisitExpr_(const VarNode* op, Args... args) EXPR_FUNCTOR_DEFAULT;
  virtual R VisitExpr_(const GlobalVarNode* op, Args... args) EXPR_FUNCTOR_DEFAULT;
  virtual R VisitExpr_(const FunctionNode* op, Args... args) EXPR_FUNCTOR_DEFAULT;
  virtual R VisitExpr_(const CallNode* op, Args... args) EXPR_FUNCTOR_DEFAULT;
  virtual R VisitExpr_(const LetNode* op, Args... args) EXPR_FUNCTOR_DEFAULT;
  virtual R VisitExpr_(const IfNode* op, Args... args) EXPR_FUNCTOR_DEFAULT;
  virtual R VisitExpr_(const OpNode* op, Args... args) EXPR_FUNCTOR_DEFAULT;
  virtual R VisitExpr_(const TupleGetItemNode* op, Args... args) EXPR_FUNCTOR_DEFAULT;
  virtual R VisitExpr_(const RefCreateNode* op, Args... args) EXPR_FUNCTOR_DEFAULT;
  virtual R VisitExpr_(const RefReadNode* op, Args... args) EXPR_FUNCTOR_DEFAULT;
  virtual R VisitExpr_(const RefWriteNode* op, Args... args) EXPR_FUNCTOR_DEFAULT;
  virtual R VisitExpr_(const ConstructorNode* op, Args... args) EXPR_FUNCTOR_DEFAULT;
  virtual R VisitExpr_(const MatchNode* op, Args... args) EXPR_FUNCTOR_DEFAULT;
  // Fallback for node types with no override: hard error.
  virtual R VisitExprDefault_(const Object* op, Args...) {
    LOG(FATAL) << "Do not have a default for " << op->GetTypeKey();
    throw;
  }

 private:
  // initialize the vtable.
  static FType InitVTable() {
    FType vtable;
    // Set dispatch
    RELAY_EXPR_FUNCTOR_DISPATCH(ConstantNode);
    RELAY_EXPR_FUNCTOR_DISPATCH(TupleNode);
    RELAY_EXPR_FUNCTOR_DISPATCH(VarNode);
    RELAY_EXPR_FUNCTOR_DISPATCH(GlobalVarNode);
    RELAY_EXPR_FUNCTOR_DISPATCH(FunctionNode);
    RELAY_EXPR_FUNCTOR_DISPATCH(CallNode);
    RELAY_EXPR_FUNCTOR_DISPATCH(LetNode);
    RELAY_EXPR_FUNCTOR_DISPATCH(IfNode);
    RELAY_EXPR_FUNCTOR_DISPATCH(OpNode);
    RELAY_EXPR_FUNCTOR_DISPATCH(TupleGetItemNode);
    RELAY_EXPR_FUNCTOR_DISPATCH(RefCreateNode);
    RELAY_EXPR_FUNCTOR_DISPATCH(RefReadNode);
    RELAY_EXPR_FUNCTOR_DISPATCH(RefWriteNode);
    RELAY_EXPR_FUNCTOR_DISPATCH(ConstructorNode);
    RELAY_EXPR_FUNCTOR_DISPATCH(MatchNode);
    return vtable;
  }
};

/*!
 * \brief A simple visitor wrapper around ExprFunctor.
 * Recursively visit the content.
 *
 * ExprVisitor treats Expr as dataflow graph,
 * and only visit each Expr node once.
*/
class ExprVisitor : public ::tvm::relay::ExprFunctor<void(const Expr& n)> {
 public:
  void VisitExpr(const Expr& expr) override;
  void VisitExpr_(const VarNode* op) override;
  void VisitExpr_(const GlobalVarNode* op) override;
  void VisitExpr_(const ConstantNode* op) override;
  void VisitExpr_(const TupleNode* op) override;
  void VisitExpr_(const FunctionNode* op) override;
  void VisitExpr_(const CallNode* op) override;
  void VisitExpr_(const LetNode* op) override;
  void VisitExpr_(const IfNode* op) override;
  void VisitExpr_(const OpNode* op) override;
  void VisitExpr_(const TupleGetItemNode* op) override;
  void VisitExpr_(const RefCreateNode* op) override;
  void VisitExpr_(const RefReadNode* op) override;
  void VisitExpr_(const RefWriteNode* op) override;
  void VisitExpr_(const ConstructorNode* op) override;
  void VisitExpr_(const MatchNode* op) override;

  // Overridable hooks for visiting non-Expr parts of the IR reachable from
  // expressions (types, match clauses, patterns, source spans).
  virtual void VisitType(const Type& t);
  virtual void VisitClause(const Clause& c);
  virtual void VisitPattern(const Pattern& c);
  virtual void VisitSpan(const Span& span);

 protected:
  // Internal visiting counter
  std::unordered_map<const Object*, size_t> visit_counter_;
};

/*!
 * \brief A wrapper around ExprFunctor which functionally updates the AST.
 *
 * ExprMutator treats Expr as dataflow graph, and only Mutate each Expr once.
 * The mutated results are memoized in a map and reused so that
 * local transformation on the dataflow preserves the graph structure.
 */
class ExprMutator : public ::tvm::relay::ExprFunctor<Expr(const Expr&)> {
 public:
  /*!
   * \brief Mutate is alias for VisitExpr
   * \return expr.
*/
  Expr Mutate(const Expr& expr) { return this->VisitExpr(expr); }
  Expr VisitExpr(const Expr& expr) override;
  Expr VisitExpr_(const VarNode* op) override;
  Expr VisitExpr_(const ConstantNode* op) override;
  Expr VisitExpr_(const GlobalVarNode* op) override;
  Expr VisitExpr_(const OpNode* op) override;
  Expr VisitExpr_(const TupleNode* op) override;
  Expr VisitExpr_(const FunctionNode* op) override;
  Expr VisitExpr_(const CallNode* call_node) override;
  Expr VisitExpr_(const LetNode* op) override;
  Expr VisitExpr_(const IfNode* op) override;
  Expr VisitExpr_(const TupleGetItemNode* op) override;
  Expr VisitExpr_(const RefCreateNode* op) override;
  Expr VisitExpr_(const RefReadNode* op) override;
  Expr VisitExpr_(const RefWriteNode* op) override;
  Expr VisitExpr_(const ConstructorNode* op) override;
  Expr VisitExpr_(const MatchNode* op) override;

  /*!
   * \brief Used to visit the types inside of expressions.
   *
   * Can be overloaded to transform the types in arbitrary
   * ways, one way would be to define a sub-class of type
   * visitor for types which transform them appropriately.
   */
  virtual Type VisitType(const Type& t);
  virtual Clause VisitClause(const Clause& c);
  virtual Pattern VisitPattern(const Pattern& c);

 protected:
  /*! \brief Internal map used for memoization. */
  std::unordered_map<Expr, Expr, ObjectPtrHash, ObjectPtrEqual> memo_;
};

/*!
 * \brief A wrapper around ExprVisitor which traverses the Dataflow Normal AST.
 *
 * MixedModeVisitor treats Expr as dataflow graph, and visits in post-DFS order
 *
 * MixedModeVisitor provides the same recursive API as ExprVisitor, and uses
 * recursion to traverse most forms of the IR, but under the hood it expands nested dataflow regions
 * of the graph and processes them iteratively to prevent stack overflows
 */
class MixedModeVisitor : public ::tvm::relay::ExprVisitor {
 public:
  using ::tvm::relay::ExprFunctor<void(const Expr& n)>::VisitExpr_;

  /*!
\brief The constructor of MixedModeVisitor
   * \param visit_limit The number of times to allow visitation to a node. Usually 1, occasionally
   * higher (i.e., 2 for dead code elimination), limited to 10 as a sanity check.
   */
  explicit MixedModeVisitor(int visit_limit = 1);

  using ExprVisitor::VisitExpr_;

  /*!
   * \brief VisitExpr is finalized to preserve call expansion of dataflow regions
   */
  void VisitExpr(const Expr& expr) final;
  void VisitExpr_(const CallNode* op) override;
  void VisitExpr_(const TupleNode* op) override;
  void VisitExpr_(const TupleGetItemNode* op) override;

 protected:
  /*!
   * \brief A function to apply when reaching a leaf of the graph non-recursively
   */
  virtual void VisitLeaf(const Expr& expr);
  /*!
   * \brief A function to determine if an expression has already been visited or needs to be
   * re-visited
   */
  virtual bool CheckVisited(const Expr& expr);
  /*!
   * \brief The max number of times to visit a node
   */
  size_t visit_limit_;
};

/*! \brief Non-recursive DFS Graph Traversal for Custom Rewriting Passes
 *
 * MixedModeMutator treats Expr as dataflow graph, and only Rewrites each Expr once.
 * The mutated results are memoized in a map and reused so that
 * local transformation on the dataflow preserves the graph structure.
 *
 * MixedModeMutator provides the same recursive API as ExprMutator, and uses
 * recursion to traverse most forms of the IR, but under the hood it expands nested dataflow regions
 * of the graph and processes them iteratively to prevent stack overflows
 *
 * Uses Rewrite_ API of ExprRewriter for a cleaner split between recursive and non-recursive
 * behavior.
*/
class MixedModeMutator : public ::tvm::relay::ExprMutator {
 public:
  using ::tvm::relay::ExprFunctor<Expr(const Expr&)>::VisitExpr_;

  MixedModeMutator(bool pre = false) : pre_{pre} {};
  Expr VisitExpr(const Expr& expr) final;

  virtual Expr DispatchVisitExpr(const Expr& expr);
  Expr VisitExpr_(const TupleNode* op) final { return Rewrite(op); };
  Expr VisitExpr_(const CallNode* call_node) final { return Rewrite(call_node); };
  Expr VisitExpr_(const TupleGetItemNode* op) final { return Rewrite(op); };
  /*!
   * \brief Users should override Rewrite_ methods to implement their pass. Rewrite_ functions will
   * be able to rewrite the op only with data about the original node `pre` and the same node with
   * modified inputs `post` and should not recurse.
   *
   * \param pre The expression node before rewriting.
   * \param post The expression with rewritten inputs.
   */
  virtual Expr Rewrite_(const TupleNode* pre, const Expr& post) { return post; }
  virtual Expr Rewrite_(const CallNode* pre, const Expr& post) { return post; }
  virtual Expr Rewrite_(const TupleGetItemNode* pre, const Expr& post) { return post; }

 protected:
  // Flag set by the constructor; NOTE(review): its exact effect on traversal
  // order is implemented in the .cc file — confirm there before relying on it.
  bool pre_;
  /*! \brief Implement Rewrite API by calling ExprMutator's VisitExpr_(op) to get a `post` node with
   * changed inputs.
   */
  template <typename T>
  Expr Rewrite(const T* op) {
    Expr post = ExprMutator::VisitExpr_(op);
    return Rewrite_(op, post);
  }

  virtual void VisitLeaf(const Expr& expr);
  virtual bool CheckVisited(const Expr& expr);
};

#define RELAY_EXPR_REWRITER_DISPATCH(OP)                                                   \
  vtable.template set_dispatch<OP>([](const ObjectRef& n, TSelf* self, const Expr& post) { \
    return self->Rewrite_(static_cast<const OP*>(n.get()), post);                          \
  });

#define EXPR_REWRITER_REWRITE_DEFAULT \
  { return post; }

/*! \brief A non-iterating Expression Rewriter
 *
 * ExprRewriter provides a Rewrite interface for modifying graphs in Post-DFS order.
*
 * The expectation is that ExprRewriter objects will be passed to PostOrderRewrite, which will
 * non-recursively unroll the graph and call Rewriting on inputs. It will then pass the original
 * node, called `pre`, and a node recreated with any altered inputs, called `post`, to the
 * ExprRewriter. The ExprRewriter can then use the information in those two nodes to do more complex
 * graph rewriting.
 */
class ExprRewriter {
 private:
  // Self type used inside the dispatch table entries.
  using TSelf = ExprRewriter;
  // Dispatch-table type: maps a node's runtime type to a Rewrite_ handler.
  using FType = tvm::NodeFunctor<Expr(const ObjectRef& n, TSelf* self, const Expr& post)>;

 public:
  /*! \brief virtual destructor */
  virtual ~ExprRewriter() {}
  /*!
   * \brief Same as call.
   * \param pre The expression node before rewriting.
   * \param post The expression node with rewritten inputs.
   * \return The result of the call
   */
  Expr operator()(const Expr& pre, const Expr& post) { return Rewrite(pre, post); }
  /*!
   * \brief The functor call.
   * \param pre The expression node before rewriting.
   * \param post The expression node with rewritten inputs.
   * \return The result of the call
   */
  virtual Expr Rewrite(const Expr& pre, const Expr& post) {
    ICHECK(pre.defined());
    // The vtable is built once and shared by all ExprRewriter instances.
    static FType vtable = InitVTable();
    return vtable(pre, this, post);
  }
  // Functions that can be overridden by subclass, should not recurse.
  virtual Expr Rewrite_(const VarNode* pre, const Expr& post) EXPR_REWRITER_REWRITE_DEFAULT;
  virtual Expr Rewrite_(const GlobalVarNode* pre, const Expr& post) EXPR_REWRITER_REWRITE_DEFAULT;
  virtual Expr Rewrite_(const ConstantNode* pre, const Expr& post) EXPR_REWRITER_REWRITE_DEFAULT;
  virtual Expr Rewrite_(const TupleNode* pre, const Expr& post) EXPR_REWRITER_REWRITE_DEFAULT;
  virtual Expr Rewrite_(const FunctionNode* pre, const Expr& post) EXPR_REWRITER_REWRITE_DEFAULT;
  virtual Expr Rewrite_(const CallNode* pre, const Expr& post) EXPR_REWRITER_REWRITE_DEFAULT;
  virtual Expr Rewrite_(const LetNode* pre, const Expr& post) EXPR_REWRITER_REWRITE_DEFAULT;
  virtual Expr Rewrite_(const IfNode* pre, const Expr& post) EXPR_REWRITER_REWRITE_DEFAULT;
  virtual Expr Rewrite_(const OpNode* pre, const Expr& post) EXPR_REWRITER_REWRITE_DEFAULT;
  virtual Expr Rewrite_(const TupleGetItemNode* pre,
                        const Expr& post) EXPR_REWRITER_REWRITE_DEFAULT;
  virtual Expr Rewrite_(const RefCreateNode* pre, const Expr& post) EXPR_REWRITER_REWRITE_DEFAULT;
  virtual Expr Rewrite_(const RefReadNode* pre, const Expr& post) EXPR_REWRITER_REWRITE_DEFAULT;
  virtual Expr Rewrite_(const RefWriteNode* pre, const Expr& post) EXPR_REWRITER_REWRITE_DEFAULT;
  virtual Expr Rewrite_(const ConstructorNode* pre, const Expr& post) EXPR_REWRITER_REWRITE_DEFAULT;
  virtual Expr Rewrite_(const MatchNode* pre, const Expr& post) EXPR_REWRITER_REWRITE_DEFAULT;

 private:
  // initialize the vtable.
  static FType InitVTable() {
    FType vtable;
    // Set dispatch
    RELAY_EXPR_REWRITER_DISPATCH(ConstantNode);
    RELAY_EXPR_REWRITER_DISPATCH(TupleNode);
    RELAY_EXPR_REWRITER_DISPATCH(VarNode);
    RELAY_EXPR_REWRITER_DISPATCH(GlobalVarNode);
    RELAY_EXPR_REWRITER_DISPATCH(FunctionNode);
    RELAY_EXPR_REWRITER_DISPATCH(CallNode);
    RELAY_EXPR_REWRITER_DISPATCH(LetNode);
    RELAY_EXPR_REWRITER_DISPATCH(IfNode);
    RELAY_EXPR_REWRITER_DISPATCH(OpNode);
    RELAY_EXPR_REWRITER_DISPATCH(TupleGetItemNode);
    RELAY_EXPR_REWRITER_DISPATCH(RefCreateNode);
    RELAY_EXPR_REWRITER_DISPATCH(RefReadNode);
    RELAY_EXPR_REWRITER_DISPATCH(RefWriteNode);
    RELAY_EXPR_REWRITER_DISPATCH(ConstructorNode);
    RELAY_EXPR_REWRITER_DISPATCH(MatchNode);
    return vtable;
  }
};

/*! \brief Non-recursive DFS Graph Traversal for Custom Rewriting Passes
 *
 * PostOrderRewrite does a non-recursive traversal of the graph in Post-DFS order and calls the
 * ExprRewriter's Rewrite functions on nodes once their inputs are rewritten. At each rewrite call,
 * PostOrderRewrite provides the original node and the node with altered inputs for use by the
 * ExprRewriter.
 */
Expr PostOrderRewrite(const Expr& expr, ExprRewriter* rewriter);

/*!
 * \brief recursively visit the ir in post DFS order node, apply fvisit
 * Each node is guaranteed to be visited only once.
 * \param node The ir to be visited.
 * \param fvisit The visitor function to be applied.
 */
void PostOrderVisit(const Expr& node, std::function<void(const Expr&)> fvisit);

/*!
 * \brief A struct to keep info of traversed expr in ExpandDataflow function
 */
struct v_info {
  explicit v_info(Expr node_) : node{node_} {}
  v_info(Expr node_, bool children_expanded_)
      : node{node_}, children_expanded{children_expanded_} {};
  // The expression this stack entry refers to.
  Expr node{};
  // True once the node's children have been pushed for processing.
  bool children_expanded{false};
};

/*!
 * \brief A function to iteratively traverse dataflow regions of a graph
 *
 * ExpandDataflow manually manages a stack and performs DFS to determine the processing
 * order of nodes in an input graph.
* * By default fexpand_expr implemented in a way that if it finds a dataflow node (Call, Tuple, * TupleGetItem), it checks if the arguments to that node need to be processed via fcheck_visited. * If so, the function pushes those arguments to the stack and continues iteratively to process * the top of the stack. When it finds a node that doesn't match the dataflow types, or a node who's * inputs have all been processed, it visits the current leaf via fvisit_leaf. * * This function should be used internally to other classes to implement mixed-mode traversals. The * expectation is that fvisit_leaf will perform recursive analysis within mixed-mode traversal if it * hits a non-dataflow node. * * fcheck_visited, fvisit_leaf and fexpand_expr are templated to encourage reusing. */ template <typename FCheckVisited, typename FVisitLeaf, typename FExpandExpr> void ExpandDataflow(Expr expr, FCheckVisited fcheck_visited, FVisitLeaf fvisit_leaf, FExpandExpr fexpand_expr) { std::deque<v_info> stack; auto fpush_to_stack = [&fcheck_visited, &stack](const Expr& expr) { if (!fcheck_visited(expr)) { stack.emplace_front(v_info(expr)); } }; fpush_to_stack(expr); while (stack.size() > 0) { v_info* front = &stack.front(); if (fcheck_visited(front->node)) { stack.pop_front(); } else if (front->children_expanded) { fvisit_leaf(front->node); // TODO(d-smirnov): this is for compatibility with current implementation of MixedModeVisitor stack.pop_front(); } else { front->children_expanded = true; for (auto e : fexpand_expr(front->node)) { fpush_to_stack(e); } } } } template <typename FCheckVisited, typename FVisitLeaf> void ExpandDataflow(Expr expr, FCheckVisited fcheck_visited, FVisitLeaf fvisit_leaf) { auto fexpand_expr = [](const Expr& expr) { std::vector<Expr> result; if (const CallNode* op = expr.as<CallNode>()) { if (op->op == Op::Get("call_lowered")) { // Ignore the intermediate tuple since this is purely a calling-convention detail const auto* tuple_args = op->args[1].as<TupleNode>(); 
ICHECK(tuple_args) << "Expected second arg to call_lowered to be a Tuple of input arguments."; for (auto it = tuple_args->fields.rbegin(); it != tuple_args->fields.rend(); ++it) { result.push_back(*it); } result.push_back(op->args[0]); } else { for (auto it = op->args.rbegin(); it != op->args.rend(); ++it) { result.push_back(*it); } } result.push_back(op->op); } else if (const TupleNode* op = expr.as<TupleNode>()) { for (auto it = op->fields.rbegin(); it != op->fields.rend(); ++it) { result.push_back(*it); } } else if (const TupleGetItemNode* op = expr.as<TupleGetItemNode>()) { result.push_back(op->tuple); } return result; }; ExpandDataflow(expr, fcheck_visited, fvisit_leaf, fexpand_expr); } void ExpandANormalForm(const LetNode* op, std::function<void(const LetNode*)> pre_visit, std::function<void(const LetNode*)> post_visit); } // namespace relay } // namespace tvm #endif // TVM_RELAY_EXPR_FUNCTOR_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/feature.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/feature.h
 * \brief Detect features used in Expr/Module.
 */
#ifndef TVM_RELAY_FEATURE_H_
#define TVM_RELAY_FEATURE_H_

#include <tvm/ir/module.h>
#include <tvm/relay/expr.h>

#include <bitset>
#include <string>

namespace tvm {
namespace relay {

/*! \brief Different kinds of relay feature a program might use. */
enum Feature : int {
  fVar = 0,
  fGlobalVar = 1,
  fConstant = 2,
  fTuple = 3,
  fTupleGetItem = 4,
  fFunction = 5,
  fOp = 6,
  fCall = 7,
  fLet = 8,
  fIf = 9,
  fRefCreate = 10,
  fRefRead = 11,
  fRefWrite = 12,
  fConstructor = 13,
  fMatch = 14,
  /*! \brief Whether any non-atom fragment of the program is shared, making the program a graph. */
  fGraph = 15,
  /*! \brief Whether there is local fixpoint in the program. */
  fLetRec = 16
};

// Number of distinct Feature values; sizes the bitset inside FeatureSet.
constexpr size_t feature_count = 17;

/*!
 * \brief A finite set of Feature, stored as a bitset with one bit per Feature.
 */
class FeatureSet {
 public:
  FeatureSet(const FeatureSet&) = default;
  /*! \brief A singleton set containing a single Feature. */
  explicit FeatureSet(Feature ft) { bs_.set(static_cast<size_t>(ft)); }
  /*! \brief Build a set from an array of integer feature ids. */
  explicit FeatureSet(const tvm::Array<tvm::Integer>& ft) {
    for (Integer i : ft) {
      *this += Feature(i.IntValue());
    }
  }
  /*! \brief Convert to an array of the contained feature ids, in ascending order. */
  explicit operator Array<Integer>() const {
    Array<Integer> ret;
    for (size_t i = 0; i < feature_count; ++i) {
      if (bs_[i]) {
        ret.push_back(Integer(i));
      }
    }
    return ret;
  }
  /*! \brief A set that contain all the Feature. */
  static FeatureSet All() {
    FeatureSet fs;
    fs.bs_.flip();
    return fs;
  }
  /*! \brief The empty set. Contain no Feature. */
  static FeatureSet No() {
    FeatureSet fs;
    return fs;
  }
  // In-place union. rhs may be anything a FeatureSet is constructible from
  // (a Feature, an Array<Integer>, or another FeatureSet via the copy ctor).
  template <typename T>
  FeatureSet& operator+=(const T& rhs) {
    bs_ |= FeatureSet(rhs).bs_;
    return *this;
  }
  /*! \brief Set union. */
  template <typename T>
  FeatureSet operator+(const T& rhs) const {
    FeatureSet fs(*this);
    fs += rhs;
    return fs;
  }
  // In-place set difference.
  template <typename T>
  FeatureSet& operator-=(const T& rhs) {
    bs_ &= ~(FeatureSet(rhs)).bs_;
    return *this;
  }
  /*! \brief Set difference. */
  template <typename T>
  FeatureSet operator-(const T& rhs) const {
    FeatureSet fs(*this);
    fs -= rhs;
    return fs;
  }
  /*!
   * \brief Is this a subset of rhs?
   *
   * \param rhs another FeatureSet.
   *
   * \return true only if this is a subset of rhs.
   */
  bool is_subset_of(const FeatureSet& rhs) const { return ((*this) - rhs).bs_.none(); }

  /*!
   * \brief return a string representation.
   */
  std::string ToString() const;

 private:
  std::bitset<feature_count> bs_;
  FeatureSet() = default;
  explicit FeatureSet(const std::bitset<feature_count>& bs) : bs_(bs) {}
};

/*!
 * \brief Calculate the feature of the program.
 *
 * \param expr The expression.
 *
 * \return The FeatureSet.
 */
FeatureSet DetectFeature(const RelayExpr& expr);

/*!
 * \brief Calculate the feature of the program.
 *
 * \param mod The module.
 *
 * \return The FeatureSet.
 */
FeatureSet DetectFeature(const IRModule& mod);

/*!
 * \brief Calculate the feature of the program.
 *
 * \param expr The expression.
 * \param mod The module.
 *
 * \return The FeatureSet.
 */
inline FeatureSet DetectFeature(const Expr& expr, const IRModule& mod) {
  // Union of the features of the expression and of the module.
  return DetectFeature(expr) + DetectFeature(mod);
}

/*!
 * \brief Check the feature of the program.
 *
 * \param expr The expression.
 * \param fs The feature set of the program.
 */
void CheckFeature(const RelayExpr& expr, const FeatureSet& fs);

/*!
 * \brief Check the feature of the program.
 *
 * \param mod The module.
 * \param fs The feature set of the program.
 */
void CheckFeature(const IRModule& mod, const FeatureSet& fs);

/*!
 * \brief Check the feature of the program.
 *
 * \param expr The expression.
 * \param mod The module.
 * \param fs The feature set of the program.
 */
inline void CheckFeature(const RelayExpr& expr, const IRModule& mod, const FeatureSet& fs) {
  CheckFeature(expr, fs);
  CheckFeature(mod, fs);
}

}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_FEATURE_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/function.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/function.h
 * \brief Relay Function.
 */
#ifndef TVM_RELAY_FUNCTION_H_
#define TVM_RELAY_FUNCTION_H_

#include <tvm/ir/function.h>
#include <tvm/relay/expr.h>

#include <string>

namespace tvm {
namespace relay {

/*!
 * \brief Relay Function container
 * \sa Function
 */
class FunctionNode : public BaseFuncNode {
 public:
  /*! \brief Function parameters */
  tvm::Array<Var> params;
  /*!
   * \brief
   * The expression which represents the computation of the function,
   * the expression may reference the parameters, and the type of it
   * or sub-expressions may reference the type variables.
   */
  Expr body;
  /*! \brief User annotated return type of the function. */
  Type ret_type;
  /*!
   * \brief Type parameters of the function.
   *  Enables the function to vary its type based on these.
   *  This corresponds to template paramaters in c++'s terminology.
   *
   * \note This can be usually empty for non-polymorphic functions.
   */
  tvm::Array<TypeVar> type_params;

  void VisitAttrs(tvm::AttrVisitor* v) {
    v->Visit("params", &params);
    v->Visit("body", &body);
    v->Visit("ret_type", &ret_type);
    v->Visit("type_params", &type_params);
    v->Visit("attrs", &attrs);
    v->Visit("virtual_device_", &virtual_device_);
    v->Visit("span", &span);
    v->Visit("_checked_type_", &checked_type_);
  }

  bool SEqualReduce(const FunctionNode* other, SEqualReducer equal) const {
    // Important to make def equal first.
    equal->MarkGraphNode();
    return equal.DefEqual(params, other->params) &&
           equal.DefEqual(type_params, other->type_params) && equal(ret_type, other->ret_type) &&
           equal(attrs, other->attrs) && equal(body, other->body);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce->MarkGraphNode();
    hash_reduce.DefHash(params);
    hash_reduce.DefHash(type_params);
    hash_reduce(ret_type);
    hash_reduce(attrs);
    hash_reduce(body);
  }

  /*!
   * \brief Return the derived function annotation of this expression.
   *
   * \return The function type annotation.
   * \note The function type annotation can contain IncompleteType.
   */
  TVM_DLL FuncType func_type_annotation() const;

  static constexpr const char* _type_key = "relay.Function";
  TVM_DECLARE_FINAL_OBJECT_INFO(FunctionNode, BaseFuncNode);
};

/*!
 * \brief Managed reference to FunctionNode.
 * \sa FunctionNode
 */
class Function : public BaseFunc {
 public:
  /*!
   * \brief Constructor
   * \param params The parameters of the function.
   * \param body The body of the function.
   * \param ret_type The return type of the function.
   * \param ty_params The type parameters.
   * \param attrs Additional function attributes.
   * \param span The span of the function.
   */
  TVM_DLL Function(tvm::Array<Var> params, Expr body, Type ret_type, tvm::Array<TypeVar> ty_params,
                   tvm::DictAttrs attrs = NullValue<DictAttrs>(), Span span = Span());

  TVM_DEFINE_OBJECT_REF_METHODS(Function, BaseFunc, FunctionNode);
  TVM_DEFINE_OBJECT_REF_COW_METHOD(FunctionNode);
};

/*!
 * \brief Returns \p function with the given properties. A null property denotes 'no change'.
 * Returns \p function if all properties are unchanged. Otherwise, returns a copy with the new
 * fields.
 */
Function WithFields(Function function, Optional<Array<Var>> opt_params = Optional<Array<Var>>(),
                    Optional<Expr> opt_body = Optional<Expr>(),
                    Optional<Type> opt_ret_type = Optional<Type>(),
                    Optional<Array<TypeVar>> opt_ty_params = Optional<Array<TypeVar>>(),
                    Optional<DictAttrs> opt_attrs = Optional<DictAttrs>(),
                    Optional<VirtualDevice> opt_virtual_device = Optional<VirtualDevice>(),
                    Optional<Span> opt_span = Optional<Span>());

/*!
 * \brief Returns the Relay FunctionNode represented by base_func if it should be optimized,
 * otherwise returns nullptr.
 *
 * This means returns nullptr:
 *  - For PrimFuncs, since not Relay Functions.
 *  - For Functions marked for external compilation (with "Compiler").
 *  - For Functions marked as already having an external definition (with "ExternalSymbol").
 *  - For Functions marked as not to be optimized (with "SkipOptimization").
 *
 * TODO(mbs): Audit all enumerations of IRModule::functions to use this or some family of such.
 */
const FunctionNode* AsOptimizableFunctionNode(const BaseFunc& base_func);

/*!
 * \brief namespace of the attributes that can be attached to a relay::Function.
 */
namespace attr {

/*!
 * \brief Mark the function as representing a sub-graph which is to be lowered or compiled as
 * a unit. For example, the function may represent a kernel which TVM will lower to a PrimFunc.
 * If present should be bound to \p Integer(1). May be accompanied by "Compiler", see below.
 * The function body should be considered opaque by Relay, and many passes simply ignore these
 * functions.
 *
 * Type: Integer
 */
constexpr const char* kPrimitive = "Primitive";

/*!
 * \brief Mark the function as externally implemented, ie bound in a runtime::Module within the
 * IRModule's "external_mods" attribute. If present should be bound to \p Integer(1). Generally
 * the only attribute when present.
 *
 * Type: Integer
 */
constexpr const char* kExtern = "Extern";

/*!
 * \brief Indicates the name of the external codegen 'compiler' that should be used to lower
 * or compile the function other than TVM's default lowering pipeline. The name may correspond
 * to a TargetKind name. There may be a global function registered under 'relay.ext.{name}'.
 *
 * Type: String
 */
constexpr const char* kCompiler = "Compiler";

/*! \brief Indicate if the function is a closure. */
constexpr const char* kClosure = "Closure";

/*! \brief Store a Var to parameter/Constant mapping on a Function. */
constexpr const char* kParams = "__params__";

/*! \brief Mark if the function should be avoided being optimized. */
constexpr const char* kSkipOptimization = "SkipOptimization";

/*! \brief Treat the function as a composite operator. */
constexpr const char* kComposite = "Composite";

/*! \brief Mark the function to be inlined. */
constexpr const char* kInline = "Inline";

/*! \brief Indicate the function was created by the Pattern Partitioning Pass. */
constexpr const char* kPartitionedFromPattern = "PartitionedFromPattern";

/*! \brief Mark the function as only composed of reshape operations. */
constexpr const char* kReshapeOnly = "relay.reshape_only";

}  // namespace attr

}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_FUNCTION_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/interpreter.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/relay/interpreter.h * \brief An interpreter for Relay. * * This file implements a simple reference interpreter for Relay programs. * Given a Relay module, and a Relay expression it produces a value. * * The interpreter's values are a naive representation of the values that * can be produced by a Relay program and are exposed via TVM's object * protocol to Python for introspection and debugging. * * The interpreter's intent is to serve as a reference semantics for the Relay IR, * as well as for debugging and testing. */ #ifndef TVM_RELAY_INTERPRETER_H_ #define TVM_RELAY_INTERPRETER_H_ #include <tvm/ir/module.h> #include <tvm/relay/expr.h> #include <tvm/runtime/container/closure.h> #include <tvm/runtime/object.h> #include <tvm/target/target.h> #include <unordered_set> namespace tvm { namespace relay { /*! \brief The container type of Closures used by the interpreter. */ class InterpreterClosureObj : public runtime::ClosureObj { public: /*! \brief The set of free variables in the closure. * * These are the captured variables which are required for * evaluation when we call the closure. */ tvm::Map<Var, ObjectRef> env; /*! \brief The function which implements the closure. 
* * \note May reference the variables contained in the env. */ Function func; InterpreterClosureObj() {} void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("env", &env); v->Visit("func", &func); } static constexpr const char* _type_key = "interpreter.Closure"; TVM_DECLARE_FINAL_OBJECT_INFO(InterpreterClosureObj, runtime::ClosureObj); }; class InterpreterClosure : public runtime::Closure { public: TVM_DLL InterpreterClosure(tvm::Map<Var, ObjectRef> env, Function func); TVM_DEFINE_OBJECT_REF_METHODS(InterpreterClosure, runtime::Closure, InterpreterClosureObj); }; /*! \brief The container type of RecClosure. */ class RecClosureObj : public Object { public: /*! \brief The closure. */ InterpreterClosure clos; /*! \brief variable the closure bind to. */ Var bind; RecClosureObj() {} void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("clos", &clos); v->Visit("bind", &bind); } static constexpr const char* _type_key = "interpreter.RecClosure"; TVM_DECLARE_FINAL_OBJECT_INFO(RecClosureObj, Object); }; class RecClosure : public ObjectRef { public: TVM_DLL RecClosure(InterpreterClosure clos, Var bind); TVM_DEFINE_OBJECT_REF_METHODS(RecClosure, ObjectRef, RecClosureObj); }; struct RefValueObj : Object { mutable ObjectRef value; RefValueObj() {} void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("value", &value); } static constexpr const char* _type_key = "relay.RefValue"; TVM_DECLARE_FINAL_OBJECT_INFO(RefValueObj, Object); }; class RefValue : public ObjectRef { public: TVM_DLL RefValue(ObjectRef val); TVM_DEFINE_OBJECT_REF_METHODS(RefValue, ObjectRef, RefValueObj); }; struct ConstructorValueObj : Object { int32_t tag; tvm::Array<ObjectRef> fields; /*! \brief Optional field tracking ADT constructor. 
*/ Constructor constructor; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("tag", &tag); v->Visit("fields", &fields); v->Visit("constructor", &constructor); } static constexpr const char* _type_key = "relay.ConstructorValue"; TVM_DECLARE_FINAL_OBJECT_INFO(ConstructorValueObj, Object); }; class ConstructorValue : public ObjectRef { public: TVM_DLL ConstructorValue(int32_t tag, tvm::Array<ObjectRef> fields, Constructor construtor = {}); TVM_DEFINE_OBJECT_REF_METHODS(ConstructorValue, ObjectRef, ConstructorValueObj); }; /*! * \brief Returns a packed function over Relay expressions which will evaluate \p expr * applied to those arguments, where \p expr is w.r.t. the definitions in \p mod. * * This function is intended to support the Python 'debug' executor. * * The given \p expr should have function type. The given \p mod may be empty or * undefined if \p expr is self-contained. Relay arguments passed to the result * packed function must be constants, references, or constructors/tuples over such. * As much work as possible is done while constructing the result packed function, and * that function may be reasonably efficiently applied multiple times without redoing * unnecessary work. * * Primitives are lowered and compiled to packed functions for execution on \p device * with properties given by \p target. All other Relay constructs are interpreted. * * The interpreter is intended to be a 'reference' implementation of the Relay semantics * for testing and interactive use. It is not intended to be particularly efficient. * * \param mod A module containing definitions which can be referenced from * \p expr. May be empty or undefined. * \param expr An expression of function type to evaluate. May reference definitions from \p mod. * \param device The device on which all primitives will be executed. * \param target The compiler target flag for compiling primitives. 
* \return A packed function that takes an array of Relay expressions and returns the * result of applying \p expr to those arguments. */ TypedPackedFunc<ObjectRef(Array<Expr>)> EvalFunction(IRModule mod, Expr expr, Device device, Target target); /*! * \brief Evaluates \p expr and returns its result. * * This function is intended to support TVM constant evaluation. * * \param expr An expression to evaluate. * \param type_definitions Global type definitions which \p expr may references. * \param import_set Already imported external modules. * \param device The device on which all primitives will be executed. * \param target The compiler target flag for compiling primitives. * \param attrs Attributes for the expression to be evaluated with * @return The object representing the result. */ ObjectRef Eval(Expr expr, Map<GlobalTypeVar, TypeData> type_definitions, std::unordered_set<String> import_set, Device device, Target target, Map<String, ObjectRef> attrs = {}); } // namespace relay } // namespace tvm #endif // TVM_RELAY_INTERPRETER_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/op.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/op.h
 * \brief Primitive operators(builtin intrinsics).
 */
#ifndef TVM_RELAY_OP_H_
#define TVM_RELAY_OP_H_

#include <tvm/ir/op.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/type.h>

namespace tvm {
namespace relay {

// Relay reuses the Op/OpNode definitions from tvm/ir; these aliases keep
// existing relay code referring to relay::Op working unchanged.
using Op = tvm::Op;
using OpNode = tvm::OpNode;

// Alias of the core registration macro so relay operator registrations
// read uniformly within the relay namespace.
#define RELAY_REGISTER_OP(OpName) TVM_REGISTER_OP(OpName)

}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_OP_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/op_attr_types.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/op_attr_types.h
 * \brief The Expr and related elements in DataFlow construction.
 */
#ifndef TVM_RELAY_OP_ATTR_TYPES_H_
#define TVM_RELAY_OP_ATTR_TYPES_H_

#include <tvm/relay/expr.h>
#include <tvm/relay/type.h>
#include <tvm/target/generic_func.h>
#include <tvm/target/target.h>
#include <tvm/te/schedule.h>
#include <tvm/te/tensor.h>
#include <tvm/tir/data_layout.h>

#include <string>

namespace tvm {
namespace relay {

using tir::BijectiveLayoutNode;
using tir::Layout;
using tir::LayoutAxis;

/*! \brief operator pattern used in graph fusion */
enum OpPatternKind {
  // Elementwise operation
  kElemWise = 0,
  // Broadcasting operator, can always map output axis to the input in order.
  // for example :code:`out[i, ax1, j, ax2] = input[i, j]`.
  // Note that the axis need to be in order so transpose is not a bcast operator.
  kBroadcast = 1,
  // Injective operator, can always injectively map output axis to a single input axis.
  // All injective operator can still be safely fused to injective and reduction.
  kInjective = 2,
  // Commutative reduction operator.
  kCommReduce = 3,
  // Complex operation, can still fuse elemwise operations into its output.
  // but cannot chain another complex op
  kOutEWiseFusable = 4,
  // The pattern for tuple nodes. Can fuse into subsequent injective ops,
  // but treated specially
  kTuple = 7,
  // Opaque operation, cannot fuse anything.
  kOpaque = 8
};

/*! \brief the operator pattern */
using TOpPattern = int;

/*!
 * \brief Whether operator is stateful or contain internal state.
 *
 * All the primitive ops we registered so far are pure.
 * This attribute is left for potential future compatible reasons.
 * We can always work around the stateful ops by adding an additional
 * handle argument and return it.
 */
using TOpIsStateful = bool;

/*!
 * \brief Mark the operator as non-computational.
 */
using TNonComputational = bool;

/*!
 * \brief Mark the operator as reshape op of its first input
 * and can be turned into a nop when the first input and output
 * shares the same piece of memory.
 */
using TReshapeOp = bool;

/*!
 * \brief Mark the operator whether output shape is data dependent.
 */
using TShapeDataDependent = Array<Integer>;

/*!
 * \brief Computation description interface.
 *
 * \note This function have a special convention
 *  for functions with tuple input/output.
 *
 *  So far we restrict tuple support to the following case:
 *  - Function which takes a single tuple as input.
 *  - Function which outputs a single tuple.
 *
 *  In both cases, the tuple is flattened as array.
 *
 * \param attrs The attribute of the primitive
 * \param inputs The input tensors.
 * \param out_type The output type information & these are always placeholders.
 * \return The output compute description of the operator.
 */
using FTVMCompute = runtime::TypedPackedFunc<Array<te::Tensor>(
    const Attrs& attrs, const Array<te::Tensor>& inputs, const Type& out_type)>;

/*!
 * \brief Build the computation schedule for
 *  op whose root is at current op.
 *
 * \param attrs The attribute of the node.
 * \param outs The output tensors.
 * \param target The build target.
 * \return schedule The computation schedule.
 */
using FTVMSchedule = runtime::TypedPackedFunc<te::Schedule(
    const Attrs& attrs, const Array<te::Tensor>& outs, const Target& target)>;

/*!
 * \brief Generate the strategy of operators. This function is a generic
 * function and can be re-defined for different targets.
 *
 * The function signature of generic function is:
 * OpStrategy(const Attrs& attrs, const Array<Tensor>& inputs,
 *            const Type& out_type, const Target& target)
 */
using FTVMStrategy = GenericFunc;

/*!
 * \brief Alternate the layout of operators or replace the
 *  operator with other expressions. This function will be invoked
 *  in AlterOpLayout pass.
 * \param attrs The attribute of the original node.
 * \param args The input symbols of the original node.
 * \param tinfos An array of placeholders, use for getting the inferred shape
 *               and dtype of the inputs.
 * \return new_expr The modified expression.
 */
using FTVMAlterOpLayout =
    runtime::TypedPackedFunc<Expr(const Attrs& attrs, const Array<Expr>& args,
                                  const Array<te::Tensor>& tinfos, const Type& out_type)>;

/*!
 * \brief Convert the layout of operators or replace the
 *  operator with other expressions. This function will be invoked
 *  in ConvertLayout pass.
 * \param attrs The attribute of the original node.
 * \param inputs The input symbols of the original node.
 * \param tinfos An array of placeholders, use for getting the inferred shape
 *               and dtype of the inputs.
 * \param desired_layouts Specify an array of desired layouts for each input.
 *                        For example a conv2d op: Array("NHWC", "OHWI"), this
 *                        specifies the desired layout for data then kernel.
 * \return new_expr The modified expression.
 */
using FTVMConvertOpLayout = runtime::TypedPackedFunc<Expr(
    const Attrs& attrs, const Array<Expr>& args, const Array<te::Tensor>& tinfos,
    const Array<String>& desired_layouts)>;

/*!
 * \brief Legalizes an expression with another expression. This function will be
 *  invoked in Legalize pass. It is a target-dependent pass.
 * \param attrs The attribute of the original node.
 * \param args The input symbols of the original node.
 * \param arg_types An array of placeholders, use for getting the inferred shape
 *                  and dtype of the inputs.
 * \return new_expr The modified expression.
 */
using FTVMLegalize = runtime::TypedPackedFunc<Expr(const Attrs& attrs, const Array<Expr>& args,
                                                   const Array<tvm::relay::Type>& arg_types)>;

/*!
 * \brief Annotates an expression to indicate if an op should be compiled using
 *  the given compiler/target.
 * \param expr The original expr.
 * \return true if this op should be registered to invoke a specific compiler
 *  for codegen, otherwise, false.
 */
using FTVMAnnotateTarget = runtime::TypedPackedFunc<bool(const Expr& expr)>;

/*!
 * \brief Forward rewriting rule for a specific op.
 *
 * \param ref_call The reference old call type to be rewritten.
 *                 We can make use of the op and type information.
 * \param new_args The new arguments (some of them could be TempExpr).
 * \param ctx  Optional context information about ref_call.
 * \return The rewriten result call, can also return nullptr,
 *         which indicate the rewriter should use the default fallback
 *         rule that realizes all its input and compose the call.
 *
 * \note When we register the function, we can register
 *       a different signature with ctx to be a specific node type.
 */
using FForwardRewrite = runtime::TypedPackedFunc<Expr(
    const Call& ref_call, const Array<Expr>& new_args, const ObjectRef& ctx)>;

/*!
 * \brief Gradient for a specific op.
 *
 * \param orig_call the original Expr.
 * \param output_grad the gradient of the Expr.
 * \return the gradient for each parameters.
 */
using FPrimalGradient = runtime::TypedPackedFunc<tvm::Array<Expr>(const Expr& orig_call,
                                                                  const Expr& output_grad)>;

/*!
 * \brief The codegeneration strategy for dynamic dimensions.
 */
enum AnyCodegenStrategy {
  /*! \brief The default strategy of using completely variable dimensions. */
  kVariableDimensions
};

/*! \brief A runtime representation of shape. */
using Shape = Array<IndexExpr>;

// Shape function: given attrs, input tensors and output ranks, produce the
// tensors describing the output shapes (used for dynamic-shape ops).
using FShapeFunc = runtime::TypedPackedFunc<Array<te::Tensor>(
    const Attrs& attrs, const Array<te::Tensor>& inputs, const Array<IndexExpr>& out_ndims)>;

}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_OP_ATTR_TYPES_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/op_strategy.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/relay/op_strategy.h * \brief The Relay operator Strategy and related data structure. */ #ifndef TVM_RELAY_OP_STRATEGY_H_ #define TVM_RELAY_OP_STRATEGY_H_ #include <tvm/relay/expr.h> #include <tvm/relay/op_attr_types.h> #include <tvm/target/target.h> #include <tvm/te/schedule.h> #include <tvm/te/tensor.h> #include <string> namespace tvm { namespace relay { /*! * \brief Operator implementation that includes compute and schedule function. */ class OpImplementationNode : public Object { public: /*! \brief Compute function */ FTVMCompute fcompute; /*! \brief Schedule function */ FTVMSchedule fschedule; /*! \brief Name of the implementation */ String name; /*! \brief Priority level */ int plevel; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("name", &name); v->Visit("plevel", &plevel); } static constexpr const char* _type_key = "relay.OpImplementation"; TVM_DECLARE_FINAL_OBJECT_INFO(OpImplementationNode, Object); }; /*! * \brief Operator implementation class. */ class OpImplementation : public ObjectRef { public: /*! * \brief Invoke the operator compute function. * \param attrs The attribute of the primitive * \param inputs The input tensors. 
* \param out_type The output type information. * \return The output compute description of the operator. */ TVM_DLL Array<te::Tensor> Compute(const Attrs& attrs, const Array<te::Tensor>& inputs, const Type& out_type); /*! * \brief Build the computation schedule. * \param attrs The attribute of the node. * \param outs The output tensors. * \param target The build target. * \return The computation schedule. */ TVM_DLL te::Schedule Schedule(const Attrs& attrs, const Array<te::Tensor>& outs, const Target& target); TVM_DEFINE_OBJECT_REF_METHODS(OpImplementation, ObjectRef, OpImplementationNode); }; /*! * \brief Specialized implementations for operators under certain conditions. */ class OpSpecializationNode : public Object { public: /*! \brief List of implementations. */ Array<OpImplementation> implementations; /*! \brief Condition to enable the specialization. * Could be undefined to represent generic case. */ te::SpecializedCondition condition; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("condition", &condition); v->Visit("implementations", &implementations); } static constexpr const char* _type_key = "relay.OpSpecialization"; TVM_DECLARE_FINAL_OBJECT_INFO(OpSpecializationNode, ExprNode); }; /*! * \brief Operator specialization class. */ class OpSpecialization : public ObjectRef { public: /*! * \brief Add an implementation. * \param fcompute Compute function * \param fschedule Schedule function * \param name Name of the implementation * \param plevel Priority level of the implementation */ TVM_DLL void AddImplementation(FTVMCompute fcompute, FTVMSchedule fschedule, String name, int plevel); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(OpSpecialization, ObjectRef, OpSpecializationNode); }; /*! * \brief Operator strategy to choose implementation. */ class OpStrategyNode : public Object { public: /*! \brief List of operator specializations. 
*/ Array<OpSpecialization> specializations; void VisitAttrs(tvm::AttrVisitor* v) { v->Visit("specializations", &specializations); } static constexpr const char* _type_key = "relay.OpStrategy"; TVM_DECLARE_FINAL_OBJECT_INFO(OpStrategyNode, ExprNode); }; /*! * \brief Operator strategy class. */ class OpStrategy : public ObjectRef { public: /*! * \brief Add an implementation. * \param fcompute Compute function * \param fschedule Schedule function * \param name Name of the implementation * \param plevel Priority level of the implementation */ TVM_DLL void AddImplementation(FTVMCompute fcompute, FTVMSchedule fschedule, String name, int plevel); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(OpStrategy, ObjectRef, OpStrategyNode); }; } // namespace relay } // namespace tvm #endif // TVM_RELAY_OP_STRATEGY_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/pattern_functor.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/pattern_functor.h
 * \brief A more powerful visitor on ADT patterns that enables defining
 * arbitrary function signatures with type-based dispatch on first argument.
 */
#ifndef TVM_RELAY_PATTERN_FUNCTOR_H_
#define TVM_RELAY_PATTERN_FUNCTOR_H_

#include <tvm/ir/error.h>
#include <tvm/node/functor.h>

#include <string>
#include <unordered_map>
#include <utility>

#include "./adt.h"
#include "./expr.h"
#include "./op.h"

namespace tvm {
namespace relay {

/*!
 * \brief A dynamical functor on ADT patterns that dispatches on its first argument.
 *  You can use this as a more powerful visitor, since it allows you to
 *  define the types of further arguments to VisitPattern.
 *
 * \sa tvm/ir_functor.h
 *
 * \tparam FType function signature
 *  This type is only defined for FType with function signature R(const Pattern&,
 * Args...)
 */
template <typename FType>
class PatternFunctor;

// functions to be overriden.
// Default handler body: route unhandled pattern kinds to VisitPatternDefault_.
#define PATTERN_FUNCTOR_DEFAULT \
  { return VisitPatternDefault_(op, std::forward<Args>(args)...); }

// Register a vtable entry that downcasts the node to OP and forwards to the
// matching VisitPattern_ overload.
#define RELAY_PATTERN_FUNCTOR_DISPATCH(OP)                                                    \
  vtable.template set_dispatch<OP>([](const ObjectRef& n, TSelf* self, Args... args) {        \
    return self->VisitPattern_(static_cast<const OP*>(n.get()), std::forward<Args>(args)...); \
  });

template <typename R, typename... Args>
class PatternFunctor<R(const Pattern& n, Args...)> {
 private:
  using TSelf = PatternFunctor<R(const Pattern& n, Args...)>;
  using FType = tvm::NodeFunctor<R(const ObjectRef& n, TSelf* self, Args...)>;

 public:
  /*! \brief the result type of this functor */
  using result_type = R;
  /*! \brief virtual destructor */
  virtual ~PatternFunctor() {}
  /*!
   * \brief Same as call.
   * \param n The expression node.
   * \param args Additional arguments.
   * \return The result of the call
   */
  R operator()(const Pattern& n, Args... args) {
    return VisitPattern(n, std::forward<Args>(args)...);
  }
  /*!
   * \brief The functor call.
   * \param n The expression node.
   * \param args Additional arguments.
   * \return The result of the call
   */
  virtual R VisitPattern(const Pattern& n, Args... args) {
    ICHECK(n.defined());
    // The vtable is built once and shared across all instances of this
    // instantiation of the template.
    static FType vtable = InitVTable();
    return vtable(n, this, std::forward<Args>(args)...);
  }
  // Functions that can be overriden by subclass
  virtual R VisitPattern_(const PatternWildcardNode* op, Args... args) PATTERN_FUNCTOR_DEFAULT;
  virtual R VisitPattern_(const PatternVarNode* op, Args... args) PATTERN_FUNCTOR_DEFAULT;
  virtual R VisitPattern_(const PatternConstructorNode* op, Args... args) PATTERN_FUNCTOR_DEFAULT;
  virtual R VisitPattern_(const PatternTupleNode* op, Args... args) PATTERN_FUNCTOR_DEFAULT;
  virtual R VisitPatternDefault_(const Object* op, Args...) {
    LOG(FATAL) << "Do not have a default for " << op->GetTypeKey();
    throw;
  }

 private:
  // initialize the vtable.
  static FType InitVTable() {
    FType vtable;
    // Set dispatch
    RELAY_PATTERN_FUNCTOR_DISPATCH(PatternWildcardNode);
    RELAY_PATTERN_FUNCTOR_DISPATCH(PatternVarNode);
    RELAY_PATTERN_FUNCTOR_DISPATCH(PatternConstructorNode);
    RELAY_PATTERN_FUNCTOR_DISPATCH(PatternTupleNode);
    return vtable;
  }
};

/*! \brief A simple visitor wrapper around PatternFunctor.
 *
 * Exposes two visitors with default traversal strategies, one
 * which doesn't compute a result but can mutate internal state,
 * and another which functionally builds a new pattern.
 */
class PatternVisitor : public ::tvm::relay::PatternFunctor<void(const Pattern& n)> {
 public:
  void VisitPattern_(const PatternWildcardNode* op) override;
  void VisitPattern_(const PatternVarNode* op) override;
  void VisitPattern_(const PatternConstructorNode* op) override;
  void VisitPattern_(const PatternTupleNode* op) override;
  virtual void VisitType(const Type& t);
  virtual void VisitVar(const Var& v);
  virtual void VisitConstructor(const Constructor& c);
};

/*! \brief A wrapper around ExprFunctor which functionally updates the AST.
 *
 * ExprMutator uses memoization and self return in order to amortize
 * the cost of using functional updates.
 */
class PatternMutator : public ::tvm::relay::PatternFunctor<Pattern(const Pattern&)> {
 public:
  Pattern Mutate(const Pattern& pat);
  Pattern VisitPattern_(const PatternWildcardNode* op) override;
  Pattern VisitPattern_(const PatternVarNode* op) override;
  Pattern VisitPattern_(const PatternConstructorNode* op) override;
  Pattern VisitPattern_(const PatternTupleNode* op) override;
  /*! \brief Used to visit the types inside of patterns.
   *
   * Can be overloaded to transform the types in arbitrary
   * ways, one way would be to define a sub-class of type
   * visitor for types which transform them appropriately.
   */
  virtual Type VisitType(const Type& t);
  /*! \brief Used to visit the vars inside of patterns. */
  virtual Var VisitVar(const Var& v);
  /*! \brief Used to visit the vars inside of patterns. */
  virtual Constructor VisitConstructor(const Constructor& c);

 private:
  // Maps original pattern vars to their (possibly rewritten) replacements
  // so repeated occurrences resolve to the same new var.
  std::unordered_map<Var, Var, ObjectPtrHash, ObjectPtrEqual> var_map_;
};

}  // namespace relay
}  // namespace tvm

#endif  // TVM_RELAY_PATTERN_FUNCTOR_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/qnn/attrs.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/relay/qnn/attrs.h * \brief Auxiliary attributes for qnn operators. */ #ifndef TVM_RELAY_QNN_ATTRS_H_ #define TVM_RELAY_QNN_ATTRS_H_ #include <tvm/ir/attrs.h> #include <string> namespace tvm { namespace relay { namespace qnn { /*! \brief Attribute for requantize operator */ struct RequantizeAttrs : public tvm::AttrsNode<RequantizeAttrs> { int axis; std::string rounding; std::string compute_dtype; DataType out_dtype; TVM_DECLARE_ATTRS(RequantizeAttrs, "relay.attrs.RequantizeAttrs") { TVM_ATTR_FIELD(axis) .describe( "The output channel axis for channel wise quantization. Default value is -1," "which corresponds to the last axis.") .set_default(-1); TVM_ATTR_FIELD(rounding).set_default("None").describe( "Defines the rounding direction when the value is midway between" "two representable values. There are two supported modes - UPWARD" "or TONEAREST. Both modes behave exactly same except at the" "midpoints between the two representable values. At the midpoint," "UPWARD rounds towards positive infinity (for example -1.5 will be" "rounded to -1). TONEAREST is the standard rounding where the" "value is rounded away from zero at midpoints (for example, -1.5" "rounds to -2). 
More context can be found at following gblic manual" "https://www.gnu.org/software/libc/manual/html_node/Rounding.html."); TVM_ATTR_FIELD(compute_dtype) .set_default("None") .describe( "Specifies the data type used during requantize. Supported " "options: \"int64\", \"float32\", \"float64\""); TVM_ATTR_FIELD(out_dtype) .set_default(NullValue<DataType>()) .describe("Output data type, set to explicit type under mixed precision setting"); } }; /*! \brief Attribute for quantize operator */ struct QuantizeAttrs : public tvm::AttrsNode<QuantizeAttrs> { DataType out_dtype; int axis; TVM_DECLARE_ATTRS(QuantizeAttrs, "relay.attrs.QuantizeAttrs") { TVM_ATTR_FIELD(out_dtype).describe("Output data type, can be one of [int8 or uint8]."); TVM_ATTR_FIELD(axis) .describe( "The output channel axis for channel wise quantization. Default value is -1," "which corresponds to the last axis.") .set_default(-1); } }; struct SimulatedQuantizeAttrs : public tvm::AttrsNode<SimulatedQuantizeAttrs> { int axis; TVM_DECLARE_ATTRS(SimulatedQuantizeAttrs, "relay.attrs.SimulatedQuantizeAttrs") { TVM_ATTR_FIELD(axis) .describe( "The output channel axis for channel wise quantization. Default value is -1," "which corresponds to the last axis.") .set_default(-1); } }; /*! \brief Attribute for dequantize operator */ struct DequantizeAttrs : public tvm::AttrsNode<DequantizeAttrs> { int axis; TVM_DECLARE_ATTRS(DequantizeAttrs, "relay.attrs.DequantizeAttrs") { TVM_ATTR_FIELD(axis) .describe( "The channel axis for channel wise dequantization. Default value is -1," "which corresponds to the last axis.") .set_default(-1); } }; /*! \brief Attribute for broadcast operator */ struct BroadcastAttrs : public tvm::AttrsNode<BroadcastAttrs> { int lhs_axis; int rhs_axis; TVM_DECLARE_ATTRS(BroadcastAttrs, "relay.attrs.BroadcastAttrs") { TVM_ATTR_FIELD(lhs_axis) .describe( "The channel axis for channel wise broadcast. 
Default value is -1," "which corresponds to the last axis.") .set_default(-1); TVM_ATTR_FIELD(rhs_axis) .describe( "The channel axis for channel wise broadcast. Default value is -1," "which corresponds to the last axis.") .set_default(-1); } }; } // namespace qnn } // namespace relay } // namespace tvm #endif // TVM_RELAY_QNN_ATTRS_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/qnn/transform.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/qnn/transform.h
 *
 * This file implements a pass manager for QNN ops using Relay Pass manager.
 */
#ifndef TVM_RELAY_QNN_TRANSFORM_H_
#define TVM_RELAY_QNN_TRANSFORM_H_

#include <tvm/relay/transform.h>
#include <tvm/runtime/c_runtime_api.h>

namespace tvm {
namespace relay {

using relay::transform::Pass;

namespace qnn {
namespace transform {

/*!
 * \brief Legalizes a QNN expr. Contains specifically two types of Legalizations. First,
 * converts/Lowers an expression containing QNN ops to an expression containing only core Relay
 * ops. Each QNN op is lowered to a sequence of existing Relay ops. This is a target-independent
 * pass. One can register the lowering/transformation function for this op using
 * FTVMQnnCanonicalize attr_name for FTVMLegalize op attribute. Second, as opposed to Relay
 * Legalize, this one legalizes only QNN ops. One can register a transformation/legalization
 * function for an op by using the FTVMQnnLegalize attr_name for FTVMLegalize op attribute. The
 * isolation of QNN and Relay Legalize gives us separation of concerns, leading to a better
 * software practice. The legalization can be configured to happen per target.
 *
 * \return The pass.
 */
TVM_DLL Pass Legalize();

}  // namespace transform
}  // namespace qnn
}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_QNN_TRANSFORM_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/runtime.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/runtime.h
 * \brief Object representation of Runtime configuration and registry
 */
#ifndef TVM_RELAY_RUNTIME_H_
#define TVM_RELAY_RUNTIME_H_

#include <dmlc/registry.h>
#include <tvm/ir/attrs.h>
#include <tvm/ir/expr.h>
#include <tvm/ir/type.h>
#include <tvm/ir/type_relation.h>
#include <tvm/node/attr_registry_map.h>
#include <tvm/runtime/registry.h>

#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

namespace tvm {

// Forward declaration so RuntimeRegEntry can befriend the registry.
template <typename, typename>
class AttrRegistry;

namespace relay {

/*! \brief Value used with Runtime::name to indicate the C++ runtime. */
static constexpr const char* kTvmRuntimeCpp = "cpp";

/*! \brief Value used with Runtime::name to indicate the C runtime. */
static constexpr const char* kTvmRuntimeCrt = "crt";

/*!
 * \brief Runtime information.
 *
 * This data structure stores the meta-data
 * about Runtimes which can be used to pass around information.
 *
 * \sa Runtime
 */
class RuntimeNode : public Object {
 public:
  /*! \brief name of the Runtime */
  String name;
  /*! \brief Additional attributes storing meta-data about the Runtime. */
  DictAttrs attrs;

  /*!
   * \brief Get an attribute.
   *
   * \param attr_key The attribute key.
   * \param default_value The default value if the key does not exist, defaults to nullptr.
   *
   * \return The result
   *
   * \tparam TObjectRef the expected object type.
   * \throw Error if the key exists but the value does not match TObjectRef
   *
   * \code
   *
   *  void GetAttrExample(const Runtime& runtime) {
   *    auto value = runtime->GetAttr<Integer>("AttrKey", 0);
   *  }
   *
   * \endcode
   */
  template <typename TObjectRef>
  Optional<TObjectRef> GetAttr(
      const std::string& attr_key,
      Optional<TObjectRef> default_value = Optional<TObjectRef>(nullptr)) const {
    // Lookup is delegated to the underlying attribute dictionary.
    return attrs.GetAttr(attr_key, default_value);
  }

  // variant that uses TObjectRef to enable implicit conversion to default value.
  template <typename TObjectRef>
  Optional<TObjectRef> GetAttr(const std::string& attr_key, TObjectRef default_value) const {
    return GetAttr<TObjectRef>(attr_key, Optional<TObjectRef>(default_value));
  }

  void VisitAttrs(AttrVisitor* v) {
    v->Visit("name", &name);
    v->Visit("attrs", &attrs);
  }

  bool SEqualReduce(const RuntimeNode* other, SEqualReducer equal) const {
    // Two Runtime nodes are structurally equal iff name and attrs match.
    return name == other->name && equal.DefEqual(attrs, other->attrs);
  }

  void SHashReduce(SHashReducer hash_reduce) const {
    hash_reduce(name);
    hash_reduce(attrs);
  }

  static constexpr const char* _type_key = "Runtime";
  static constexpr const bool _type_has_method_sequal_reduce = true;
  static constexpr const bool _type_has_method_shash_reduce = true;
  TVM_DECLARE_FINAL_OBJECT_INFO(RuntimeNode, Object);
};

/*!
 * \brief Managed reference class to RuntimeNode.
 * \sa RuntimeNode
 */
class Runtime : public ObjectRef {
 public:
  Runtime() = default;

  /*!
   * \brief Create a new Runtime object using the registry
   * \throws Error if name is not registered
   * \param name The name of the Runtime.
   * \param attrs Attributes for the Runtime.
   * \return the new Runtime object.
   */
  TVM_DLL static Runtime Create(String name, Map<String, ObjectRef> attrs = {});

  /*!
   * \brief List all registered Runtimes
   * \return the list of Runtimes
   */
  TVM_DLL static Array<String> ListRuntimes();

  /*!
   * \brief List all options for a specific Runtime
   * \param name The name of the Runtime
   * \return Map of option name to type
   */
  TVM_DLL static Map<String, String> ListRuntimeOptions(const String& name);

  /*! \brief specify container node */
  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(Runtime, ObjectRef, RuntimeNode);

 private:
  /*!
   * \brief Private Constructor
   * \param name The Runtime name
   * \param attrs Attributes to apply to this Runtime node
   */
  TVM_DLL Runtime(String name, DictAttrs attrs) {
    auto n = make_object<RuntimeNode>();
    n->name = std::move(name);
    n->attrs = std::move(attrs);
    data_ = std::move(n);
  }
};

/*!
 * \brief Helper structure to register Runtimes
 * \sa TVM_REGISTER_RUNTIME
 */
class RuntimeRegEntry {
 public:
  /*!
   * \brief Register a valid configuration option and its ValueType for validation
   * \param key The configuration key
   * \tparam ValueType The value type to be registered
   */
  template <typename ValueType>
  inline RuntimeRegEntry& add_attr_option(const String& key);

  /*!
   * \brief Register a valid configuration option and its ValueType for validation
   * \param key The configuration key
   * \param default_value The default value of the key
   * \tparam ValueType The value type to be registered
   */
  template <typename ValueType>
  inline RuntimeRegEntry& add_attr_option(const String& key, ObjectRef default_value);

  /*!
   * \brief Register or get a new entry.
   * \param name The name of the Runtime.
   * \return the corresponding entry.
   */
  TVM_DLL static RuntimeRegEntry& RegisterOrGet(const String& name);

 private:
  /*! \brief Internal storage of value types */
  struct ValueTypeInfo {
    std::string type_key;
    uint32_t type_index;
  };
  // Maps option key -> expected value type, used for validation.
  std::unordered_map<std::string, ValueTypeInfo> key2vtype_;
  /*! \brief A hash table that stores the default value of each attr */
  std::unordered_map<String, ObjectRef> key2default_;
  /*! \brief Index used for internal lookup of attribute registry */
  uint32_t index_;
  // the name
  std::string name;

  /*! \brief Return the index stored in attr registry */
  uint32_t AttrRegistryIndex() const { return index_; }
  /*! \brief Return the name stored in attr registry */
  String AttrRegistryName() const { return name; }
  /*! \brief private constructor */
  explicit RuntimeRegEntry(uint32_t reg_index) : index_(reg_index) {}

  // friend classes: the attr registry machinery and Runtime need access
  // to the private bookkeeping above.
  template <typename>
  friend class AttrRegistryMapContainerMap;
  template <typename, typename>
  friend class tvm::AttrRegistry;
  friend class Runtime;
};

template <typename ValueType>
inline RuntimeRegEntry& RuntimeRegEntry::add_attr_option(const String& key) {
  // Each option key may only be declared once per Runtime kind.
  ICHECK(!key2vtype_.count(key)) << "AttributeError: add_attr_option failed because '" << key
                                 << "' has been set once";

  using ValueNodeType = typename ValueType::ContainerType;
  // NOTE: we could further update the function later.
  uint32_t value_type_index = ValueNodeType::_GetOrAllocRuntimeTypeIndex();
  ValueTypeInfo info;
  info.type_index = value_type_index;
  info.type_key = runtime::Object::TypeIndex2Key(value_type_index);
  key2vtype_[key] = info;
  return *this;
}

template <typename ValueType>
inline RuntimeRegEntry& RuntimeRegEntry::add_attr_option(const String& key,
                                                         ObjectRef default_value) {
  add_attr_option<ValueType>(key);
  key2default_[key] = default_value;
  return *this;
}

// internal macros to make Runtime entries
#define TVM_RUNTIME_REGISTER_VAR_DEF \
  static DMLC_ATTRIBUTE_UNUSED ::tvm::relay::RuntimeRegEntry& __make_##Runtime

/*!
 * \def TVM_REGISTER_RUNTIME
 * \brief Register a new Runtime, or set attribute of the corresponding Runtime.
 *
 * \param RuntimeName The name of registry
 *
 * \code
 *
 *  TVM_REGISTER_RUNTIME("c")
 *      .add_attr_option<String>("my_option")
 *      .add_attr_option<String>("my_option_default", String("default"));
 *
 * \endcode
 */
#define TVM_REGISTER_RUNTIME(RuntimeName)                     \
  TVM_STR_CONCAT(TVM_RUNTIME_REGISTER_VAR_DEF, __COUNTER__) = \
      ::tvm::relay::RuntimeRegEntry::RegisterOrGet(RuntimeName)

}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_RUNTIME_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/transform.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/relay/transform.h * \brief Relay specific transformation passes. */ #ifndef TVM_RELAY_TRANSFORM_H_ #define TVM_RELAY_TRANSFORM_H_ #include <tvm/ir/transform.h> #include <tvm/relay/attrs/transform.h> #include <tvm/relay/expr.h> #include <tvm/relay/function.h> #include <tvm/relay/op.h> #include <tvm/relay/op_attr_types.h> #include <tvm/target/compilation_config.h> #include <tvm/target/target.h> #include <tvm/target/virtual_device.h> #include <string> namespace tvm { namespace relay { namespace transform { using Pass = tvm::transform::Pass; using PassNode = tvm::transform::PassNode; using PassInfo = tvm::transform::PassInfo; using PassInfoNode = tvm::transform::PassInfoNode; using PassContext = tvm::transform::PassContext; using PassContextNode = tvm::transform::PassContextNode; using Sequential = tvm::transform::Sequential; /* * \brief Create a function pass. * * \param pass_func The packed function that contains the optimization. * \param opt_level The optimization level of the function pass. * \param name The name of the function pass. * \param required The list of the passes that the function pass is dependent on. * * \return The created function pass. 
*/ TVM_DLL Pass CreateFunctionPass( const runtime::TypedPackedFunc<Function(Function, IRModule, PassContext)>& pass_func, int opt_level, String name, tvm::Array<String> required); /*! \brief Remove let-bound expressions which do not effect the program result. * * This pass will remove let bindings which are not referenced. If inline_once is True, * let bindings which are only referenced once will also be inlined. * * For example, this pass should turn `let a = 1; 2` into `2`, * as the value of the expression does not depend on a. * * As another example, `let a = 1; a` will be optimized into 1 if inline_once is True. * * If ignore_purity is False, possibly side-effecting expressions (such as memory allocation, * random number generation, reading/writing references, or calls to primitive or external * functions) are never elided or inlined. This is sound, but ignore_purity can be set to True * to suppress this check. * * The analysis is fairly conservative, for example it assumes all local functions * may be called more than once, any functions passed as arguments have side effects, * and so on. * * \param inline_once whether or not to inline bindings used exactly once. * \param ignore_purity whether to ignore whether expressions have side-effects * * \return the pass. */ TVM_DLL Pass DeadCodeElimination(bool inline_once = false, bool ignore_purity = false); /*! * \brief Convert all expressions of TensorType into GradCell, * an algebraic data type defined in gradient.rly. * * This will delay or decrease memory usage. All calls to * ones, ones_like, zeros, zeros_like will not immediately instantiate a tensor in memory, * rather only instantiate if needed. It also defines + and * operation * between GradCell types which can increase performance when using * zero-filled or one-filled tensors, which is the case in reverse mode ad. * * \return the pass */ TVM_DLL Pass LazyGradientInit(); /*! * \brief Fold constant expressions. 
* * Because of backward compatibility reason it skips QNN primitives from folding by default. * There are some transformation passes like FakeQuantizationToInteger, which requires to keep QNN * primitives for constant subgraphs. Uncontrolled constant folding of QNN primitives may break * applicability of FakeQuantizationToInteger. We suggest to use FoldConstant pass with none * default fold_qnn=True value only when all other QNN sensitive passes were already applied. * * \param fold_qnn Whether to fold constants for QNN operations. * * \return The pass. */ TVM_DLL Pass FoldConstant(bool fold_qnn = false); /*! * \brief Split function with huge number of arguments to smaller pieces. * * \return The pass. */ TVM_DLL Pass SplitArgs(int max_function_args); /*! * \brief Fuse operations into expr into separate functions. * * \param fuse_opt_level Optimization level. If it is -1 it will be inferred from pass context. * * \return The pass. */ TVM_DLL Pass FuseOps(int fuse_opt_level = -1); /*! * \brief The inverse operation of FuseOps. It transforms a fused program returned by * FuseOps into the program before FuseOps. (i.e. x == DefuseOps(FuseOps(x))) * * \return The pass. */ TVM_DLL Pass DefuseOps(); /*! * \brief Rewrite the annotated program. * * \param fallback_device The fallback device which is the default device for * operators without annotation. * * \return The pass. */ TVM_DLL Pass RewriteAnnotatedOps(int fallback_device); /*! * \brief Turn an expression to Basic Block Normal Form. * * We define a block as a group of expressions implied by the scope structure. * * Each graph node can only belong to a single block. * * For any value that is being used in multiple blocks, it has to be referred * by a Var which is defined in a block, whose scope is the least common ancestor * of blocks this value is used. * * \return The pass. */ TVM_DLL Pass ToBasicBlockNormalForm(); /*! * \brief turn a dataflow graph into Administrative Normal Form, or A-Normal Form (ANF). 
* * It will turn an expression that is in a graph form (with sharing implicit), * to an expression with explicit sharing (A-Normal Form). * * The scope of the root expression is the global scope. * * The scope of any non root expression is the least common ancestor of all it's scope. * * Values are ordered by post-DFS order in each scope. * * \return The pass. */ TVM_DLL Pass ToANormalForm(); /*! * \brief ToANormalForm but on incomplete graph. * * \param expr the graph. * * \return The transformed program. */ TVM_DLL Expr ToANormalForm(const Expr& expr); /*! * \brief Turn an expression into continuation passing style(CPS). * * CPS mean that every function will, instead of returning the result directly, * be passed down an extra function (called the continuation) as argument, * and pass the result to the continuation instead. * * Thus, every function call has to be passed an extra argument * that represent the rest of the computation (Hence the name of continuation). * * Similarly, all other compute will be wrapped and call the continuation as well. * * \return the pass. */ TVM_DLL Pass ToCPS(); /*! * \brief Remove let binding and directly share via pointer instead. * * It will remove all let binding, * and turn all of the variable bound by let into direct pointer reference. * * \return the expression in graph normal form. */ TVM_DLL Pass ToGraphNormalForm(); /*! * \brief Aggressive constant propagation/constant folding/inlining. * * It will do as much computation in compile time as possible. * It has two benefit: remove runtime overhead, and allow more optimization (typically fusion). * As a side effect, code size will explode. * * \return the optimized expression. */ TVM_DLL Pass PartialEval(); /*! * \brief Simplify certain operators during inference. For example, the result * of a batch norm which is indexed at tuple index 0 will be unpacked into a * number of simplified operators. * * \return The Pass. */ TVM_DLL Pass SimplifyInference(); /*! 
* \brief Replaces non linear activation functions with their fast but approximate counterparts. * * \return The Pass. */ TVM_DLL Pass FastMath(); /*! * \brief Find Dynamic ops and make them static * * Searches the graph for dynamic ops. If the dynamic inputs to those ops are constants, it replaces * them with static ops and re-performs type inference and constant folding. The pass repeats * itself until the graph stops changing or we run too many iterations. * * \return The pass. */ TVM_DLL Pass DynamicToStatic(); /*! * \brief Infer the type of an expression. * * The result of type checking is a new expression with unambiguous * type information filled in, as well as it's checked type field * populated with the result type. * * \return The pass. */ TVM_DLL Pass InferType(); /*! * \brief Infer the type of an expression, reusing existing type information. * * The result of type checking is a new expression with unambiguous * type information filled in for the given node only. The local * version can use existing type information populated throughout * the expression and assumes this information is correct. The local * version also avoids examining large amounts of the graph assuming * type information is filled in properly which makes it much faster if we * iteratively call type inference. * * \return The type of the expression. */ TVM_DLL Type InferTypeLocal(const Expr& expr); /*! * \brief Search and eliminate common subexpression. For example, if there are * two expressions evaluated to an identical value, a single variable is created * and these two expressions are replaced by this variable. * * \param fskip The callback argument that allows to skip certain expressions. * * \return The pass. */ TVM_DLL Pass EliminateCommonSubexpr(runtime::PackedFunc fskip = nullptr); /*! * \brief Combine parallel 2d convolutions into a single convolution if the * number of branches of this conv2d operator is not less than * `min_num_branch`. 
* * \param min_num_branches The minimun number of branches. * * \return The pass. */ TVM_DLL Pass CombineParallelConv2D(uint64_t min_num_branches = 3); /*! * \brief Combine parallel dense ops into a single batch_matmul if the * number of branches of this dense operator is not less than * `min_num_branch`. * * \param min_num_branches The minimun number of branches. * \param to_batch_matmul Whether to combine parallel dense ops to batch matmul. * If set false, combine dense ops to single dense op. * * \return The pass. */ TVM_DLL Pass CombineParallelDense(uint64_t min_num_branches = 3, bool to_batch_matmul = true); /*! * \brief Combine parallel batch_matmul ops into a single batch_matmul * if the number of branches of this dense operator is not less than * `min_num_branch`. * * \param min_num_branches The minimun number of branches. * * \return The pass. */ TVM_DLL Pass CombineParallelBatchMatmul(uint64_t min_num_branches = 3); /*! * \brief Backward fold axis scaling into weights of conv/dense operators. * * \return The pass. */ TVM_DLL Pass BackwardFoldScaleAxis(); /*! * \brief Forward fold axis scaling into weights of conv/dense operators. * * \return The pass. */ TVM_DLL Pass ForwardFoldScaleAxis(); /*! * \brief A sequential pass that executes ForwardFoldScaleAxis and * BackwardFoldScaleAxis passes. * * \return The pass. */ TVM_DLL Pass FoldScaleAxis(); /*! * \brief Canonicalize some operators to the simplified operators. For example, * bias_add can be canonicalized to expand_dims and broadcast_add. * * \return The pass. */ TVM_DLL Pass CanonicalizeOps(); /*! * \brief Alternate the layouts of operators or replace primitive operators * with other expressions. * * \return The pass. */ TVM_DLL Pass AlterOpLayout(); /*! * \brief Do layout rewrite according to the tile structure created by auto-scheduler. * \return The pass */ TVM_DLL Pass AutoSchedulerLayoutRewrite(); /*! * \brief Do layout rewrite according to the tile structure created by meta-schedule. 
* \return The pass */ TVM_DLL Pass MetaScheduleLayoutRewrite(); /*! * \brief Given a dest layout, this pass transforms the expr such that most of the ops input data * layout is changed to the dest layout. In ideal situation, there are only 2 layout transforms, one * at the start and one at the end. * * This pass is not a part of relay.build and is expected to be called between framework-relay * parser and relay.build call. This is very helpful for hardware backends that support/prefer only * type of data layout. * * RFC - https://discuss.tvm.ai/t/layout-conversion-pass/4009 * * This pass uses most of the AlterOpLayout and InferCorrectLayout infrastructure. We can define new * layouts for conv2d ops for now. Most of the other operators try to adapt to their input layout * using the InferCorrectLayout infrastructure. * * \param desired_layouts Specify mapping of op_name to array of desired layouts for each input. * For example: Map("nn.conv2d", Array("NHWC", "OHWI")), * this specifies the desired layout for data then kernel for nn.conv2d. * \return The pass. */ TVM_DLL Pass ConvertLayout(const Map<String, Array<String>>& desired_layouts); /*! * \brief Legalizes an expr with another expression. * \param legalize_map_attr_name The Op's attr name which corresponds to the legalize rule function. * One can collect and isolate similar type of legalize transformations using this param. For * example, transformations that only apply to Dialects can be isolated into a FTVMDialectLegalize * string. This pass calls only those transformations that have been registered using the supplied * legalize_map_attr_name. * * \return The pass. */ TVM_DLL Pass Legalize(const String& legalize_map_attr_name = "FTVMLegalize"); /*! * \brief Canonicalize cast expressions to make operator fusion more efficient. * * \return The pass. */ TVM_DLL Pass CanonicalizeCast(); /*! * \brief Add abstraction over a constructor or global variable bound to a function. 
* * For example: `square` is transformed to * `fn (%x: int32) -> int32 { square(x) }`. * * See https://en.wikipedia.org/wiki/Lambda_calculus#%CE%B7-conversion * for more details. * * \param expand_constructor Whether to expand constructors. * \param expand_global_var Whether to expand global variables. * * \return The pass. */ TVM_DLL Pass EtaExpand(bool expand_constructor, bool expand_global_var); /*! * \brief Partition a Relay program into regions that can be executed on * different backends. * * \return The pass. */ TVM_DLL Pass PartitionGraph(); /*! * \brief Inline the global functions marked as `inline` in a given Relay * IRModule. * * \return The pass. */ TVM_DLL Pass Inline(); /*! * \brief Remove the unused functions in the Relay IRModule. * * \param entry_functions The entry functions used to search the functions that * are being used. * * \return The pass. */ TVM_DLL Pass RemoveUnusedFunctions(Array<runtime::String> entry_functions); /*! * \brief Simplify the Relay expression. * * \return The pass. */ TVM_DLL Pass SimplifyExpr(); /*! * \brief Run any custom passes registered under "RelayToTIR" attributes on TargetKinds. * * This pass looks for inline, let-bound or global functions which have a "Compiler" attribute. * If the attribute value corresponds to a TargetKind with a "RelayToTIR" attribute, then the * 'custom' pass bound to that attribute is run (at most once) on the IRModule as a whole. * * If, in addition, the \p config has a Target with a matching TargetKind, that Target is set * as the 'current' target before the custom pass is executed. In this way it is possible * for custom passes to pick up target options which may guide how they transform the IRModule. * (Those targets are referred to as 'extern codegen targets' elsewhere). * * A typical custom pass will: * - Find calls to "Compiler" attributes functions with matching compiler name. * - Lower those function to TIR PrimFuncs. 
 * - Bind those functions into the IRModule under the functions' "global_symbol" attribute.
 * - Replace all calls to those functions with 'call_lowered' to the matching global.
 * Care should be taken to handle multiple calls to the same function.
 * See src/relay/backend/contrib/example_target_hooks/relay_to_tir.cc for an example custom pass.
 *
 * It is also possible (despite the pass and attribute names!) for the custom pass to proceed
 * directly to a runtime::Module, which can be attached to the output IRModules "external_mods"
 * attribute (taking care not to clobber any existing modules). In this case the flow is as above,
 * except:
 * - The runtime::Module must contain a binding for each compiled function under their
 *   "global_symbol" (ie runtime::Module::ImplementsFunction should return true).
 * - A Relay Function must be bound (or re-bound) into the result IRModule, again with the same
 *   "global_symbol", but with only the "Extern" attribute set to Integer(1). The function body
 *   should be the original function body. In this way we always have a TVM definition matching
 *   every global function name.
 *
 * There are many existing runtime::Modules, ranging from source to object to dynamic libraries to
 * entirely custom implementations. Some of those may require additional compilation using
 * 'export_library' on the final build artifact.
 *
 * The OutlineCompilerFunctionsWithExistingGlobalSymbols and MarkCompilerFunctionsAsExtern utility
 * passes can be used by custom passes to take care of some of the boilerplate.
 *
 * TODO(mbs): Rename PreLoweringTargetHooks?
 *
 * \param config All available targets.
 *
 * \return The pass.
 */
TVM_DLL Pass RelayToTIRTargetHook(CompilationConfig config);

/*!
 * \brief A pass for manifesting explicit memory allocations and rewriting
 * specific dialects.
 *
 * \param cpu_virtual_device VirtualDevice for computations and data which must reside on a CPU,
 * such as shapes and shape functions.
 *
 * \return The pass.
*/ TVM_DLL Pass ManifestAlloc(VirtualDevice cpu_virtual_device); /*! * \brief A pass for manifesting variable lifetimes by inserting kill operations when variables * become dead. This pass should be run after ManifestAlloc, and should not be run more than once. * * \return The pass. */ TVM_DLL Pass ManifestLifetimes(); /*! * \brief Uses existing "on_device" and "device_copy" CallNodes to infer the \p VirtualDevice on * which every Relay sub-expression should run and the result stored. Captures the result of that * analysis using new "on_device" and "device_copy" CallNodes. * * See tvm::relay::transform::{LexicalOnDeviceMixin,DeviceAwareExprVisitor,DeviceAwareExprMutator} * for help recovering the device for an arbitrary sub-expression in downstream transformations. * * \param config Describes the targets and default \p VirtualDevice for all primitive operators and * host sub-expressions. * * \return The pass. */ TVM_DLL Pass PlanDevices(CompilationConfig config); /*! * \brief This transform flattens atrous convolution, which corresponds to the sequence of * operations: "space_to_batch_nd"->"conv2d"->"batch_to_space_nd" and convert them into subgraphs * with a convolution with the modified "dilation" and recalculated "padding" parameters. * * \return The pass. */ TVM_DLL Pass FlattenAtrousConv(); /*! * \brief Annotates the minimum required memory of each primitive function callsite by analyzing * the liveness of the input/output tensors at each function callsite and calculating the total * amount of memory these tensors require. This is added as a "used_memory" annotation to the * function in question as a list of the number of bytes for each callsite. In addition, the * containing function is annotated with an "io_used_memory" annotation which refers to the total * memory required for the IO tensors. * * Note: This pass does not support dynamic shapes, it is the users responsibility to check this * pass isn't applied where dynamic shapes may be input. 
 */
TVM_DLL Pass AnnotateUsedMemory();

/*!
 * \brief Captures the post-dfs index and dominator post-dfs index of (most) expression nodes in
 * their span, in the form "index:<post-dfs index>:<dominator post-dfs index>". This is useful for
 * debugging since a) it helps identify pretty-printed sub-expressions within the overall model
 * and b) the indexes are heavily used by Collage for its compact representation of sub-graphs.
 *
 * Note that Op and Constructor nodes are not changed even though they are assigned a
 * post-dfs index.
 */
TVM_DLL Pass CapturePostDfsIndexInSpans();

/*!
 * \brief Calls device dependent memory scope analysis pass, collects mapping of desirable
 * expr->memory_scope and annotates expressions by VirtualDevice with required memory_scope
 */
TVM_DLL Pass AnnotateMemoryScope(CompilationConfig config);

/*!
 * \brief Removes non-fused reshapes after lowering the graph.
 * InferType() cannot be invoked after calling this pass as it removes reshapes from the call
 * graph. Many targets only need buffer addresses irrespective of the shapes of them. This makes
 * reshapes symbolic once the graph has been lowered. Reshape removal results in smaller code
 * size and reduced buffer allocations. It opens up opportunities of operator fusion in the target
 * backend. Thus, consequently, it improves the performance of the inference.
 */
TVM_DLL Pass RemoveStandaloneReshapes();

}  // namespace transform

/*!
 * \brief Bind the free variables to a Relay expression. This is a helper
 * function usually called by other pass functions to help optimizations.
 * If any free variables are introduced into a function, those are added
 * to the function parameters.
 * Additionally this may change the order of parameters if you map a variable
 * to a variable.
 *
 * \param expr The input expression.
 * \param binds The variable to expression map that will be used to help the
 * binding.
 *
 * \return The updated expression.
 */
TVM_DLL Expr Bind(const Expr& expr, const tvm::Map<Var, Expr>& binds);

/*!
 * \brief Substitute variables with new variables (including function parameters) in a function.
 * This is a helper function usually called by other pass functions to help optimizations.
 * Expects all values in the bind map to be Vars.
 *
 * \param func The input function.
 * \param binds The variable to expression map that will be used to help the
 * binding.
 *
 * \return The updated expression.
 */
TVM_DLL Function SubstituteBoundVars(const Function& func, const tvm::Map<Var, Expr>& binds);

/*!
 * \brief Apply rewrite rules to rewrite the expr in post DFS order. This
 * function is used as a helper function to rewrite an expression in a pass.
 *
 * \param expr The expression.
 * \param rewrite_map_attr_name The Op's attr name which corresponds to the rewrite
 * rule function.
 * \param fcontext Additional callback to provide context argument for each call node.
 * \param fmulti_ref_trigger Transformation function to be called when
 * an Expr consumed by multiple callers.
 * \return The rewritten expression.
 */
TVM_DLL Expr ForwardRewrite(const Expr& expr, const String& rewrite_map_attr_name,
                            std::function<ObjectRef(const Call&)> fcontext = nullptr,
                            std::function<Expr(const Expr&)> fmulti_ref_trigger = nullptr);

/*!
 * \brief Apply rewrite rules to rewrite the expr in post DFS order. This
 * function is used as a helper function to rewrite an expression in a pass.
 *
 * \param expr The expression.
 * \param rewrite_func The rewrite func that will apply to all operators.
 * \param fcontext Additional callback to provide context argument for each call node.
 * \param fmulti_ref_trigger Transformation function to be called when
 * an Expr consumed by multiple callers.
 *
 * \return The rewritten expression.
 */
TVM_DLL Expr ForwardRewrite(const Expr& expr, const FForwardRewrite& rewrite_func,
                            std::function<ObjectRef(const Call&)> fcontext = nullptr,
                            std::function<Expr(const Expr&)> fmulti_ref_trigger = nullptr);

/*!
* \brief Rewrite the annotated program. * * \param expr The expression. * \param fallback_device The fallback device which is the default device for * operators without annotation. * * \return The updated program. */ TVM_DLL Expr RewriteAnnotatedOps(const Expr& expr, int fallback_device); /*! * \brief Turn an expression into continuation passing style(CPS). * * CPS mean that every function will, instead of returning the result directly, * be passed down an extra function (called the continuation) as argument, * and pass the result to the continuation instead. * * Thus, every function call has to be passed an extra argument * that represent the rest of the computation (Hence the name of continuation). * * Similarly, all other compute will be wrapped and call the continuation as well. * * \param f the function. * \param mod the module. * * \return the converted Function. */ TVM_DLL Function ToCPS(const Function& f, const IRModule& mod); /*! * \brief Remove the continuation argument of a CPS function. * * Note that this only transform the type back into un-CPS form * when there is no higher order input/output. * * \param f the function. * * \return the converted Function. */ TVM_DLL Function UnCPS(const Function& f); /*! * \brief Deduplicate the bound variables and type variables in the expression. * * \param e the expression. * * \return the deduplicated expression. */ TVM_DLL Expr DeDup(const Expr& e); } // namespace relay } // namespace tvm #endif // TVM_RELAY_TRANSFORM_H_
https://github.com/zk-ml/tachikoma
include/tvm/relay/type.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/relay/type.h
 * \brief Relay typed AST nodes.
 */
#ifndef TVM_RELAY_TYPE_H_
#define TVM_RELAY_TYPE_H_

#include <tvm/ir/attrs.h>
#include <tvm/ir/env_func.h>
#include <tvm/ir/tensor_type.h>
#include <tvm/ir/type.h>
#include <tvm/ir/type_relation.h>
#include <tvm/runtime/registry.h>
#include <tvm/tir/expr.h>

#include <string>

#include "base.h"

namespace tvm {
namespace relay {

// Namespace aliases kept for backward compat: the type system was unified
// into tvm:: (and tvm::tir::), so every name below simply re-exports the
// canonical definition under the legacy tvm::relay:: spelling.
// These aliases will be removed later.
using AnyNode = tvm::tir::AnyNode;
using Any = tvm::tir::Any;
using Kind = TypeKind;
using Type = tvm::Type;
using TypeNode = tvm::TypeNode;
using TypeVar = tvm::TypeVar;
using TypeVarNode = tvm::TypeVarNode;
using GlobalTypeVar = tvm::GlobalTypeVar;
using GlobalTypeVarNode = tvm::GlobalTypeVarNode;
using TupleType = tvm::TupleType;
using TupleTypeNode = tvm::TupleTypeNode;
using TypeConstraint = tvm::TypeConstraint;
using TypeConstraintNode = tvm::TypeConstraintNode;
using FuncType = tvm::FuncType;
using FuncTypeNode = tvm::FuncTypeNode;
using IncompleteType = tvm::IncompleteType;
using IncompleteTypeNode = tvm::IncompleteTypeNode;
using RelayRefType = tvm::RelayRefType;
using RelayRefTypeNode = tvm::RelayRefTypeNode;
using TensorType = tvm::TensorType;
using TensorTypeNode = tvm::TensorTypeNode;
using TypeCall = tvm::TypeCall;
using TypeCallNode = tvm::TypeCallNode;
using TypeRelation = tvm::TypeRelation;
using TypeRelationNode = tvm::TypeRelationNode;
using TypeRelationFn = tvm::TypeRelationFn;
using TypeReporter = tvm::TypeReporter;
using TypeReporterNode = tvm::TypeReporterNode;

}  // namespace relay
}  // namespace tvm
#endif  // TVM_RELAY_TYPE_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/builtin_fp16.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file builtin_fp16.h
 * \brief Functions for conversion between fp32 and fp16
 *
 * The fp16 values are exchanged as their raw binary16 bit pattern
 * packed into a uint16_t. The symbol names follow the compiler
 * soft-float runtime conventions (libgcc "__gnu_*_ieee" spelling and
 * the generic "__truncsfhf2"/"__extendhfsf2" spelling used by
 * compiler-rt) so that compiled code which expects those runtime
 * helpers links against these exports.
 */
#ifndef TVM_RUNTIME_BUILTIN_FP16_H_
#define TVM_RUNTIME_BUILTIN_FP16_H_

#include <tvm/runtime/c_runtime_api.h>

#include <cstdint>

extern "C" {
// Truncate an IEEE fp32 value to fp16 (libgcc-style name).
TVM_DLL uint16_t __gnu_f2h_ieee(float);
// Extend an fp16 bit pattern to an IEEE fp32 value (libgcc-style name).
TVM_DLL float __gnu_h2f_ieee(uint16_t);
// Truncate fp32 -> fp16 (generic soft-fp runtime name).
TVM_DLL uint16_t __truncsfhf2(float v);
// Extend fp16 -> fp32 (generic soft-fp runtime name).
TVM_DLL float __extendhfsf2(uint16_t v);
}

#endif  // TVM_RUNTIME_BUILTIN_FP16_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/c_backend_api.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/runtime/c_backend_api.h
 * \brief TVM runtime backend API.
 *
 * The functions defined in this header are intended to be
 * used by compiled tvm operators; usually users do not need to use these
 * functions directly.
 */
#ifndef TVM_RUNTIME_C_BACKEND_API_H_
#define TVM_RUNTIME_C_BACKEND_API_H_

#include <tvm/runtime/c_runtime_api.h>

#ifdef __cplusplus
extern "C" {
#endif

/*!
 * \brief Signature for backend functions exported as DLL.
 *
 * \param args The arguments
 * \param type_codes The type codes of the arguments
 * \param num_args Number of arguments.
 * \param out_ret_value The output value of the return value.
 * \param out_ret_tcode The output type code of the return value.
 * \param resource_handle Pointer to associated resource.
 *
 * \return 0 if success, -1 if failure happens, set error via TVMAPISetLastError.
 */
typedef int (*TVMBackendPackedCFunc)(TVMValue* args, int* type_codes, int num_args,
                                     TVMValue* out_ret_value, int* out_ret_tcode,
                                     void* resource_handle);

/*!
 * \brief Backend function for modules to get function
 *        from its environment mod_node (its imports and global function).
 *        The user should not call TVMFuncFree on func.
 *
 * \param mod_node The module handle.
 * \param func_name The name of the function.
 * \param out The result function.
 * \return 0 when no error is thrown, -1 when failure happens
 */
TVM_DLL int TVMBackendGetFuncFromEnv(void* mod_node, const char* func_name,
                                     TVMFunctionHandle* out);

/*!
 * \brief Backend function to register system-wide library symbol.
 *
 * \param name The name of the symbol
 * \param ptr The symbol address.
 * \return 0 when no error is thrown, -1 when failure happens
 */
TVM_DLL int TVMBackendRegisterSystemLibSymbol(const char* name, void* ptr);

/*!
 * \brief Backend function to allocate temporal workspace.
 *
 * \note The result allocated space is ensured to be aligned to kTempAllocaAlignment.
 *
 * \param nbytes The size of the space requested.
 * \param device_type The device type which the space will be allocated.
 * \param device_id The device id which the space will be allocated.
 * \param dtype_code_hint The type code of the array elements. Only used in
 *        certain backends such as OpenGL.
 * \param dtype_bits_hint The type bits of the array elements. Only used in
 *        certain backends such as OpenGL.
 * \return nullptr when error is thrown, a valid ptr if success
 */
TVM_DLL void* TVMBackendAllocWorkspace(int device_type, int device_id, uint64_t nbytes,
                                       int dtype_code_hint, int dtype_bits_hint);

/*!
 * \brief Backend function to free temporal workspace.
 *
 * \param ptr The result allocated space pointer.
 * \param device_type The device type which the space will be allocated.
 * \param device_id The device id which the space will be allocated.
 * \return 0 when no error is thrown, -1 when failure happens
 *
 * \sa TVMBackendAllocWorkspace
 */
TVM_DLL int TVMBackendFreeWorkspace(int device_type, int device_id, void* ptr);

/*!
 * \brief Backend function to register execution environment(e.g. python)
 *        specific C APIs.
 *
 * \note We only register the C API function when absolutely necessary (e.g. when signal handler
 * cannot trap back into python). In most cases we should use the PackedFunc FFI.
 *
 * \param name The name of the symbol
 * \param ptr The symbol address.
 * \return 0 when no error is thrown, -1 when failure happens
 */
TVM_DLL int TVMBackendRegisterEnvCAPI(const char* name, void* ptr);

/*!
 * \brief Environment for TVM parallel task.
 */
typedef struct {
  /*!
   * \brief Auxiliary used for synchronization
   */
  void* sync_handle;
  /*! \brief total amount of task */
  int32_t num_task;
} TVMParallelGroupEnv;

/*!
 * \brief The callback function to execute a parallel lambda
 * \param task_id the task id of the function.
 * \param penv The parallel environment backs the execution.
 * \param cdata The supporting closure data.
 */
typedef int (*FTVMParallelLambda)(int task_id, TVMParallelGroupEnv* penv, void* cdata);

/*!
 * \brief Backend function for running parallel jobs.
 *
 * \param flambda The parallel function to be launched.
 * \param cdata The closure data.
 * \param num_task Number of tasks to launch, can be 0, means launch
 *        with all available threads.
 *
 * \return 0 when no error is thrown, -1 when failure happens
 */
TVM_DLL int TVMBackendParallelLaunch(FTVMParallelLambda flambda, void* cdata, int num_task);

/*!
 * \brief BSP barrier between parallel threads
 * \param task_id the task id of the function.
 * \param penv The parallel environment backs the execution.
 * \return 0 when no error is thrown, -1 when failure happens
 */
TVM_DLL int TVMBackendParallelBarrier(int task_id, TVMParallelGroupEnv* penv);

/*!
 * \brief Simple static initialization function.
 *        Run f once and set handle to be not null.
 *        This function is mainly used for test purpose.
 *
 * \param handle A global address to indicate f
 * \param f The function to be run
 * \param cdata The closure data to pass to the function.
 * \param nbytes Number of bytes in the closure data.
 * \return 0 when no error is thrown, -1 when failure happens
 */
TVM_DLL int TVMBackendRunOnce(void** handle, int (*f)(void*), void* cdata, int nbytes);

#ifdef __cplusplus
}  // TVM_EXTERN_C
#endif
#endif  // TVM_RUNTIME_C_BACKEND_API_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/c_runtime_api.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * \file tvm/runtime/c_runtime_api.h * \brief TVM runtime library. * * The philosophy of TVM project is to customize the compilation * stage to generate code that can used by other projects transparently. * So this is a minimum runtime code gluing, and some limited * memory management code to enable quick testing. * * The runtime API is independent from TVM compilation stack and can * be linked via libtvm_runtime. * * The common flow is: * - Use TVMFuncListGlobalNames to get global function name * - Use TVMFuncCall to call these functions. * * Possible return values of the API functions: * * 0: success * * -1: the error can be retrieved through TVMGetLastError. * * -2: a frontend error occurred and recorded in the frontend. 
*/ #ifndef TVM_RUNTIME_C_RUNTIME_API_H_ #define TVM_RUNTIME_C_RUNTIME_API_H_ // Macros to do weak linking #ifdef _MSC_VER #define TVM_WEAK __declspec(selectany) #else #define TVM_WEAK __attribute__((weak)) #endif #ifdef __EMSCRIPTEN__ #include <emscripten/emscripten.h> #define TVM_DLL EMSCRIPTEN_KEEPALIVE #endif #ifndef TVM_DLL #ifdef _WIN32 #ifdef TVM_EXPORTS #define TVM_DLL __declspec(dllexport) #else #define TVM_DLL __declspec(dllimport) #endif #else #define TVM_DLL __attribute__((visibility("default"))) #endif #endif // TVM version #define TVM_VERSION "0.11.dev0" // TVM Runtime is DLPack compatible. #include <dlpack/dlpack.h> #ifdef __cplusplus extern "C" { #endif #include <stddef.h> #include <stdint.h> /*! \brief type of array index. */ typedef int64_t tvm_index_t; /*! \brief Extension device types in TVM * * Additional enumerators to supplement those provided by * DLPack's `DLDeviceType` enumeration. * * MAINTAINERS NOTE #1: We need to ensure that the two devices * are identified by the same integer. * Currently this requires manual verification. * Discussed here: https://github.com/dmlc/dlpack/issues/111 * As of DLPack v0.7, the highest-valued enumerator in * `DLDeviceType` is kDLHexagon = 16. * * MAINTAINERS NOTE #2: As of DLPack v0.7, the definition for * `DLDeviceType` specifies an underlying storage type of * `int32_t`. That guarantees a variable of type * `DLDeviceType` is capable of holding any integers provided * by *either* of these enumerations. * * However, the `int32_t` specification only applies when the * header file is compiled as C++, and this header file is also * meant to work as C code. So the unspecified storage type * could be a latent bug when compiled as C. */ #ifdef __cplusplus typedef enum : int32_t { #else typedef enum { #endif // To help avoid accidental conflicts between `DLDeviceType` // and this enumeration, start numbering the new enumerators // a little higher than (currently) seems necessary. 
kDLAOCL = 32, kDLSDAccel, kOpenGL, kDLMicroDev, TVMDeviceExtType_End, // sentinel value } TVMDeviceExtType; #ifdef __cplusplus // Some other parts of TVM hardcode the integer identifier for // some DLPack / TVM devices, rather then using the symbolic // enumerator. E.g., `2` rather than `kDLCUDA`. // These asserts should alert us when that mapping breaks. #define TVM_HARCODED_INTEGER_CHANGED_MSG \ "Change in compile-time integer. Make sure hardcoded uses of this integer throughout TVM are " \ "updated." static_assert(kDLCPU == 1, TVM_HARCODED_INTEGER_CHANGED_MSG); static_assert(kDLCUDA == 2, TVM_HARCODED_INTEGER_CHANGED_MSG); static_assert(kDLCUDAHost == 3, TVM_HARCODED_INTEGER_CHANGED_MSG); static_assert(kDLOpenCL == 4, TVM_HARCODED_INTEGER_CHANGED_MSG); static_assert(kDLVulkan == 7, TVM_HARCODED_INTEGER_CHANGED_MSG); static_assert(kDLMetal == 8, TVM_HARCODED_INTEGER_CHANGED_MSG); static_assert(kDLVPI == 9, TVM_HARCODED_INTEGER_CHANGED_MSG); static_assert(kDLROCM == 10, TVM_HARCODED_INTEGER_CHANGED_MSG); static_assert(kDLROCMHost == 11, TVM_HARCODED_INTEGER_CHANGED_MSG); static_assert(kDLExtDev == 12, TVM_HARCODED_INTEGER_CHANGED_MSG); static_assert(kDLCUDAManaged == 13, TVM_HARCODED_INTEGER_CHANGED_MSG); static_assert(kDLOneAPI == 14, TVM_HARCODED_INTEGER_CHANGED_MSG); static_assert(kDLWebGPU == 15, TVM_HARCODED_INTEGER_CHANGED_MSG); static_assert(kDLHexagon == 16, TVM_HARCODED_INTEGER_CHANGED_MSG); static_assert(kDLAOCL == 32, TVM_HARCODED_INTEGER_CHANGED_MSG); static_assert(kDLSDAccel == 33, TVM_HARCODED_INTEGER_CHANGED_MSG); static_assert(kOpenGL == 34, TVM_HARCODED_INTEGER_CHANGED_MSG); static_assert(kDLMicroDev == 35, TVM_HARCODED_INTEGER_CHANGED_MSG); #undef TVM_HARCODED_INTEGER_CHANGED_MSG #endif /*! * \brief The type code in used and only used in TVM FFI for argument passing. 
* * DLPack consistency: * 1) kTVMArgInt is compatible with kDLInt * 2) kTVMArgFloat is compatible with kDLFloat * 3) kDLUInt is not in ArgTypeCode, but has a spared slot * * Downstream consistency: * The kDLInt, kDLUInt, kDLFloat are kept consistent with the original ArgType code * * It is only used in argument passing, and should not be confused with * DataType::TypeCode, which is DLPack-compatible. * * \sa tvm::runtime::DataType::TypeCode */ typedef enum { kTVMArgInt = kDLInt, kTVMArgFloat = kDLFloat, kTVMOpaqueHandle = 3U, kTVMNullptr = 4U, kTVMDataType = 5U, kDLDevice = 6U, kTVMDLTensorHandle = 7U, kTVMObjectHandle = 8U, kTVMModuleHandle = 9U, kTVMPackedFuncHandle = 10U, kTVMStr = 11U, kTVMBytes = 12U, kTVMNDArrayHandle = 13U, kTVMObjectRValueRefArg = 14U, // Extension codes for other frameworks to integrate TVM PackedFunc. // To make sure each framework's id do not conflict, use first and // last sections to mark ranges. // Open an issue at the repo if you need a section of code. kTVMExtBegin = 15U, kTVMNNVMFirst = 16U, kTVMNNVMLast = 20U, // The following section of code is used for non-reserved types. kTVMExtReserveEnd = 64U, kTVMExtEnd = 128U, } TVMArgTypeCode; /*! \brief the array handle */ typedef DLTensor* TVMArrayHandle; /*! * \brief Union type of values * being passed through API and function calls. */ typedef union { int64_t v_int64; double v_float64; void* v_handle; const char* v_str; DLDataType v_type; DLDevice v_device; } TVMValue; /*! * \brief Byte array type used to pass in byte array * When kTVMBytes is used as data type. */ typedef struct { const char* data; size_t size; } TVMByteArray; /*! \brief Handle to TVM runtime modules. */ typedef void* TVMModuleHandle; /*! \brief Handle to packed function handle. */ typedef void* TVMFunctionHandle; /*! \brief Handle to hold return value. */ typedef void* TVMRetValueHandle; /*! * \brief The stream that is specific to device * can be NULL, which indicates the default one. 
*/ typedef void* TVMStreamHandle; /*! \brief Handle to Object. */ typedef void* TVMObjectHandle; /*! * \brief Used for implementing C API function. * Set last error message before return. * \param msg The error message to be set. */ TVM_DLL void TVMAPISetLastError(const char* msg); /*! * \brief return str message of the last error * all function in this file will return 0 when success * and nonzero when an error occurred, * TVMGetLastError can be called to retrieve the error * * this function is threadsafe and can be called by different thread * \return error info */ TVM_DLL const char* TVMGetLastError(void); /*! * \brief Load module from file. * \param file_name The file name to load the module from. * \param format The format of the module. * \param out The result module * * \return 0 when success, nonzero when failure happens * \note The resulting module do not contain import relation. * It can be reconstructed by TVMModImport. */ TVM_DLL int TVMModLoadFromFile(const char* file_name, const char* format, TVMModuleHandle* out); /*! * \brief Add dep to mod's dependency. * This allows functions in this module to use modules. * * \param mod The module handle. * \param dep The dependent module to be imported. * \return 0 when success, nonzero when failure happens */ TVM_DLL int TVMModImport(TVMModuleHandle mod, TVMModuleHandle dep); /*! * \brief Get function from the module. * \param mod The module handle. * \param func_name The name of the function. * \param query_imports Whether to query imported modules * \param out The result function, can be NULL if it is not available. * \return 0 when no error is thrown, nonzero when failure happens */ TVM_DLL int TVMModGetFunction(TVMModuleHandle mod, const char* func_name, int query_imports, TVMFunctionHandle* out); /*! * \brief Free the Module * \param mod The module to be freed. * * \note This may not free up the module's resources. 
* If there is active TVMFunctionHandle uses the module * Or if this module is imported by another active module. * * The all functions remains valid until TVMFuncFree is called. * \return 0 when success, nonzero when failure happens */ TVM_DLL int TVMModFree(TVMModuleHandle mod); /*! * \brief Free the function when it is no longer needed. * \param func The function handle * \return 0 when success, nonzero when failure happens */ TVM_DLL int TVMFuncFree(TVMFunctionHandle func); /*! * \brief Call a Packed TVM Function. * * \param func node handle of the function. * \param arg_values The arguments * \param type_codes The type codes of the arguments * \param num_args Number of arguments. * * \param ret_val The return value. * \param ret_type_code the type code of return value. * * \return 0 when success, nonzero when failure happens * \note TVM calls always exchanges with type bits=64, lanes=1 * * \note API calls always exchanges with type bits=64, lanes=1 * If API call returns container handles (e.g. FunctionHandle) * these handles should be managed by the front-end. * The front-end need to call free function (e.g. TVMFuncFree) * to free these handles. */ TVM_DLL int TVMFuncCall(TVMFunctionHandle func, TVMValue* arg_values, int* type_codes, int num_args, TVMValue* ret_val, int* ret_type_code); /*! * \brief Set the return value of TVMPackedCFunc. * * This function is called by TVMPackedCFunc to set the return value. * When this function is not called, the function returns null by default. * * \param ret The return value handle, pass by ret in TVMPackedCFunc * \param value The value to be returned. * \param type_code The type of the value to be returned. * \param num_ret Number of return values, for now only 1 is supported. */ TVM_DLL int TVMCFuncSetReturn(TVMRetValueHandle ret, TVMValue* value, int* type_code, int num_ret); /*! * \brief Inplace translate callback argument value to return value. * This is only needed for non-POD arguments. 
* * \param value The value to be translated. * \param code The type code to be translated. * \note This function will do a shallow copy when necessary. * * \return 0 when success, nonzero when failure happens. */ TVM_DLL int TVMCbArgToReturn(TVMValue* value, int* code); /*! * \brief C type of packed function. * * \param args The arguments * \param type_codes The type codes of the arguments * \param num_args Number of arguments. * \param ret The return value handle. * \param resource_handle The handle additional resouce handle from front-end. * \return 0 if success, -1 if failure happens, set error via TVMAPISetLastError. * \sa TVMCFuncSetReturn */ typedef int (*TVMPackedCFunc)(TVMValue* args, int* type_codes, int num_args, TVMRetValueHandle ret, void* resource_handle); /*! * \brief C callback to free the resource handle in C packed function. * \param resource_handle The handle additional resouce handle from front-end. */ typedef void (*TVMPackedCFuncFinalizer)(void* resource_handle); /*! * \brief Signature for extension function declarer. * * TVM call this function to get the extension functions * The declarer will call register_func to register function and their name. * * \param register_func_handle The register function * \return 0 if success, -1 if failure happens */ typedef int (*TVMExtensionFuncDeclarer)(TVMFunctionHandle register_func_handle); /*! * \brief Wrap a TVMPackedCFunc to become a FunctionHandle. * * The resource_handle will be managed by TVM API, until the function is no longer used. * * \param func The packed C function. * \param resource_handle The resource handle from front-end, can be NULL. * \param fin The finalizer on resource handle when the FunctionHandle get freed, can be NULL * \param out the result function handle. * \return 0 when success, nonzero when failure happens */ TVM_DLL int TVMFuncCreateFromCFunc(TVMPackedCFunc func, void* resource_handle, TVMPackedCFuncFinalizer fin, TVMFunctionHandle* out); /*! 
 * \brief Register the function to runtime's global table.
 *
 * The registered function then can be pulled by the backend by the name.
 *
 * \param name The name of the function.
 * \param f The function to be registered.
 * \param override Whether to allow overriding an already registered function.
 */
TVM_DLL int TVMFuncRegisterGlobal(const char* name, TVMFunctionHandle f, int override);

/*!
 * \brief Get a global function.
 *
 * \param name The name of the function.
 * \param out the result function pointer, NULL if it does not exist.
 *
 * \note The function handle of global function is managed by TVM runtime,
 *  so TVMFuncFree should not be called when it gets deleted.
 */
TVM_DLL int TVMFuncGetGlobal(const char* name, TVMFunctionHandle* out);

/*!
 * \brief List all the globally registered function names
 * \param out_size The number of functions
 * \param out_array The array of function names.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMFuncListGlobalNames(int* out_size, const char*** out_array);

/*!
 * \brief Remove a global function.
 * \param name The name of the function.
 */
TVM_DLL int TVMFuncRemoveGlobal(const char* name);

// Array related apis for quick prototyping
/*!
 * \brief Allocate an nd-array's memory,
 *  including space of shape, of given spec.
 *
 * \param shape The shape of the array, the data content will be copied to out
 * \param ndim The number of dimensions of the array.
 * \param dtype_code The type code of the dtype
 * \param dtype_bits The number of bits of dtype
 * \param dtype_lanes The number of lanes in the dtype.
 * \param device_type The device type.
 * \param device_id The device id.
 * \param out The output handle.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMArrayAlloc(const tvm_index_t* shape, int ndim, int dtype_code, int dtype_bits,
                          int dtype_lanes, int device_type, int device_id, TVMArrayHandle* out);

/*!
 * \brief Free the TVM Array.
 * \param handle The array handle to be freed.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMArrayFree(TVMArrayHandle handle);

/*!
 * \brief Copy array data from CPU byte array.
 * \param handle The array handle.
 * \param data the data pointer
 * \param nbytes The number of bytes to copy.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMArrayCopyFromBytes(TVMArrayHandle handle, void* data, size_t nbytes);

/*!
 * \brief Copy array data to CPU byte array.
 * \param handle The array handle.
 * \param data the data pointer
 * \param nbytes The number of bytes to copy.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMArrayCopyToBytes(TVMArrayHandle handle, void* data, size_t nbytes);

/*!
 * \brief Copy the array, both from and to must be valid during the copy.
 * \param from The array to be copied from.
 * \param to The target space.
 * \param stream The stream where the copy happens, can be NULL.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMArrayCopyFromTo(TVMArrayHandle from, TVMArrayHandle to, TVMStreamHandle stream);

/*!
 * \brief Produce an array from the DLManagedTensor that shares data memory
 * with the DLManagedTensor.
 * \param from The source DLManagedTensor.
 * \param out The output array handle.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMArrayFromDLPack(DLManagedTensor* from, TVMArrayHandle* out);

/*!
 * \brief Produce a DLManagedTensor from the array that shares data memory with
 * the array.
 * \param from The source array.
 * \param out The DLManagedTensor handle.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMArrayToDLPack(TVMArrayHandle from, DLManagedTensor** out);

/*!
 * \brief Delete (free) a DLManagedTensor's data.
 * \param dltensor Pointer to the DLManagedTensor.
 */
TVM_DLL void TVMDLManagedTensorCallDeleter(DLManagedTensor* dltensor);

/*!
 * \brief Create a new runtime stream.
 *
 * \param device_type The device type.
 * \param device_id The device id.
 * \param out The new stream handle.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMStreamCreate(int device_type, int device_id, TVMStreamHandle* out);

/*!
 * \brief Free a created stream handle.
 *
 * \param device_type The device type.
 * \param device_id The device id.
 * \param stream The stream to be freed.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMStreamFree(int device_type, int device_id, TVMStreamHandle stream);

/*!
 * \brief Set the runtime stream of current thread to be stream.
 *  The subsequent calls to the same device_type
 *  will use the stream handle set here.
 *  The specific type of stream is runtime device dependent.
 *
 * \param device_type The device type.
 * \param device_id The device id.
 * \param handle The stream handle.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMSetStream(int device_type, int device_id, TVMStreamHandle handle);

/*!
 * \brief Wait until all computations on stream complete.
 *
 * \param device_type The device type.
 * \param device_id The device id.
 * \param stream The stream to be synchronized.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMSynchronize(int device_type, int device_id, TVMStreamHandle stream);

/*!
 * \brief Synchronize two streams of execution.
 *
 * \param device_type The device type.
 * \param device_id The device id.
 * \param src The source stream to synchronize.
 * \param dst The destination stream to synchronize.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMStreamStreamSynchronize(int device_type, int device_id, TVMStreamHandle src,
                                       TVMStreamHandle dst);

/*!
 * \brief Get the type_index from an object.
 *
 * \param obj The object handle.
 * \param out_tindex the output type index.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMObjectGetTypeIndex(TVMObjectHandle obj, unsigned* out_tindex);

/*!
 * \brief Convert type key to type index.
 * \param type_key The key of the type.
 * \param out_tindex the corresponding type index.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMObjectTypeKey2Index(const char* type_key, unsigned* out_tindex);

/*!
 * \brief Convert type index to type key.
 * \param tindex The type index.
 * \param out_type_key The output type key.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMObjectTypeIndex2Key(unsigned tindex, char** out_type_key);

/*!
 * \brief Increase the reference count of an object.
 *
 * \param obj The object handle.
 * \note Internally we increase the reference counter of the object.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMObjectRetain(TVMObjectHandle obj);

/*!
 * \brief Free the object.
 *
 * \param obj The object handle.
 * \note Internally we decrease the reference counter of the object.
 *       The object will be freed when all references to the object are removed.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMObjectFree(TVMObjectHandle obj);

/*!
 * \brief Free a TVMByteArray returned from TVMFuncCall, and associated memory.
 * \param arr The TVMByteArray instance.
 * \return 0 on success, -1 on failure.
 */
TVM_DLL int TVMByteArrayFree(TVMByteArray* arr);

/*!
 * \brief Allocate a data space on device.
 * \param dev The device to perform operation.
 * \param nbytes The number of bytes in memory.
 * \param alignment The alignment of the memory.
 * \param type_hint The type of elements. Only needed by certain backends,
 *        as nbytes & alignment are sufficient for most backends.
 * \param out_data The allocated device pointer.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMDeviceAllocDataSpace(DLDevice dev, size_t nbytes, size_t alignment,
                                    DLDataType type_hint, void** out_data);

/*!
 * \brief Allocate a data space on device with special memory scope.
 * \note The memory could use a special multi-dimensional memory layout.
 *       That is why we pass shape and dtype instead of raw number of bytes.
 * \param dev The device to perform operation.
 * \param ndim The number of dimensions of the tensor.
 * \param shape The shape of the tensor.
 * \param dtype The type of elements.
 * \param mem_scope The memory scope of the tensor,
 *        can be nullptr, which indicates the default global DRAM.
 * \param out_data The allocated device pointer.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMDeviceAllocDataSpaceWithScope(DLDevice dev, int ndim, const int64_t* shape,
                                             DLDataType dtype, const char* mem_scope,
                                             void** out_data);

/*!
 * \brief Free a data space on device.
 * \param dev The device to perform operation.
 * \param ptr The data space.
 * \return 0 when success, nonzero when failure happens
 */
TVM_DLL int TVMDeviceFreeDataSpace(DLDevice dev, void* ptr);

/*!
 * \brief Copy data from one place to another.
 * \note This API is designed to support special memory with shape dependent layout.
 *       We pass in DLTensor* with shape information to support these cases.
 * \param from The source tensor.
 * \param to The target tensor.
 * \param stream Optional stream object.
 * \return 0 when success, nonzero when failure happens.
 */
TVM_DLL int TVMDeviceCopyDataFromTo(DLTensor* from, DLTensor* to, TVMStreamHandle stream);

/*!
 * \brief Check that an object is derived from another.
 * \param child_type_index The type index of the derived type.
 * \param parent_type_index The type index of the parent type.
 * \param is_derived A boolean representing whether this predicate holds.
 * \return 0 when success, nonzero when failure happens.
 */
TVM_DLL int TVMObjectDerivedFrom(uint32_t child_type_index, uint32_t parent_type_index,
                                 int* is_derived);

#ifdef __cplusplus
}  // TVM_EXTERN_C
#endif
#endif  // TVM_RUNTIME_C_RUNTIME_API_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/container/adt.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/runtime/container/adt.h * \brief Runtime ADT container types. */ #ifndef TVM_RUNTIME_CONTAINER_ADT_H_ #define TVM_RUNTIME_CONTAINER_ADT_H_ #include <utility> #include <vector> #include "./base.h" namespace tvm { namespace runtime { /*! \brief An object representing a structure or enumeration. */ class ADTObj : public Object, public InplaceArrayBase<ADTObj, ObjectRef> { public: /*! \brief The tag representing the constructor used. */ int32_t tag; /*! \brief Number of fields in the ADT object. */ uint32_t size; // The fields of the structure follows directly in memory. static constexpr const uint32_t _type_index = TypeIndex::kRuntimeADT; static constexpr const char* _type_key = "runtime.ADT"; TVM_DECLARE_FINAL_OBJECT_INFO(ADTObj, Object); private: /*! * \return The number of elements in the array. */ size_t GetSize() const { return size; } /*! * \brief Initialize the elements in the array. * * \tparam Iterator Iterator type of the array. * \param begin The begin iterator. * \param end The end iterator. 
*/ template <typename Iterator> void Init(Iterator begin, Iterator end) { size_t num_elems = std::distance(begin, end); this->size = 0; auto it = begin; for (size_t i = 0; i < num_elems; ++i) { InplaceArrayBase::EmplaceInit(i, *it++); // Only increment size after the initialization succeeds this->size++; } } friend class ADT; friend InplaceArrayBase<ADTObj, ObjectRef>; }; /*! \brief reference to algebraic data type objects. */ class ADT : public ObjectRef { public: /*! * \brief construct an ADT object reference. * \param tag The tag of the ADT object. * \param fields The fields of the ADT object. * \return The constructed ADT object reference. */ ADT(int32_t tag, std::vector<ObjectRef> fields) : ADT(tag, fields.begin(), fields.end()){}; /*! * \brief construct an ADT object reference. * \param tag The tag of the ADT object. * \param begin The begin iterator to the start of the fields array. * \param end The end iterator to the end of the fields array. * \return The constructed ADT object reference. */ template <typename Iterator> ADT(int32_t tag, Iterator begin, Iterator end) { size_t num_elems = std::distance(begin, end); auto ptr = make_inplace_array_object<ADTObj, ObjectRef>(num_elems); ptr->tag = tag; ptr->Init(begin, end); data_ = std::move(ptr); } /*! * \brief construct an ADT object reference. * \param tag The tag of the ADT object. * \param init The initializer list of fields. * \return The constructed ADT object reference. */ ADT(int32_t tag, std::initializer_list<ObjectRef> init) : ADT(tag, init.begin(), init.end()){}; /*! * \brief Access element at index. * * \param idx The array index * \return const ObjectRef */ const ObjectRef& operator[](size_t idx) const { return operator->()->operator[](idx); } /*! * \brief Return the ADT tag. */ int32_t tag() const { return operator->()->tag; } /*! * \brief Return the number of fields. */ size_t size() const { return operator->()->size; } /*! * \brief Construct a tuple object. 
* * \tparam Args Type params of tuple feilds. * \param args Tuple fields. * \return ADT The tuple object reference. */ template <typename... Args> static ADT Tuple(Args&&... args) { return ADT(0, std::forward<Args>(args)...); } TVM_DEFINE_OBJECT_REF_METHODS(ADT, ObjectRef, ADTObj); }; } // namespace runtime } // namespace tvm #endif // TVM_RUNTIME_CONTAINER_ADT_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/container/array.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/runtime/container/array.h * \brief Runtime Array container types. */ #ifndef TVM_RUNTIME_CONTAINER_ARRAY_H_ #define TVM_RUNTIME_CONTAINER_ARRAY_H_ #include <algorithm> #include <memory> #include <type_traits> #include <utility> #include <vector> #include "./base.h" #include "./optional.h" namespace tvm { namespace runtime { /*! \brief array node content in array */ class ArrayNode : public Object, public InplaceArrayBase<ArrayNode, ObjectRef> { public: /*! \return The size of the array */ size_t size() const { return this->size_; } /*! * \brief Read i-th element from array. * \param i The index * \return the i-th element. */ const ObjectRef at(int64_t i) const { return this->operator[](i); } /*! \return begin constant iterator */ const ObjectRef* begin() const { return static_cast<ObjectRef*>(InplaceArrayBase::AddressOf(0)); } /*! \return end constant iterator */ const ObjectRef* end() const { return begin() + size_; } /*! \brief Release reference to all the elements */ void clear() { ShrinkBy(size_); } /*! 
* \brief Set i-th element of the array in-place * \param i The index * \param item The value to be set */ void SetItem(int64_t i, ObjectRef item) { this->operator[](i) = std::move(item); } /*! * \brief Constructs a container and copy from another * \param cap The capacity of the container * \param from Source of the copy * \return Ref-counted ArrayNode requested */ static ObjectPtr<ArrayNode> CopyFrom(int64_t cap, ArrayNode* from) { int64_t size = from->size_; ICHECK_GE(cap, size) << "ValueError: not enough capacity"; ObjectPtr<ArrayNode> p = ArrayNode::Empty(cap); ObjectRef* write = p->MutableBegin(); ObjectRef* read = from->MutableBegin(); // To ensure exception safety, size is only incremented after the initialization succeeds for (int64_t& i = p->size_ = 0; i < size; ++i) { new (write++) ObjectRef(*read++); } return p; } /*! * \brief Constructs a container and move from another * \param cap The capacity of the container * \param from Source of the move * \return Ref-counted ArrayNode requested */ static ObjectPtr<ArrayNode> MoveFrom(int64_t cap, ArrayNode* from) { int64_t size = from->size_; ICHECK_GE(cap, size) << "ValueError: not enough capacity"; ObjectPtr<ArrayNode> p = ArrayNode::Empty(cap); ObjectRef* write = p->MutableBegin(); ObjectRef* read = from->MutableBegin(); // To ensure exception safety, size is only incremented after the initialization succeeds for (int64_t& i = p->size_ = 0; i < size; ++i) { new (write++) ObjectRef(std::move(*read++)); } from->size_ = 0; return p; } /*! * \brief Constructs a container with n elements. 
Each element is a copy of val * \param n The size of the container * \param val The init value * \return Ref-counted ArrayNode requested */ static ObjectPtr<ArrayNode> CreateRepeated(int64_t n, const ObjectRef& val) { ObjectPtr<ArrayNode> p = ArrayNode::Empty(n); ObjectRef* itr = p->MutableBegin(); for (int64_t& i = p->size_ = 0; i < n; ++i) { new (itr++) ObjectRef(val); } return p; } static constexpr const uint32_t _type_index = TypeIndex::kRuntimeArray; static constexpr const char* _type_key = "Array"; TVM_DECLARE_FINAL_OBJECT_INFO(ArrayNode, Object); private: /*! \return Size of initialized memory, used by InplaceArrayBase. */ size_t GetSize() const { return this->size_; } /*! \return begin mutable iterator */ ObjectRef* MutableBegin() const { return static_cast<ObjectRef*>(InplaceArrayBase::AddressOf(0)); } /*! \return end mutable iterator */ ObjectRef* MutableEnd() const { return MutableBegin() + size_; } /*! * \brief Create an ArrayNode with the given capacity. * \param n Required capacity * \return Ref-counted ArrayNode requested */ static ObjectPtr<ArrayNode> Empty(int64_t n = kInitSize) { ICHECK_GE(n, 0); ObjectPtr<ArrayNode> p = make_inplace_array_object<ArrayNode, ObjectRef>(n); p->capacity_ = n; p->size_ = 0; return p; } /*! * \brief Inplace-initialize the elements starting idx from [first, last) * \param idx The starting point * \param first Begin of iterator * \param last End of iterator * \tparam IterType The type of iterator * \return Self */ template <typename IterType> ArrayNode* InitRange(int64_t idx, IterType first, IterType last) { ObjectRef* itr = MutableBegin() + idx; for (; first != last; ++first) { ObjectRef ref = *first; new (itr++) ObjectRef(std::move(ref)); } return this; } /*! 
* \brief Move elements from right to left, requires src_begin > dst * \param dst Destination * \param src_begin The start point of copy (inclusive) * \param src_end The end point of copy (exclusive) * \return Self */ ArrayNode* MoveElementsLeft(int64_t dst, int64_t src_begin, int64_t src_end) { ObjectRef* from = MutableBegin() + src_begin; ObjectRef* to = MutableBegin() + dst; while (src_begin++ != src_end) { *to++ = std::move(*from++); } return this; } /*! * \brief Move elements from left to right, requires src_begin < dst * \param dst Destination * \param src_begin The start point of move (inclusive) * \param src_end The end point of move (exclusive) * \return Self */ ArrayNode* MoveElementsRight(int64_t dst, int64_t src_begin, int64_t src_end) { ObjectRef* from = MutableBegin() + src_end; ObjectRef* to = MutableBegin() + (src_end - src_begin + dst); while (src_begin++ != src_end) { *--to = std::move(*--from); } return this; } /*! * \brief Enlarges the size of the array * \param delta Size enlarged, should be positive * \param val Default value * \return Self */ ArrayNode* EnlargeBy(int64_t delta, const ObjectRef& val = ObjectRef(nullptr)) { ObjectRef* itr = MutableEnd(); while (delta-- > 0) { new (itr++) ObjectRef(val); ++size_; } return this; } /*! * \brief Shrinks the size of the array * \param delta Size shrinked, should be positive * \return Self */ ArrayNode* ShrinkBy(int64_t delta) { ObjectRef* itr = MutableEnd(); while (delta-- > 0) { (--itr)->ObjectRef::~ObjectRef(); --size_; } return this; } /*! \brief Number of elements used */ int64_t size_; /*! \brief Number of elements allocated */ int64_t capacity_; /*! \brief Initial size of ArrayNode */ static constexpr int64_t kInitSize = 4; /*! 
\brief Expansion factor of the Array */ static constexpr int64_t kIncFactor = 2; // CRTP parent class friend InplaceArrayBase<ArrayNode, ObjectRef>; // Reference class template <typename, typename> friend class Array; // To specialize make_object<ArrayNode> friend ObjectPtr<ArrayNode> make_object<>(); }; /*! \brief Helper struct for type-checking * * is_valid_iterator<T,IterType>::value will be true if IterType can * be dereferenced into a type that can be stored in an Array<T>, and * false otherwise. */ template <typename T, typename IterType> struct is_valid_iterator : std::bool_constant<std::is_base_of_v< T, std::remove_cv_t<std::remove_reference_t<decltype(*std::declval<IterType>())>>>> {}; template <typename T, typename IterType> struct is_valid_iterator<Optional<T>, IterType> : is_valid_iterator<T, IterType> {}; template <typename T, typename IterType> inline constexpr bool is_valid_iterator_v = is_valid_iterator<T, IterType>::value; /*! * \brief Array, container representing a contiguous sequence of ObjectRefs. * * Array implements in-place copy-on-write semantics. * * As in typical copy-on-write, a method which would typically mutate the array * instead opaquely copies the underlying container, and then acts on its copy. * * If the array has reference count equal to one, we directly update the * container in place without copying. This is optimization is sound because * when the reference count is equal to one this reference is guranteed to be * the sole pointer to the container. * * * operator[] only provides const access, use Set to mutate the content. * \tparam T The content ObjectRef type. */ template <typename T, typename = typename std::enable_if<std::is_base_of<ObjectRef, T>::value>::type> class Array : public ObjectRef { public: using value_type = T; // constructors /*! * \brief default constructor */ Array() { data_ = ArrayNode::Empty(); } /*! 
* \brief move constructor * \param other source */ Array(Array<T>&& other) : ObjectRef() { // NOLINT(*) data_ = std::move(other.data_); } /*! * \brief copy constructor * \param other source */ Array(const Array<T>& other) : ObjectRef() { // NOLINT(*) data_ = other.data_; } /*! * \brief constructor from pointer * \param n the container pointer */ explicit Array(ObjectPtr<Object> n) : ObjectRef(n) {} /*! * \brief Constructor from iterator * \param first begin of iterator * \param last end of iterator * \tparam IterType The type of iterator */ template <typename IterType> Array(IterType first, IterType last) { static_assert(is_valid_iterator_v<T, IterType>, "IterType cannot be inserted into a tvm::Array<T>"); Assign(first, last); } /*! * \brief constructor from initializer list * \param init The initializer list */ Array(std::initializer_list<T> init) { // NOLINT(*) Assign(init.begin(), init.end()); } /*! * \brief constructor from vector * \param init The vector */ Array(const std::vector<T>& init) { // NOLINT(*) Assign(init.begin(), init.end()); } /*! * \brief Constructs a container with n elements. Each element is a copy of val * \param n The size of the container * \param val The init value */ explicit Array(const size_t n, const T& val) { data_ = ArrayNode::CreateRepeated(n, val); } /*! * \brief move assign operator * \param other The source of assignment * \return reference to self. */ Array<T>& operator=(Array<T>&& other) { data_ = std::move(other.data_); return *this; } /*! * \brief copy assign operator * \param other The source of assignment * \return reference to self. */ Array<T>& operator=(const Array<T>& other) { data_ = other.data_; return *this; } public: // iterators struct ValueConverter { using ResultType = T; static T convert(const ObjectRef& n) { return DowncastNoCheck<T>(n); } }; using iterator = IterAdapter<ValueConverter, const ObjectRef*>; using reverse_iterator = ReverseIterAdapter<ValueConverter, const ObjectRef*>; /*! 
\return begin iterator */ iterator begin() const { return iterator(GetArrayNode()->begin()); } /*! \return end iterator */ iterator end() const { return iterator(GetArrayNode()->end()); } /*! \return rbegin iterator */ reverse_iterator rbegin() const { // ArrayNode::end() is never nullptr return reverse_iterator(GetArrayNode()->end() - 1); } /*! \return rend iterator */ reverse_iterator rend() const { // ArrayNode::begin() is never nullptr return reverse_iterator(GetArrayNode()->begin() - 1); } public: // const methods in std::vector /*! * \brief Immutably read i-th element from array. * \param i The index * \return the i-th element. */ const T operator[](int64_t i) const { ArrayNode* p = GetArrayNode(); ICHECK(p != nullptr) << "ValueError: cannot index a null array"; ICHECK(0 <= i && i < p->size_) << "IndexError: indexing " << i << " on an array of size " << p->size_; return DowncastNoCheck<T>(*(p->begin() + i)); } /*! \return The size of the array */ size_t size() const { ArrayNode* p = GetArrayNode(); return p == nullptr ? 0 : GetArrayNode()->size_; } /*! \return The capacity of the array */ size_t capacity() const { ArrayNode* p = GetArrayNode(); return p == nullptr ? 0 : GetArrayNode()->capacity_; } /*! \return Whether array is empty */ bool empty() const { return size() == 0; } /*! \return The first element of the array */ const T front() const { ArrayNode* p = GetArrayNode(); ICHECK(p != nullptr) << "ValueError: cannot index a null array"; ICHECK_GT(p->size_, 0) << "IndexError: cannot index an empty array"; return DowncastNoCheck<T>(*(p->begin())); } /*! \return The last element of the array */ const T back() const { ArrayNode* p = GetArrayNode(); ICHECK(p != nullptr) << "ValueError: cannot index a null array"; ICHECK_GT(p->size_, 0) << "IndexError: cannot index an empty array"; return DowncastNoCheck<T>(*(p->end() - 1)); } public: // mutation in std::vector, implements copy-on-write /*! 
* \brief push a new item to the back of the list * \param item The item to be pushed. */ void push_back(const T& item) { ArrayNode* p = CopyOnWrite(1); p->EmplaceInit(p->size_++, item); } /*! * \brief Insert an element into the given position * \param position An iterator pointing to the insertion point * \param val The element to insert */ void insert(iterator position, const T& val) { ICHECK(data_ != nullptr) << "ValueError: cannot insert a null array"; int64_t idx = std::distance(begin(), position); int64_t size = GetArrayNode()->size_; auto addr = CopyOnWrite(1) // ->EnlargeBy(1) // ->MoveElementsRight(idx + 1, idx, size) // ->MutableBegin(); new (addr + idx) ObjectRef(val); } /*! * \brief Insert a range of elements into the given position * \param position An iterator pointing to the insertion point * \param first The begin iterator of the range * \param last The end iterator of the range */ template <typename IterType> void insert(iterator position, IterType first, IterType last) { static_assert(is_valid_iterator_v<T, IterType>, "IterType cannot be inserted into a tvm::Array<T>"); if (first == last) { return; } ICHECK(data_ != nullptr) << "ValueError: cannot insert a null array"; int64_t idx = std::distance(begin(), position); int64_t size = GetArrayNode()->size_; int64_t numel = std::distance(first, last); CopyOnWrite(numel) ->EnlargeBy(numel) ->MoveElementsRight(idx + numel, idx, size) ->InitRange(idx, first, last); } /*! \brief Remove the last item of the list */ void pop_back() { ICHECK(data_ != nullptr) << "ValueError: cannot pop_back because array is null"; int64_t size = GetArrayNode()->size_; ICHECK_GT(size, 0) << "ValueError: cannot pop_back because array is empty"; CopyOnWrite()->ShrinkBy(1); } /*! 
* \brief Erase an element on the given position * \param position An iterator pointing to the element to be erased */ void erase(iterator position) { ICHECK(data_ != nullptr) << "ValueError: cannot erase a null array"; int64_t st = std::distance(begin(), position); int64_t size = GetArrayNode()->size_; ICHECK(0 <= st && st < size) << "ValueError: cannot erase at index " << st << ", because Array size is " << size; CopyOnWrite() // ->MoveElementsLeft(st, st + 1, size) // ->ShrinkBy(1); } /*! * \brief Erase a given range of elements * \param first The begin iterator of the range * \param last The end iterator of the range */ void erase(iterator first, iterator last) { if (first == last) { return; } ICHECK(data_ != nullptr) << "ValueError: cannot erase a null array"; int64_t size = GetArrayNode()->size_; int64_t st = std::distance(begin(), first); int64_t ed = std::distance(begin(), last); ICHECK_LT(st, ed) << "ValueError: cannot erase array in range [" << st << ", " << ed << ")"; ICHECK(0 <= st && st <= size && 0 <= ed && ed <= size) << "ValueError: cannot erase array in range [" << st << ", " << ed << ")" << ", because array size is " << size; CopyOnWrite() // ->MoveElementsLeft(st, ed, size) // ->ShrinkBy(ed - st); } /*! * \brief Resize the array. * \param n The new size. */ void resize(int64_t n) { ICHECK_GE(n, 0) << "ValueError: cannot resize an Array to negative size"; if (data_ == nullptr) { SwitchContainer(n); return; } int64_t size = GetArrayNode()->size_; if (size < n) { CopyOnWrite(n - size)->EnlargeBy(n - size); } else if (size > n) { CopyOnWrite()->ShrinkBy(size - n); } } /*! * \brief Make sure the list has the capacity of at least n * \param n lower bound of the capacity */ void reserve(int64_t n) { if (data_ == nullptr || n > GetArrayNode()->capacity_) { SwitchContainer(n); } } /*! \brief Release reference to all the elements */ void clear() { if (data_ != nullptr) { ArrayNode* p = CopyOnWrite(); p->clear(); } } public: // Array's own methods /*! 
* \brief set i-th element of the array. * \param i The index * \param value The value to be setted. */ void Set(int64_t i, T value) { ArrayNode* p = this->CopyOnWrite(); ICHECK(0 <= i && i < p->size_) << "IndexError: indexing " << i << " on an array of size " << p->size_; *(p->MutableBegin() + i) = std::move(value); } /*! \return The underlying ArrayNode */ ArrayNode* GetArrayNode() const { return static_cast<ArrayNode*>(data_.get()); } /*! * \brief Helper function to apply a map function onto the array. * * \param fmap The transformation function T -> U. * * \tparam F The type of the mutation function. * * \tparam U The type of the returned array, inferred from the * return type of F. If overridden by the user, must be something * that is convertible from the return type of F. * * \note This function performs copy on write optimization. If * `fmap` returns an object of type `T`, and all elements of the * array are mapped to themselves, then the returned array will be * the same as the original, and reference counts of the elements in * the array will not be incremented. * * \return The transformed array. */ template <typename F, typename U = std::invoke_result_t<F, T>> Array<U> Map(F fmap) const { return Array<U>(MapHelper(data_, fmap)); } /*! * \brief Helper function to apply fmutate to mutate an array. * \param fmutate The transformation function T -> T. * \tparam F the type of the mutation function. * \note This function performs copy on write optimization. */ template <typename F, typename = std::enable_if_t<std::is_same_v<T, std::invoke_result_t<F, T>>>> void MutateByApply(F fmutate) { data_ = MapHelper(std::move(data_), fmutate); } /*! * \brief reset the array to content from iterator. 
* \param first begin of iterator * \param last end of iterator * \tparam IterType The type of iterator */ template <typename IterType> void Assign(IterType first, IterType last) { int64_t cap = std::distance(first, last); ICHECK_GE(cap, 0) << "ValueError: cannot construct an Array of negative size"; ArrayNode* p = GetArrayNode(); if (p != nullptr && data_.unique() && p->capacity_ >= cap) { // do not have to make new space p->clear(); } else { // create new space data_ = ArrayNode::Empty(cap); p = GetArrayNode(); } // To ensure exception safety, size is only incremented after the initialization succeeds ObjectRef* itr = p->MutableBegin(); for (int64_t& i = p->size_ = 0; i < cap; ++i, ++first, ++itr) { new (itr) ObjectRef(*first); } } /*! * \brief Copy on write semantics * Do nothing if current handle is the unique copy of the array. * Otherwise make a new copy of the array to ensure the current handle * hold a unique copy. * * \return Handle to the internal node container(which ganrantees to be unique) */ ArrayNode* CopyOnWrite() { if (data_ == nullptr) { return SwitchContainer(ArrayNode::kInitSize); } if (!data_.unique()) { return SwitchContainer(capacity()); } return static_cast<ArrayNode*>(data_.get()); } /*! \brief specify container node */ using ContainerType = ArrayNode; private: /*! * \brief Implement copy-on-write semantics, and ensures capacity is enough for extra elements. * \param reserve_extra Number of extra slots needed * \return ArrayNode pointer to the unique copy */ ArrayNode* CopyOnWrite(int64_t reserve_extra) { ArrayNode* p = GetArrayNode(); if (p == nullptr) { // necessary to get around the constexpr address issue before c++17 const int64_t kInitSize = ArrayNode::kInitSize; return SwitchContainer(std::max(kInitSize, reserve_extra)); } if (p->capacity_ >= p->size_ + reserve_extra) { return CopyOnWrite(); } int64_t cap = p->capacity_ * ArrayNode::kIncFactor; cap = std::max(cap, p->size_ + reserve_extra); return SwitchContainer(cap); } /*! 
* \brief Move or copy the ArrayNode to new address with the given capacity * \param capacity The capacity requirement of the new address */ ArrayNode* SwitchContainer(int64_t capacity) { if (data_ == nullptr) { data_ = ArrayNode::Empty(capacity); } else if (data_.unique()) { data_ = ArrayNode::MoveFrom(capacity, GetArrayNode()); } else { data_ = ArrayNode::CopyFrom(capacity, GetArrayNode()); } return static_cast<ArrayNode*>(data_.get()); } /*! \brief Helper method for mutate/map * * A helper function used internally by both `Array::Map` and * `Array::MutateInPlace`. Given an array of data, apply the * mapping function to each element, returning the collected array. * Applies both mutate-in-place and copy-on-write optimizations, if * possible. * * \param data A pointer to the ArrayNode containing input data. * Passed by value to allow for mutate-in-place optimizations. * * \param fmap The mapping function * * \tparam F The type of the mutation function. * * \tparam U The output type of the mutation function. Inferred * from the callable type given. Must inherit from ObjectRef. * * \return The mapped array. Depending on whether mutate-in-place * or copy-on-write optimizations were applicable, may be the same * underlying array as the `data` parameter. */ template <typename F, typename U = std::invoke_result_t<F, T>> static ObjectPtr<Object> MapHelper(ObjectPtr<Object> data, F fmap) { if (data == nullptr) { return nullptr; } ICHECK(data->IsInstance<ArrayNode>()); constexpr bool is_same_output_type = std::is_same_v<T, U>; if constexpr (is_same_output_type) { if (data.unique()) { // Mutate-in-place path. Only allowed if the output type U is // the same as type T, we have a mutable this*, and there are // no other shared copies of the array. 
auto arr = static_cast<ArrayNode*>(data.get()); for (auto it = arr->MutableBegin(); it != arr->MutableEnd(); it++) { T mapped = fmap(DowncastNoCheck<T>(std::move(*it))); *it = std::move(mapped); } return data; } } constexpr bool compatible_types = is_valid_iterator_v<T, U*> || is_valid_iterator_v<U, T*>; ObjectPtr<ArrayNode> output = nullptr; auto arr = static_cast<ArrayNode*>(data.get()); auto it = arr->begin(); if constexpr (compatible_types) { // Copy-on-write path, if the output Array<U> might be // represented by the same underlying array as the existing // Array<T>. Typically, this is for functions that map `T` to // `T`, but can also apply to functions that map `T` to // `Optional<T>`, or that map `T` to a subclass or superclass of // `T`. bool all_identical = true; for (; it != arr->end(); it++) { U mapped = fmap(DowncastNoCheck<T>(*it)); if (!mapped.same_as(*it)) { // At least one mapped element is different than the // original. Therefore, prepare the output array, // consisting of any previous elements that had mapped to // themselves (if any), and the element that didn't map to // itself. all_identical = false; output = ArrayNode::CreateRepeated(arr->size(), U()); output->InitRange(0, arr->begin(), it); output->SetItem(it - arr->begin(), std::move(mapped)); it++; break; } } if (all_identical) { return data; } } else { // Path for incompatible types. The constexpr check for // compatible types isn't strictly necessary, as the first // mapped.same_as(*it) would return false, but we might as well // avoid it altogether. output = ArrayNode::CreateRepeated(arr->size(), U()); } // Normal path for incompatible types, or post-copy path for // copy-on-write instances. // // If the types are incompatible, then at this point `output` is // empty, and `it` points to the first element of the input. 
// // If the types were compatible, then at this point `output` // contains zero or more elements that mapped to themselves // followed by the first element that does not map to itself, and // `it` points to the element just after the first element that // does not map to itself. Because at least one element has been // changed, we no longer have the opportunity to avoid a copy, so // we don't need to check the result. // // In both cases, `it` points to the next element to be processed, // so we can either start or resume the iteration from that point, // with no further checks on the result. for (; it != arr->end(); it++) { U mapped = fmap(DowncastNoCheck<T>(*it)); output->SetItem(it - arr->begin(), std::move(mapped)); } return output; } }; /*! * \brief Concat two Arrays. * \param lhs first Array to be concatenated. * \param rhs second Array to be concatenated. * \return The concatenated Array. Original Arrays are kept unchanged. */ template <typename T, typename = typename std::enable_if<std::is_base_of<ObjectRef, T>::value>::type> inline Array<T> Concat(Array<T> lhs, const Array<T>& rhs) { for (const auto& x : rhs) { lhs.push_back(x); } return std::move(lhs); } // Specialize make_object<ArrayNode> to make sure it is correct. template <> inline ObjectPtr<ArrayNode> make_object() { return ArrayNode::Empty(); } } // namespace runtime // expose the functions to the root namespace. using runtime::Array; using runtime::ArrayNode; } // namespace tvm #endif // TVM_RUNTIME_CONTAINER_ARRAY_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/container/base.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/runtime/container/base.h * \brief Base utilities for common POD(plain old data) container types. */ #ifndef TVM_RUNTIME_CONTAINER_BASE_H_ #define TVM_RUNTIME_CONTAINER_BASE_H_ #include <dmlc/logging.h> #include <tvm/runtime/logging.h> #include <tvm/runtime/memory.h> #include <tvm/runtime/object.h> #include <algorithm> #include <initializer_list> #include <utility> namespace tvm { namespace runtime { /*! \brief String-aware ObjectRef equal functor */ struct ObjectHash { /*! * \brief Calculate the hash code of an ObjectRef * \param a The given ObjectRef * \return Hash code of a, string hash for strings and pointer address otherwise. */ size_t operator()(const ObjectRef& a) const; }; /*! \brief String-aware ObjectRef hash functor */ struct ObjectEqual { /*! * \brief Check if the two ObjectRef are equal * \param a One ObjectRef * \param b The other ObjectRef * \return String equality if both are strings, pointer address equality otherwise. */ bool operator()(const ObjectRef& a, const ObjectRef& b) const; }; /*! * \brief Base template for classes with array like memory layout. * * It provides general methods to access the memory. The memory * layout is ArrayType + [ElemType]. 
The alignment of ArrayType * and ElemType is handled by the memory allocator. * * \tparam ArrayType The array header type, contains object specific metadata. * \tparam ElemType The type of objects stored in the array right after * ArrayType. * * \code * // Example usage of the template to define a simple array wrapper * class ArrayObj : public InplaceArrayBase<ArrayObj, Elem> { * public: * // Wrap EmplaceInit to initialize the elements * template <typename Iterator> * void Init(Iterator begin, Iterator end) { * size_t num_elems = std::distance(begin, end); * auto it = begin; * this->size = 0; * for (size_t i = 0; i < num_elems; ++i) { * InplaceArrayBase::EmplaceInit(i, *it++); * this->size++; * } * } * } * * void test_function() { * vector<Elem> fields; * auto ptr = make_inplace_array_object<ArrayObj, Elem>(fields.size()); * ptr->Init(fields.begin(), fields.end()); * * // Access the 0th element in the array. * assert(ptr->operator[](0) == fields[0]); * } * * \endcode */ template <typename ArrayType, typename ElemType> class InplaceArrayBase { public: /*! * \brief Access element at index * \param idx The index of the element. * \return Const reference to ElemType at the index. */ const ElemType& operator[](size_t idx) const { size_t size = Self()->GetSize(); ICHECK_LT(idx, size) << "Index " << idx << " out of bounds " << size << "\n"; return *(reinterpret_cast<ElemType*>(AddressOf(idx))); } /*! * \brief Access element at index * \param idx The index of the element. * \return Reference to ElemType at the index. */ ElemType& operator[](size_t idx) { size_t size = Self()->GetSize(); ICHECK_LT(idx, size) << "Index " << idx << " out of bounds " << size << "\n"; return *(reinterpret_cast<ElemType*>(AddressOf(idx))); } /*! 
* \brief Destroy the Inplace Array Base object */ ~InplaceArrayBase() { if (!(std::is_standard_layout<ElemType>::value && std::is_trivial<ElemType>::value)) { size_t size = Self()->GetSize(); for (size_t i = 0; i < size; ++i) { ElemType* fp = reinterpret_cast<ElemType*>(AddressOf(i)); fp->ElemType::~ElemType(); } } } protected: /*! * \brief Construct a value in place with the arguments. * * \tparam Args Type parameters of the arguments. * \param idx Index of the element. * \param args Arguments to construct the new value. * * \note Please make sure ArrayType::GetSize returns 0 before first call of * EmplaceInit, and increment GetSize by 1 each time EmplaceInit succeeds. */ template <typename... Args> void EmplaceInit(size_t idx, Args&&... args) { void* field_ptr = AddressOf(idx); new (field_ptr) ElemType(std::forward<Args>(args)...); } /*! * \brief Return the self object for the array. * * \return Pointer to ArrayType. */ inline ArrayType* Self() const { return static_cast<ArrayType*>(const_cast<InplaceArrayBase*>(this)); } /*! * \brief Return the raw pointer to the element at idx. * * \param idx The index of the element. * \return Raw pointer to the element. */ void* AddressOf(size_t idx) const { static_assert( alignof(ArrayType) % alignof(ElemType) == 0 && sizeof(ArrayType) % alignof(ElemType) == 0, "The size and alignment of ArrayType should respect " "ElemType's alignment."); size_t kDataStart = sizeof(ArrayType); ArrayType* self = Self(); char* data_start = reinterpret_cast<char*>(self) + kDataStart; return data_start + idx * sizeof(ElemType); } }; /*! * \brief iterator adapter that adapts TIter to return another type. * \tparam Converter a struct that contains converting function * \tparam TIter the content iterator type. 
*/ template <typename Converter, typename TIter> class IterAdapter { public: using difference_type = typename std::iterator_traits<TIter>::difference_type; using value_type = typename Converter::ResultType; using pointer = typename Converter::ResultType*; using reference = typename Converter::ResultType&; using iterator_category = typename std::iterator_traits<TIter>::iterator_category; explicit IterAdapter(TIter iter) : iter_(iter) {} IterAdapter& operator++() { ++iter_; return *this; } IterAdapter& operator--() { --iter_; return *this; } IterAdapter operator++(int) { IterAdapter copy = *this; ++iter_; return copy; } IterAdapter operator--(int) { IterAdapter copy = *this; --iter_; return copy; } IterAdapter operator+(difference_type offset) const { return IterAdapter(iter_ + offset); } IterAdapter operator-(difference_type offset) const { return IterAdapter(iter_ - offset); } template <typename T = IterAdapter> typename std::enable_if<std::is_same<iterator_category, std::random_access_iterator_tag>::value, typename T::difference_type>::type inline operator-(const IterAdapter& rhs) const { return iter_ - rhs.iter_; } bool operator==(IterAdapter other) const { return iter_ == other.iter_; } bool operator!=(IterAdapter other) const { return !(*this == other); } const value_type operator*() const { return Converter::convert(*iter_); } private: TIter iter_; }; /*! * \brief iterator adapter that adapts TIter to return another type. * \tparam Converter a struct that contains converting function * \tparam TIter the content iterator type. 
*/ template <typename Converter, typename TIter> class ReverseIterAdapter { public: using difference_type = typename std::iterator_traits<TIter>::difference_type; using value_type = typename Converter::ResultType; using pointer = typename Converter::ResultType*; using reference = typename Converter::ResultType&; // NOLINT(*) using iterator_category = typename std::iterator_traits<TIter>::iterator_category; explicit ReverseIterAdapter(TIter iter) : iter_(iter) {} ReverseIterAdapter& operator++() { --iter_; return *this; } ReverseIterAdapter& operator--() { ++iter_; return *this; } ReverseIterAdapter operator++(int) { ReverseIterAdapter copy = *this; --iter_; return copy; } ReverseIterAdapter operator--(int) { ReverseIterAdapter copy = *this; ++iter_; return copy; } ReverseIterAdapter operator+(difference_type offset) const { return ReverseIterAdapter(iter_ - offset); } template <typename T = ReverseIterAdapter> typename std::enable_if<std::is_same<iterator_category, std::random_access_iterator_tag>::value, typename T::difference_type>::type inline operator-(const ReverseIterAdapter& rhs) const { return rhs.iter_ - iter_; } bool operator==(ReverseIterAdapter other) const { return iter_ == other.iter_; } bool operator!=(ReverseIterAdapter other) const { return !(*this == other); } const value_type operator*() const { return Converter::convert(*iter_); } private: TIter iter_; }; } // namespace runtime // expose the functions to the root namespace. using runtime::Downcast; using runtime::IterAdapter; using runtime::make_object; using runtime::Object; using runtime::ObjectEqual; using runtime::ObjectHash; using runtime::ObjectPtr; using runtime::ObjectPtrEqual; using runtime::ObjectPtrHash; using runtime::ObjectRef; } // namespace tvm #endif // TVM_RUNTIME_CONTAINER_BASE_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/container/closure.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/runtime/container/closure.h * \brief Runtime Closure container types. */ #ifndef TVM_RUNTIME_CONTAINER_CLOSURE_H_ #define TVM_RUNTIME_CONTAINER_CLOSURE_H_ #include "./base.h" namespace tvm { namespace runtime { /*! * \brief An object representing a closure. This object is used by both the * Relay VM and interpreter. */ class ClosureObj : public Object { public: static constexpr const uint32_t _type_index = TypeIndex::kRuntimeClosure; static constexpr const char* _type_key = "runtime.Closure"; TVM_DECLARE_BASE_OBJECT_INFO(ClosureObj, Object); }; /*! \brief reference to closure. */ class Closure : public ObjectRef { public: TVM_DEFINE_OBJECT_REF_METHODS(Closure, ObjectRef, ClosureObj); }; } // namespace runtime } // namespace tvm #endif // TVM_RUNTIME_CONTAINER_CLOSURE_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/container/map.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/runtime/container/map.h * \brief Runtime Map container types. */ #ifndef TVM_RUNTIME_CONTAINER_MAP_H_ #define TVM_RUNTIME_CONTAINER_MAP_H_ #ifndef USE_FALLBACK_STL_MAP #define USE_FALLBACK_STL_MAP 0 #endif #include <algorithm> #include <unordered_map> #include <utility> #include "./base.h" #include "./optional.h" namespace tvm { namespace runtime { #if TVM_LOG_DEBUG #define TVM_MAP_FAIL_IF_CHANGED() \ ICHECK(state_marker == self->state_marker) << "Concurrent modification of the Map"; #else #define TVM_MAP_FAIL_IF_CHANGED() #endif // TVM_LOG_DEBUG #if (USE_FALLBACK_STL_MAP != 0) /*! \brief Shared content of all specializations of hash map */ class MapNode : public Object { public: /*! \brief Type of the keys in the hash map */ using key_type = ObjectRef; /*! \brief Type of the values in the hash map */ using mapped_type = ObjectRef; /*! \brief Type of the actual underlying container */ using ContainerType = std::unordered_map<ObjectRef, ObjectRef, ObjectHash, ObjectEqual>; /*! \brief Iterator class */ using iterator = ContainerType::iterator; /*! \brief Iterator class */ using const_iterator = ContainerType::const_iterator; /*! 
\brief Type of value stored in the hash map */ using KVType = ContainerType::value_type; static_assert(std::is_standard_layout<KVType>::value, "KVType is not standard layout"); static_assert(sizeof(KVType) == 16 || sizeof(KVType) == 8, "sizeof(KVType) incorrect"); static constexpr const uint32_t _type_index = runtime::TypeIndex::kRuntimeMap; static constexpr const char* _type_key = "Map"; TVM_DECLARE_FINAL_OBJECT_INFO(MapNode, Object); /*! * \brief Number of elements in the SmallMapNode * \return The result */ size_t size() const { return data_.size(); } /*! * \brief Count the number of times a key exists in the hash map * \param key The indexing key * \return The result, 0 or 1 */ size_t count(const key_type& key) const { return data_.count(key); } /*! * \brief Index value associated with a key, throw exception if the key does not exist * \param key The indexing key * \return The const reference to the value */ const mapped_type& at(const key_type& key) const { return data_.at(key); } /*! * \brief Index value associated with a key, throw exception if the key does not exist * \param key The indexing key * \return The mutable reference to the value */ mapped_type& at(const key_type& key) { return data_.at(key); } /*! \return begin iterator */ iterator begin() { return data_.begin(); } /*! \return const begin iterator */ const_iterator begin() const { return data_.begin(); } /*! \return end iterator */ iterator end() { return data_.end(); } /*! \return end iterator */ const_iterator end() const { return data_.end(); } /*! * \brief Index value associated with a key * \param key The indexing key * \return The iterator of the entry associated with the key, end iterator if not exists */ const_iterator find(const key_type& key) const { return data_.find(key); } /*! 
* \brief Index value associated with a key * \param key The indexing key * \return The iterator of the entry associated with the key, end iterator if not exists */ iterator find(const key_type& key) { return data_.find(key); } /*! * \brief Erase the entry associated with the iterator * \param position The iterator */ void erase(const iterator& position) { data_.erase(position); } /*! * \brief Erase the entry associated with the key, do nothing if not exists * \param key The indexing key */ void erase(const key_type& key) { data_.erase(key); } /*! * \brief Create an empty container * \return The object created */ static ObjectPtr<MapNode> Empty() { return make_object<MapNode>(); } protected: /*! * \brief Create the map using contents from the given iterators. * \param first Begin of iterator * \param last End of iterator * \tparam IterType The type of iterator * \return ObjectPtr to the map created */ template <typename IterType> static ObjectPtr<Object> CreateFromRange(IterType first, IterType last) { ObjectPtr<MapNode> p = make_object<MapNode>(); p->data_ = ContainerType(first, last); return p; } /*! * \brief InsertMaybeReHash an entry into the given hash map * \param kv The entry to be inserted * \param map The pointer to the map, can be changed if re-hashing happens */ static void InsertMaybeReHash(const KVType& kv, ObjectPtr<Object>* map) { MapNode* map_node = static_cast<MapNode*>(map->get()); map_node->data_[kv.first] = kv.second; } /*! * \brief Create an empty container with elements copying from another MapNode * \param from The source container * \return The object created */ static ObjectPtr<MapNode> CopyFrom(MapNode* from) { ObjectPtr<MapNode> p = make_object<MapNode>(); p->data_ = ContainerType(from->data_.begin(), from->data_.end()); return p; } /*! \brief The real container storing data */ ContainerType data_; template <typename, typename, typename, typename> friend class Map; }; #else /*! 
\brief Shared content of all specializations of hash map */ class MapNode : public Object { public: /*! \brief Type of the keys in the hash map */ using key_type = ObjectRef; /*! \brief Type of the values in the hash map */ using mapped_type = ObjectRef; /*! \brief Type of value stored in the hash map */ using KVType = std::pair<ObjectRef, ObjectRef>; /*! \brief Iterator class */ class iterator; static_assert(std::is_standard_layout<KVType>::value, "KVType is not standard layout"); static_assert(sizeof(KVType) == 16 || sizeof(KVType) == 8, "sizeof(KVType) incorrect"); static constexpr const uint32_t _type_index = runtime::TypeIndex::kRuntimeMap; static constexpr const char* _type_key = "Map"; TVM_DECLARE_FINAL_OBJECT_INFO(MapNode, Object); /*! * \brief Number of elements in the SmallMapNode * \return The result */ size_t size() const { return size_; } /*! * \brief Count the number of times a key exists in the hash map * \param key The indexing key * \return The result, 0 or 1 */ size_t count(const key_type& key) const; /*! * \brief Index value associated with a key, throw exception if the key does not exist * \param key The indexing key * \return The const reference to the value */ const mapped_type& at(const key_type& key) const; /*! * \brief Index value associated with a key, throw exception if the key does not exist * \param key The indexing key * \return The mutable reference to the value */ mapped_type& at(const key_type& key); /*! \return begin iterator */ iterator begin() const; /*! \return end iterator */ iterator end() const; /*! * \brief Index value associated with a key * \param key The indexing key * \return The iterator of the entry associated with the key, end iterator if not exists */ iterator find(const key_type& key) const; /*! * \brief Erase the entry associated with the iterator * \param position The iterator */ void erase(const iterator& position); /*! 
* \brief Erase the entry associated with the key, do nothing if not exists * \param key The indexing key */ void erase(const key_type& key) { erase(find(key)); } class iterator { public: using iterator_category = std::forward_iterator_tag; using difference_type = int64_t; using value_type = KVType; using pointer = KVType*; using reference = KVType&; /*! \brief Default constructor */ #if TVM_LOG_DEBUG iterator() : state_marker(0), index(0), self(nullptr) {} #else iterator() : index(0), self(nullptr) {} #endif // TVM_LOG_DEBUG /*! \brief Compare iterators */ bool operator==(const iterator& other) const { TVM_MAP_FAIL_IF_CHANGED() return index == other.index && self == other.self; } /*! \brief Compare iterators */ bool operator!=(const iterator& other) const { return !(*this == other); } /*! \brief De-reference iterators */ pointer operator->() const; /*! \brief De-reference iterators */ reference operator*() const { TVM_MAP_FAIL_IF_CHANGED() return *((*this).operator->()); } /*! \brief Prefix self increment, e.g. ++iter */ iterator& operator++(); /*! \brief Prefix self decrement, e.g. --iter */ iterator& operator--(); /*! \brief Suffix self increment */ iterator operator++(int) { TVM_MAP_FAIL_IF_CHANGED() iterator copy = *this; ++(*this); return copy; } /*! \brief Suffix self decrement */ iterator operator--(int) { TVM_MAP_FAIL_IF_CHANGED() iterator copy = *this; --(*this); return copy; } protected: #if TVM_LOG_DEBUG uint64_t state_marker; /*! \brief Construct by value */ iterator(uint64_t index, const MapNode* self) : state_marker(self->state_marker), index(index), self(self) {} #else iterator(uint64_t index, const MapNode* self) : index(index), self(self) {} #endif // TVM_LOG_DEBUG /*! \brief The position on the array */ uint64_t index; /*! \brief The container it points to */ const MapNode* self; friend class DenseMapNode; friend class SmallMapNode; }; /*! 
* \brief Create an empty container * \return The object created */ static inline ObjectPtr<MapNode> Empty(); protected: #if TVM_LOG_DEBUG uint64_t state_marker; #endif // TVM_LOG_DEBUG /*! * \brief Create the map using contents from the given iterators. * \param first Begin of iterator * \param last End of iterator * \tparam IterType The type of iterator * \return ObjectPtr to the map created */ template <typename IterType> static inline ObjectPtr<Object> CreateFromRange(IterType first, IterType last); /*! * \brief InsertMaybeReHash an entry into the given hash map * \param kv The entry to be inserted * \param map The pointer to the map, can be changed if re-hashing happens */ static inline void InsertMaybeReHash(const KVType& kv, ObjectPtr<Object>* map); /*! * \brief Create an empty container with elements copying from another SmallMapNode * \param from The source container * \return The object created */ static inline ObjectPtr<MapNode> CopyFrom(MapNode* from); /*! \brief number of slots minus 1 */ uint64_t slots_; /*! \brief number of entries in the container */ uint64_t size_; // Reference class template <typename, typename, typename, typename> friend class Map; }; /*! \brief A specialization of small-sized hash map */ class SmallMapNode : public MapNode, public runtime::InplaceArrayBase<SmallMapNode, MapNode::KVType> { private: static constexpr uint64_t kInitSize = 2; static constexpr uint64_t kMaxSize = 4; public: using MapNode::iterator; using MapNode::KVType; /*! \brief Defaults to the destructor of InplaceArrayBase */ ~SmallMapNode() = default; /*! * \brief Count the number of times a key exists in the SmallMapNode * \param key The indexing key * \return The result, 0 or 1 */ size_t count(const key_type& key) const { return find(key).index < size_; } /*! 
* \brief Index value associated with a key, throw exception if the key does not exist * \param key The indexing key * \return The const reference to the value */ const mapped_type& at(const key_type& key) const { iterator itr = find(key); ICHECK(itr.index < size_) << "IndexError: key is not in Map"; return itr->second; } /*! * \brief Index value associated with a key, throw exception if the key does not exist * \param key The indexing key * \return The mutable reference to the value */ mapped_type& at(const key_type& key) { iterator itr = find(key); ICHECK(itr.index < size_) << "IndexError: key is not in Map"; return itr->second; } /*! \return begin iterator */ iterator begin() const { return iterator(0, this); } /*! \return end iterator */ iterator end() const { return iterator(size_, this); } /*! * \brief Index value associated with a key * \param key The indexing key * \return The iterator of the entry associated with the key, end iterator if not exists */ iterator find(const key_type& key) const { KVType* ptr = static_cast<KVType*>(AddressOf(0)); for (uint64_t i = 0; i < size_; ++i, ++ptr) { if (ObjectEqual()(ptr->first, key)) { return iterator(i, this); } } return iterator(size_, this); } /*! * \brief Erase the entry associated with the iterator * \param position The iterator */ void erase(const iterator& position) { Erase(position.index); } private: /*! * \brief Remove a position in SmallMapNode * \param index The position to be removed */ void Erase(const uint64_t index) { if (index >= size_) { return; } KVType* begin = static_cast<KVType*>(AddressOf(0)); KVType* last = begin + (size_ - 1); if (index + 1 == size_) { last->first.ObjectRef::~ObjectRef(); last->second.ObjectRef::~ObjectRef(); } else { *(begin + index) = std::move(*last); } size_ -= 1; } /*! 
* \brief Create an empty container * \param n Number of empty slots * \return The object created */ static ObjectPtr<SmallMapNode> Empty(uint64_t n = kInitSize) { using ::tvm::runtime::make_inplace_array_object; ObjectPtr<SmallMapNode> p = make_inplace_array_object<SmallMapNode, KVType>(n); p->size_ = 0; p->slots_ = n; return p; } /*! * \brief Create an empty container initialized with a given range * \param n Number of empty slots * \param first begin of iterator * \param last end of iterator * \tparam IterType The type of iterator * \return The object created */ template <typename IterType> static ObjectPtr<SmallMapNode> CreateFromRange(uint64_t n, IterType first, IterType last) { ObjectPtr<SmallMapNode> p = Empty(n); KVType* ptr = static_cast<KVType*>(p->AddressOf(0)); for (; first != last; ++first, ++p->size_) { new (ptr++) KVType(*first); } return p; } /*! * \brief Create an empty container with elements copying from another SmallMapNode * \param from The source container * \return The object created */ static ObjectPtr<SmallMapNode> CopyFrom(SmallMapNode* from) { KVType* first = static_cast<KVType*>(from->AddressOf(0)); KVType* last = first + from->size_; return CreateFromRange(from->size_, first, last); } /*! 
* \brief InsertMaybeReHash an entry into the given hash map * \param kv The entry to be inserted * \param map The pointer to the map, can be changed if re-hashing happens */ static void InsertMaybeReHash(const KVType& kv, ObjectPtr<Object>* map) { SmallMapNode* map_node = static_cast<SmallMapNode*>(map->get()); iterator itr = map_node->find(kv.first); if (itr.index < map_node->size_) { itr->second = kv.second; return; } if (map_node->size_ < map_node->slots_) { KVType* ptr = static_cast<KVType*>(map_node->AddressOf(map_node->size_)); new (ptr) KVType(kv); ++map_node->size_; return; } uint64_t next_size = std::max(map_node->slots_ * 2, uint64_t(kInitSize)); next_size = std::min(next_size, uint64_t(kMaxSize)); ICHECK_GT(next_size, map_node->slots_); ObjectPtr<Object> new_map = CreateFromRange(next_size, map_node->begin(), map_node->end()); InsertMaybeReHash(kv, &new_map); *map = std::move(new_map); } /*! * \brief Increment the pointer * \param index The pointer to be incremented * \return The increased pointer */ uint64_t IncItr(uint64_t index) const { return index + 1 < size_ ? index + 1 : size_; } /*! * \brief Decrement the pointer * \param index The pointer to be decremented * \return The decreased pointer */ uint64_t DecItr(uint64_t index) const { return index > 0 ? index - 1 : size_; } /*! * \brief De-reference the pointer * \param index The pointer to be dereferenced * \return The result */ KVType* DeRefItr(uint64_t index) const { return static_cast<KVType*>(AddressOf(index)); } /*! \brief A size function used by InplaceArrayBase */ uint64_t GetSize() const { return size_; } protected: friend class MapNode; friend class DenseMapNode; friend class runtime::InplaceArrayBase<SmallMapNode, MapNode::KVType>; }; /*! \brief A specialization of hash map that implements the idea of array-based hash map. * Another reference implementation can be found [1]. * * A. 
Overview * * DenseMapNode did several improvements over traditional separate chaining hash, * in terms of cache locality, memory footprints and data organization. * * A1. Implicit linked list. For better cache locality, instead of using linked list * explicitly for each bucket, we store list data into a single array that spans contiguously * in memory, and then carefully design access patterns to make sure most of them fall into * a single cache line. * * A2. 1-byte metadata. There is only 1 byte overhead for each slot in the array to indexing and * traversal. This can be divided in 3 parts. * 1) Reserved code: (0b11111111)_2 indicates a slot is empty; (0b11111110)_2 indicates protected, * which means the slot is empty but not allowed to be written. * 2) If not empty or protected, the highest bit is used to indicate whether data in the slot is * head of a linked list. * 3) The rest 7 bits are used as the "next pointer" (i.e. pointer to the next element). On 64-bit * architecture, an ordinary pointer can take up to 8 bytes, which is not acceptable overhead when * dealing with 16-byte ObjectRef pairs. Based on a commonly noticed fact that the lists are * relatively short (length <= 3) in hash maps, we follow [1]'s idea that only allows the pointer to * be one of the 126 possible values, i.e. if the next element of i-th slot is (i + x)-th element, * then x must be one of the 126 pre-defined values. * * A3. Data blocking. We organize the array in the way that every 16 elements forms a data block. * The 16-byte metadata of those 16 elements are stored together, followed by the real data, i.e. * 16 key-value pairs. * * B. Implementation details * * B1. Power-of-2 table size and Fibonacci Hashing. We use power-of-two as table size to avoid * modulo for more efficient arithmetics. To make the hash-to-slot mapping distribute more evenly, * we use the Fibonacci Hashing [2] trick. * * B2. Traverse a linked list in the array. * 1) List head. 
Assume Fibonacci Hashing maps a given key to slot i, if metadata at slot i
 * indicates that it is list head, then we found the head; otherwise the list is empty. No probing
 * is done in this procedure. 2) Next element. To find the next element of a non-empty slot i, we
 * look at the last 7 bits of the metadata at slot i. If they are all zeros, then it is the end of
 * list; otherwise, we know that the next element is (i + candidates[the-last-7-bits]).
 *
 * B3. InsertMaybeReHash an element. Following B2, we first traverse the linked list to see if this
 * element is in the linked list, and if not, we put it at the end by probing the next empty
 * position in one of the 126 candidate positions. If the linked list does not even exist, but the
 * slot for list head has been occupied by another linked list, we should find this intruder another
 * place.
 *
 * B4. Quadratic probing with triangle numbers. In open address hashing, it is provable that probing
 * with triangle numbers can traverse power-of-2-sized table [3]. In our algorithm, we follow the
 * suggestion in [1] that also use triangle numbers for "next pointer" as well as sparing for list
 * head.
 *
 * [1] https://github.com/skarupke/flat_hash_map
 * [2] https://programmingpraxis.com/2018/06/19/fibonacci-hash/
 * [3] https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/
 */
class DenseMapNode : public MapNode {
 private:
  /*! \brief The number of elements in a memory block */
  static constexpr int kBlockCap = 16;
  /*! \brief Maximum load factor of the hash map */
  static constexpr double kMaxLoadFactor = 0.99;
  /*! \brief Binary representation of the metadata of an empty slot */
  static constexpr uint8_t kEmptySlot = uint8_t(0b11111111);
  /*! \brief Binary representation of the metadata of a protected slot */
  static constexpr uint8_t kProtectedSlot = uint8_t(0b11111110);
  /*! \brief Number of probing choices available (see A2.3 above) */
  static constexpr int kNumJumpDists = 126;
  /*!
 \brief Head of the implicit linked list */
  struct ListNode;
  /*! \brief POD type of a block of memory: 16 metadata bytes followed by 16 KV pairs (see A3) */
  struct Block {
    uint8_t bytes[kBlockCap + kBlockCap * sizeof(KVType)];
  };
  static_assert(sizeof(Block) == kBlockCap * (sizeof(KVType) + 1), "sizeof(Block) incorrect");
  static_assert(std::is_standard_layout<Block>::value, "Block is not standard layout");

 public:
  using MapNode::iterator;

  /*!
   * \brief Destroy the DenseMapNode
   */
  ~DenseMapNode() { this->Reset(); }
  /*! \return The number of elements of the key */
  size_t count(const key_type& key) const { return !Search(key).IsNone(); }
  /*!
   * \brief Index value associated with a key, throw exception if the key does not exist
   * \param key The indexing key
   * \return The const reference to the value
   */
  const mapped_type& at(const key_type& key) const { return At(key); }
  /*!
   * \brief Index value associated with a key, throw exception if the key does not exist
   * \param key The indexing key
   * \return The mutable reference to the value
   */
  mapped_type& at(const key_type& key) { return At(key); }
  /*!
   * \brief Index value associated with a key
   * \param key The indexing key
   * \return The iterator of the entry associated with the key, end iterator if not exists
   */
  iterator find(const key_type& key) const {
    ListNode node = Search(key);
    return node.IsNone() ? end() : iterator(node.index, this);
  }
  /*!
   * \brief Erase the entry associated with the iterator
   * \param position The iterator
   */
  void erase(const iterator& position) {
    uint64_t index = position.index;
    // only erase when the iterator points into this container and in range
    if (position.self != nullptr && index <= this->slots_) {
      Erase(ListNode(index, this));
    }
  }
  /*! \return begin iterator: the first non-empty slot */
  iterator begin() const {
    if (slots_ == 0) {
      return iterator(0, this);
    }
    for (uint64_t index = 0; index <= slots_; ++index) {
      if (!ListNode(index, this).IsEmpty()) {
        return iterator(index, this);
      }
    }
    return iterator(slots_ + 1, this);
  }
  /*! \return end iterator */
  iterator end() const {
    return slots_ == 0 ?
      iterator(0, this) : iterator(slots_ + 1, this);
  }

 private:
  /*!
   * \brief Search for the given key
   * \param key The key
   * \return ListNode that associated with the key
   */
  ListNode Search(const key_type& key) const {
    if (this->size_ == 0) {
      return ListNode();
    }
    // walk the implicit linked list starting from the list head (see B2)
    for (ListNode iter = GetListHead(ObjectHash()(key)); !iter.IsNone(); iter.MoveToNext(this)) {
      if (ObjectEqual()(key, iter.Key())) {
        return iter;
      }
    }
    return ListNode();
  }
  /*!
   * \brief Search for the given key, throw exception if not exists
   * \param key The key
   * \return ListNode that associated with the key
   */
  mapped_type& At(const key_type& key) const {
    ListNode iter = Search(key);
    ICHECK(!iter.IsNone()) << "IndexError: key is not in Map";
    return iter.Val();
  }
  /*!
   * \brief Try to insert a key, or do nothing if already exists
   * \param key The indexing key
   * \param result The linked-list entry found or just constructed
   * \return A boolean, indicating if actual insertion happens
   */
  bool TryInsert(const key_type& key, ListNode* result) {
    if (slots_ == 0) {
      return false;
    }
    // `iter` must be the head of a linked list through which we can iterate
    ListNode iter = IndexFromHash(ObjectHash()(key));
    // `iter` can be: 1) empty; 2) body of an irrelevant list; 3) head of the relevant list
    // Case 1: empty
    if (iter.IsEmpty()) {
      iter.NewHead(KVType(key, ObjectRef(nullptr)));
      this->size_ += 1;
      *result = iter;
      return true;
    }
    // Case 2: body of an irrelevant list
    if (!iter.IsHead()) {
      // we move the elements around and construct the single-element linked list
      return IsFull() ?
          false : TrySpareListHead(iter, key, result);
    }
    // Case 3: head of the relevant list
    // we iterate through the linked list until the end
    // make sure `iter` is the previous element of `next`
    ListNode next = iter;
    do {
      // find equal item, do not insert
      if (ObjectEqual()(key, next.Key())) {
        *result = next;
        return true;
      }
      // make sure `iter` is the previous element of `next`
      iter = next;
    } while (next.MoveToNext(this));
    // `iter` is the tail of the linked list
    // always check capacity before insertion
    if (IsFull()) {
      return false;
    }
    // find the next empty slot
    uint8_t jump;
    if (!iter.GetNextEmpty(this, &jump, result)) {
      return false;
    }
    result->NewTail(KVType(key, ObjectRef(nullptr)));
    // link `iter` to `empty`, and move forward
    iter.SetJump(jump);
    this->size_ += 1;
    return true;
  }
  /*!
   * \brief Spare an entry to be the head of a linked list.
   * As described in B3, during insertion, it is possible that the entire linked list does not
   * exist, but the slot of its head has been occupied by other linked lists. In this case, we need
   * to spare the slot by moving away the elements to another valid empty one to make insertion
   * possible.
   * \param target The given entry to be spared
   * \param key The indexing key
   * \param result The linked-list entry constructed as the head
   * \return A boolean, if actual insertion happens
   */
  bool TrySpareListHead(ListNode target, const key_type& key, ListNode* result) {
    // `target` is not the head of the linked list
    // move the original item of `target` (if any)
    // and construct new item on the position `target`
    // To make `target` empty, we
    // 1) find `w` the previous element of `target` in the linked list
    // 2) copy the linked list starting from `r = target`
    // 3) paste them after `w`
    // read from the linked list after `r`
    ListNode r = target;
    // write to the tail of `w`
    ListNode w = target.FindPrev(this);
    // after `target` is moved, we disallow writing to the slot
    bool is_first = true;
    uint8_t r_meta, jump;
    ListNode empty;
    do {
      // `jump` describes how `w` is jumped to `empty`
      // rehash if there is no empty space after `w`
      if (!w.GetNextEmpty(this, &jump, &empty)) {
        return false;
      }
      // move `r` to `empty`
      empty.NewTail(std::move(r.Data()));
      // clear the metadata of `r`
      r_meta = r.Meta();
      if (is_first) {
        is_first = false;
        // the head slot must stay unwritable until the caller fills it below
        r.SetProtected();
      } else {
        r.SetEmpty();
      }
      // link `w` to `empty`, and move forward
      w.SetJump(jump);
      w = empty;
      // move `r` forward as well
    } while (r.MoveToNext(this, r_meta));
    // finally we have done moving the linked list
    // fill data_ into `target`
    target.NewHead(KVType(key, ObjectRef(nullptr)));
    this->size_ += 1;
    *result = target;
    return true;
  }
  /*!
   * \brief Remove a ListNode
   * \param iter The node to be removed
   */
  void Erase(const ListNode& iter) {
    this->size_ -= 1;
    if (!iter.HasNext()) {
      // `iter` is the last
      if (!iter.IsHead()) {
        // cut the link if there is any
        iter.FindPrev(this).SetJump(0);
      }
      iter.Data().KVType::~KVType();
      iter.SetEmpty();
    } else {
      // `iter` is in the middle: overwrite it with the list's last element,
      // then drop that last element instead.
      ListNode last = iter, prev = iter;
      for (last.MoveToNext(this); last.HasNext(); prev = last, last.MoveToNext(this)) {
      }
      iter.Data() = std::move(last.Data());
      last.SetEmpty();
      prev.SetJump(0);
    }
  }
  /*! \brief Clear the container to empty, release all entries and memory acquired */
  void Reset() {
    uint64_t n_blocks = CalcNumBlocks(this->slots_);
    for (uint64_t bi = 0; bi < n_blocks; ++bi) {
      uint8_t* meta_ptr = data_[bi].bytes;
      KVType* data_ptr = reinterpret_cast<KVType*>(data_[bi].bytes + kBlockCap);
      for (int j = 0; j < kBlockCap; ++j, ++meta_ptr, ++data_ptr) {
        uint8_t& meta = *meta_ptr;
        // destroy only occupied slots; empty/protected slots hold no object
        if (meta != uint8_t(kProtectedSlot) && meta != uint8_t(kEmptySlot)) {
          meta = uint8_t(kEmptySlot);
          data_ptr->KVType::~KVType();
        }
      }
    }
    ReleaseMemory();
  }
  /*! \brief Release the memory acquired by the container without deleting its entries stored
   * inside */
  void ReleaseMemory() {
    delete[] data_;
    data_ = nullptr;
    slots_ = 0;
    size_ = 0;
    fib_shift_ = 63;
  }
  /*!
   * \brief Create an empty container
   * \param fib_shift The fib shift provided
   * \param n_slots Number of slots required, should be power-of-two
   * \return The object created
   */
  static ObjectPtr<DenseMapNode> Empty(uint32_t fib_shift, uint64_t n_slots) {
    ICHECK_GT(n_slots, uint64_t(SmallMapNode::kMaxSize));
    ObjectPtr<DenseMapNode> p = make_object<DenseMapNode>();
    uint64_t n_blocks = CalcNumBlocks(n_slots - 1);
    Block* block = p->data_ = new Block[n_blocks];
    p->slots_ = n_slots - 1;  // slots_ stores (number of slots - 1), usable as a bit mask
    p->size_ = 0;
    p->fib_shift_ = fib_shift;
    for (uint64_t i = 0; i < n_blocks; ++i, ++block) {
      std::fill(block->bytes, block->bytes + kBlockCap, uint8_t(kEmptySlot));
    }
    return p;
  }
  /*!
   * \brief Create an empty container with elements copying from another DenseMapNode
   * \param from The source container
   * \return The object created
   */
  static ObjectPtr<DenseMapNode> CopyFrom(DenseMapNode* from) {
    ObjectPtr<DenseMapNode> p = make_object<DenseMapNode>();
    uint64_t n_blocks = CalcNumBlocks(from->slots_);
    p->data_ = new Block[n_blocks];
    p->slots_ = from->slots_;
    p->size_ = from->size_;
    p->fib_shift_ = from->fib_shift_;
    for (uint64_t bi = 0; bi < n_blocks; ++bi) {
      uint8_t* meta_ptr_from = from->data_[bi].bytes;
      KVType* data_ptr_from = reinterpret_cast<KVType*>(from->data_[bi].bytes + kBlockCap);
      uint8_t* meta_ptr_to = p->data_[bi].bytes;
      KVType* data_ptr_to = reinterpret_cast<KVType*>(p->data_[bi].bytes + kBlockCap);
      for (int j = 0; j < kBlockCap;
           ++j, ++meta_ptr_from, ++data_ptr_from, ++meta_ptr_to, ++data_ptr_to) {
        // copy the metadata byte, then copy-construct the pair only for occupied slots
        uint8_t& meta = *meta_ptr_to = *meta_ptr_from;
        ICHECK(meta != kProtectedSlot);
        if (meta != uint8_t(kEmptySlot)) {
          new (data_ptr_to) KVType(*data_ptr_from);
        }
      }
    }
    return p;
  }
  /*!
   * \brief InsertMaybeReHash an entry into the given hash map
   * \param kv The entry to be inserted
   * \param map The pointer to the map, can be changed if re-hashing happens
   */
  static void InsertMaybeReHash(const KVType& kv, ObjectPtr<Object>* map) {
    DenseMapNode* map_node = static_cast<DenseMapNode*>(map->get());
    ListNode iter;
    // Try to insert. If succeed, we simply return
    if (map_node->TryInsert(kv.first, &iter)) {
      iter.Val() = kv.second;
      return;
    }
    ICHECK_GT(map_node->slots_, uint64_t(SmallMapNode::kMaxSize));
    // Otherwise, start rehash
    ObjectPtr<Object> p = Empty(map_node->fib_shift_ - 1, map_node->slots_ * 2 + 2);
    // Insert the given `kv` into the new hash map
    InsertMaybeReHash(kv, &p);
    uint64_t n_blocks = CalcNumBlocks(map_node->slots_);
    // Then Insert data from the original block.
    for (uint64_t bi = 0; bi < n_blocks; ++bi) {
      uint8_t* meta_ptr = map_node->data_[bi].bytes;
      KVType* data_ptr = reinterpret_cast<KVType*>(map_node->data_[bi].bytes + kBlockCap);
      for (int j = 0; j < kBlockCap; ++j, ++meta_ptr, ++data_ptr) {
        uint8_t& meta = *meta_ptr;
        if (meta != uint8_t(kProtectedSlot) && meta != uint8_t(kEmptySlot)) {
          // move each occupied entry into the new map, marking the old slot empty
          meta = uint8_t(kEmptySlot);
          KVType kv = std::move(*data_ptr);
          InsertMaybeReHash(kv, &p);
        }
      }
    }
    map_node->ReleaseMemory();
    *map = p;
  }
  /*!
   * \brief Check whether the hash table is full
   * \return A boolean indicating whether hash table is full
   */
  bool IsFull() const { return size_ + 1 > (slots_ + 1) * kMaxLoadFactor; }
  /*!
   * \brief Increment the pointer
   * \param index The pointer to be incremented
   * \return The increased pointer
   */
  uint64_t IncItr(uint64_t index) const {
    // skip over empty slots; (slots_ + 1) is the end sentinel
    for (++index; index <= slots_; ++index) {
      if (!ListNode(index, this).IsEmpty()) {
        return index;
      }
    }
    return slots_ + 1;
  }
  /*!
   * \brief Decrement the pointer
   * \param index The pointer to be decremented
   * \return The decreased pointer
   */
  uint64_t DecItr(uint64_t index) const {
    while (index != 0) {
      index -= 1;
      if (!ListNode(index, this).IsEmpty()) {
        return index;
      }
    }
    return slots_ + 1;
  }
  /*!
   * \brief De-reference the pointer
   * \param index The pointer to be dereferenced
   * \return The result
   */
  KVType* DeRefItr(uint64_t index) const { return &ListNode(index, this).Data(); }
  /*! \brief Construct from hash code */
  ListNode IndexFromHash(uint64_t hash_value) const {
    return ListNode(FibHash(hash_value, fib_shift_), this);
  }
  /*! \brief Construct from hash code if the position is head of list */
  ListNode GetListHead(uint64_t hash_value) const {
    ListNode node = IndexFromHash(hash_value);
    return node.IsHead() ? node : ListNode();
  }
  /*! \brief Construct the number of blocks in the hash table */
  static uint64_t CalcNumBlocks(uint64_t n_slots_m1) {
    // n_slots_m1 is (number of slots - 1); 0 means an empty table
    uint64_t n_slots = n_slots_m1 > 0 ? n_slots_m1 + 1 : 0;
    return (n_slots + kBlockCap - 1) / kBlockCap;
  }
  /*!
   * \brief Calculate the power-of-2 table size given the lower-bound of required capacity.
   * \param cap The lower-bound of the required capacity
   * \param fib_shift The result shift for Fibonacci Hashing
   * \param n_slots The result number of slots
   */
  static void CalcTableSize(uint64_t cap, uint32_t* fib_shift, uint64_t* n_slots) {
    uint32_t shift = 64;
    uint64_t slots = 1;
    // find the smallest power of two strictly greater than cap
    for (uint64_t c = cap; c; c >>= 1) {
      shift -= 1;
      slots <<= 1;
    }
    ICHECK_GT(slots, cap);
    // double once more when cap is more than half the table, to keep load low
    if (slots < cap * 2) {
      *fib_shift = shift - 1;
      *n_slots = slots << 1;
    } else {
      *fib_shift = shift;
      *n_slots = slots;
    }
  }
  /*!
   * \brief Fibonacci Hashing, maps a hash code to an index in a power-of-2-sized table.
   * See also: https://programmingpraxis.com/2018/06/19/fibonacci-hash/.
   * \param hash_value The raw hash value
   * \param fib_shift The shift in Fibonacci Hashing
   * \return An index calculated using Fibonacci Hashing
   */
  static uint64_t FibHash(uint64_t hash_value, uint32_t fib_shift) {
    // 2^64 / golden ratio, rounded; multiplication mixes the bits
    constexpr uint64_t coeff = 11400714819323198485ull;
    return (coeff * hash_value) >> fib_shift;
  }
  /*! \brief The implicit in-place linked list used to index a chain */
  struct ListNode {
    /*! \brief Construct None */
    ListNode() : index(0), block(nullptr) {}
    /*! \brief Construct from position */
    ListNode(uint64_t index, const DenseMapNode* self)
        : index(index), block(self->data_ + (index / kBlockCap)) {}
    /*! \brief Metadata on the entry */
    uint8_t& Meta() const { return *(block->bytes + index % kBlockCap); }
    /*! \brief Data on the entry */
    KVType& Data() const {
      return *(reinterpret_cast<KVType*>(block->bytes + kBlockCap +
                                         (index % kBlockCap) * sizeof(KVType)));
    }
    /*! \brief Key on the entry */
    key_type& Key() const { return Data().first; }
    /*! \brief Value on the entry */
    mapped_type& Val() const { return Data().second; }
    /*! \brief If the entry is head of linked list (highest metadata bit is 0) */
    bool IsHead() const { return (Meta() & 0b10000000) == 0b00000000; }
    /*! \brief If the entry is none */
    bool IsNone() const { return block == nullptr; }
    /*!
     \brief If the entry is empty slot */
    bool IsEmpty() const { return Meta() == uint8_t(kEmptySlot); }
    /*! \brief If the entry is protected slot */
    bool IsProtected() const { return Meta() == uint8_t(kProtectedSlot); }
    /*! \brief Set the entry to be empty */
    void SetEmpty() const { Meta() = uint8_t(kEmptySlot); }
    /*! \brief Set the entry to be protected */
    void SetProtected() const { Meta() = uint8_t(kProtectedSlot); }
    /*! \brief Set the entry's jump to its next entry (keeps the head/tail bit) */
    void SetJump(uint8_t jump) const { (Meta() &= 0b10000000) |= jump; }
    /*! \brief Construct a head of linked list in-place */
    void NewHead(KVType v) const {
      Meta() = 0b00000000;
      new (&Data()) KVType(std::move(v));
    }
    /*! \brief Construct a tail of linked list in-place */
    void NewTail(KVType v) const {
      Meta() = 0b10000000;
      new (&Data()) KVType(std::move(v));
    }
    /*! \brief If the entry has next entry on the linked list */
    bool HasNext() const { return NextProbeLocation(Meta() & 0b01111111) != 0; }
    /*! \brief Move the entry to the next entry on the linked list */
    bool MoveToNext(const DenseMapNode* self, uint8_t meta) {
      uint64_t offset = NextProbeLocation(meta & 0b01111111);
      if (offset == 0) {
        // end of list: become None
        index = 0;
        block = nullptr;
        return false;
      }
      // slots_ is a power-of-two mask, so `&` wraps around the table
      index = (index + offset) & (self->slots_);
      block = self->data_ + (index / kBlockCap);
      return true;
    }
    /*! \brief Move the entry to the next entry on the linked list */
    bool MoveToNext(const DenseMapNode* self) { return MoveToNext(self, Meta()); }
    /*! \brief Get the previous entry on the linked list */
    ListNode FindPrev(const DenseMapNode* self) const {
      // start from the head of the linked list, which must exist
      ListNode next = self->IndexFromHash(ObjectHash()(Key()));
      // `prev` is always the previous item of `next`
      ListNode prev = next;
      for (next.MoveToNext(self); index != next.index; prev = next, next.MoveToNext(self)) {
      }
      return prev;
    }
    /*!
     \brief Get the next empty jump */
    bool GetNextEmpty(const DenseMapNode* self, uint8_t* jump, ListNode* result) const {
      // try all 125 non-zero probing distances in order (see B4)
      for (uint8_t idx = 1; idx < kNumJumpDists; ++idx) {
        ListNode candidate((index + NextProbeLocation(idx)) & (self->slots_), self);
        if (candidate.IsEmpty()) {
          *jump = idx;
          *result = candidate;
          return true;
        }
      }
      return false;
    }
    /*! \brief Index on the real array */
    uint64_t index;
    /*! \brief Pointer to the actual block */
    Block* block;
  };

 protected:
  /*! \brief fib shift in Fibonacci Hashing */
  uint32_t fib_shift_;
  /*! \brief array of data blocks */
  Block* data_;
  /*! \brief Map a 7-bit jump code to an actual slot offset (see B4) */
  static uint64_t NextProbeLocation(size_t index) {
    /* clang-format off */
    /*! \brief Candidates of probing distance */
    static const uint64_t kNextProbeLocation[kNumJumpDists] {
      0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
      // Quadratic probing with triangle numbers. See also:
      // 1) https://en.wikipedia.org/wiki/Quadratic_probing
      // 2) https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/
      // 3) https://github.com/skarupke/flat_hash_map
      21, 28, 36, 45, 55, 66, 78, 91, 105, 120,
      136, 153, 171, 190, 210, 231, 253, 276, 300, 325,
      351, 378, 406, 435, 465, 496, 528, 561, 595, 630,
      666, 703, 741, 780, 820, 861, 903, 946, 990, 1035,
      1081, 1128, 1176, 1225, 1275, 1326, 1378, 1431, 1485, 1540,
      1596, 1653, 1711, 1770, 1830, 1891, 1953, 2016, 2080, 2145,
      2211, 2278, 2346, 2415, 2485, 2556, 2628,
      // larger triangle numbers
      8515, 19110, 42778, 96141, 216153, 486591, 1092981, 2458653,
      5532801, 12442566, 27993903, 62983476, 141717030, 318844378,
      717352503, 1614057336, 3631522476, 8170957530, 18384510628,
      41364789378, 93070452520, 209408356380, 471168559170,
      1060128894105, 2385289465695, 5366898840628, 12075518705635,
      27169915244790, 61132312065111, 137547689707000, 309482283181501,
      696335127828753, 1566753995631385, 3525196511162271,
      7931691992677701, 17846306936293605, 40154190677507445,
      90346928918121501, 203280589587557251, 457381325854679626,
      1029107982097042876, 2315492959180353330,
      5209859154120846435,
    };
    /* clang-format on */
    return kNextProbeLocation[index];
  }
  friend class MapNode;
};

// Dispatch `body` with `var` bound to either SmallMapNode* or DenseMapNode*,
// chosen by the slot count of `base` (small maps have at most kMaxSize slots).
#define TVM_DISPATCH_MAP(base, var, body)     \
  {                                           \
    using TSmall = SmallMapNode*;             \
    using TDense = DenseMapNode*;             \
    uint64_t slots = base->slots_;            \
    if (slots <= SmallMapNode::kMaxSize) {    \
      TSmall var = static_cast<TSmall>(base); \
      body;                                   \
    } else {                                  \
      TDense var = static_cast<TDense>(base); \
      body;                                   \
    }                                         \
  }

// Const-qualified variant of TVM_DISPATCH_MAP.
#define TVM_DISPATCH_MAP_CONST(base, var, body) \
  {                                             \
    using TSmall = const SmallMapNode*;         \
    using TDense = const DenseMapNode*;         \
    uint64_t slots = base->slots_;              \
    if (slots <= SmallMapNode::kMaxSize) {      \
      TSmall var = static_cast<TSmall>(base);   \
      body;                                     \
    } else {                                    \
      TDense var = static_cast<TDense>(base);   \
      body;                                     \
    }                                           \
  }

inline MapNode::iterator::pointer MapNode::iterator::operator->() const {
  TVM_MAP_FAIL_IF_CHANGED()
  TVM_DISPATCH_MAP_CONST(self, p, { return p->DeRefItr(index); });
}

inline MapNode::iterator& MapNode::iterator::operator++() {
  TVM_MAP_FAIL_IF_CHANGED()
  TVM_DISPATCH_MAP_CONST(self, p, {
    index = p->IncItr(index);
    return *this;
  });
}

inline MapNode::iterator& MapNode::iterator::operator--() {
  TVM_MAP_FAIL_IF_CHANGED()
  TVM_DISPATCH_MAP_CONST(self, p, {
    index = p->DecItr(index);
    return *this;
  });
}

inline size_t MapNode::count(const key_type& key) const {
  TVM_DISPATCH_MAP_CONST(this, p, { return p->count(key); });
}

inline const MapNode::mapped_type& MapNode::at(const MapNode::key_type& key) const {
  TVM_DISPATCH_MAP_CONST(this, p, { return p->at(key); });
}

inline MapNode::mapped_type& MapNode::at(const MapNode::key_type& key) {
  TVM_DISPATCH_MAP(this, p, { return p->at(key); });
}

inline MapNode::iterator MapNode::begin() const {
  TVM_DISPATCH_MAP_CONST(this, p, { return p->begin(); });
}

inline MapNode::iterator MapNode::end() const {
  TVM_DISPATCH_MAP_CONST(this, p, { return p->end(); });
}

inline MapNode::iterator MapNode::find(const MapNode::key_type& key) const {
  TVM_DISPATCH_MAP_CONST(this, p, { return p->find(key); });
}

inline void
MapNode::erase(const MapNode::iterator& position) {
  TVM_DISPATCH_MAP(this, p, { return p->erase(position); });
}

#undef TVM_DISPATCH_MAP
#undef TVM_DISPATCH_MAP_CONST

inline ObjectPtr<MapNode> MapNode::Empty() { return SmallMapNode::Empty(); }

inline ObjectPtr<MapNode> MapNode::CopyFrom(MapNode* from) {
  // dispatch on the concrete representation by slot count
  if (from->slots_ <= SmallMapNode::kMaxSize) {
    return SmallMapNode::CopyFrom(static_cast<SmallMapNode*>(from));
  } else {
    return DenseMapNode::CopyFrom(static_cast<DenseMapNode*>(from));
  }
}

template <typename IterType>
inline ObjectPtr<Object> MapNode::CreateFromRange(IterType first, IterType last) {
  int64_t _cap = std::distance(first, last);
  if (_cap < 0) {
    return SmallMapNode::Empty();
  }
  uint64_t cap = static_cast<uint64_t>(_cap);
  // small ranges use the array-backed SmallMapNode representation
  if (cap < SmallMapNode::kMaxSize) {
    return SmallMapNode::CreateFromRange(cap, first, last);
  }
  uint32_t fib_shift;
  uint64_t n_slots;
  DenseMapNode::CalcTableSize(cap, &fib_shift, &n_slots);
  ObjectPtr<Object> obj = DenseMapNode::Empty(fib_shift, n_slots);
  for (; first != last; ++first) {
    KVType kv(*first);
    DenseMapNode::InsertMaybeReHash(kv, &obj);
  }
  return obj;
}

inline void MapNode::InsertMaybeReHash(const KVType& kv, ObjectPtr<Object>* map) {
  constexpr uint64_t kSmallMapMaxSize = SmallMapNode::kMaxSize;
  MapNode* base = static_cast<MapNode*>(map->get());
#if TVM_LOG_DEBUG
  base->state_marker++;
#endif  // TVM_LOG_DEBUG
  if (base->slots_ < kSmallMapMaxSize) {
    SmallMapNode::InsertMaybeReHash(kv, map);
  } else if (base->slots_ == kSmallMapMaxSize) {
    if (base->size_ < base->slots_) {
      SmallMapNode::InsertMaybeReHash(kv, map);
    } else {
      // small map is saturated: migrate to the dense representation first
      ObjectPtr<Object> new_map = MapNode::CreateFromRange(base->begin(), base->end());
      DenseMapNode::InsertMaybeReHash(kv, &new_map);
      *map = std::move(new_map);
    }
  } else {
    DenseMapNode::InsertMaybeReHash(kv, map);
  }
}

// MapNode instances must be created through the factory functions above.
template <>
inline ObjectPtr<MapNode> make_object<>() = delete;

#endif

/*!
 * \brief Map container of NodeRef->NodeRef in DSL graph.
 * Map implements copy on write semantics, which means map is mutable
 * but copy will happen when array is referenced in more than two places.
 *
 * operator[] only provides const access, use Set to mutate the content.
 * \tparam K The key NodeRef type.
 * \tparam V The value NodeRef type.
 */
template <typename K, typename V,
          typename = typename std::enable_if<std::is_base_of<ObjectRef, K>::value>::type,
          typename = typename std::enable_if<std::is_base_of<ObjectRef, V>::value>::type>
class Map : public ObjectRef {
 public:
  using key_type = K;
  using mapped_type = V;
  class iterator;
  /*!
   * \brief default constructor
   */
  Map() { data_ = MapNode::Empty(); }
  /*!
   * \brief move constructor
   * \param other source
   */
  Map(Map<K, V>&& other) { data_ = std::move(other.data_); }
  /*!
   * \brief copy constructor
   * \param other source
   */
  Map(const Map<K, V>& other) : ObjectRef(other.data_) {}
  /*!
   * \brief move assign operator
   * \param other The source of assignment
   * \return reference to self.
   */
  Map<K, V>& operator=(Map<K, V>&& other) {
    data_ = std::move(other.data_);
    return *this;
  }
  /*!
   * \brief copy assign operator
   * \param other The source of assignment
   * \return reference to self.
   */
  Map<K, V>& operator=(const Map<K, V>& other) {
    data_ = other.data_;
    return *this;
  }
  /*!
   * \brief constructor from pointer
   * \param n the container pointer
   */
  explicit Map(ObjectPtr<Object> n) : ObjectRef(n) {}
  /*!
   * \brief constructor from iterator
   * \param begin begin of iterator
   * \param end end of iterator
   * \tparam IterType The type of iterator
   */
  template <typename IterType>
  Map(IterType begin, IterType end) {
    data_ = MapNode::CreateFromRange(begin, end);
  }
  /*!
   * \brief constructor from initializer list
   * \param init The initializer list
   */
  Map(std::initializer_list<std::pair<K, V>> init) {
    data_ = MapNode::CreateFromRange(init.begin(), init.end());
  }
  /*!
* \brief constructor from unordered_map * \param init The unordered_map */ template <typename Hash, typename Equal> Map(const std::unordered_map<K, V, Hash, Equal>& init) { // NOLINT(*) data_ = MapNode::CreateFromRange(init.begin(), init.end()); } /*! * \brief Read element from map. * \param key The key * \return the corresonding element. */ const V at(const K& key) const { return DowncastNoCheck<V>(GetMapNode()->at(key)); } /*! * \brief Read element from map. * \param key The key * \return the corresonding element. */ const V operator[](const K& key) const { return this->at(key); } /*! \return The size of the array */ size_t size() const { MapNode* n = GetMapNode(); return n == nullptr ? 0 : n->size(); } /*! \return The number of elements of the key */ size_t count(const K& key) const { MapNode* n = GetMapNode(); return n == nullptr ? 0 : GetMapNode()->count(key); } /*! \return whether array is empty */ bool empty() const { return size() == 0; } /*! \brief Release reference to all the elements */ void clear() { MapNode* n = GetMapNode(); if (n != nullptr) { data_ = MapNode::Empty(); } } /*! * \brief set the Map. * \param key The index key. * \param value The value to be setted. */ void Set(const K& key, const V& value) { CopyOnWrite(); MapNode::InsertMaybeReHash(MapNode::KVType(key, value), &data_); } /*! \return begin iterator */ iterator begin() const { return iterator(GetMapNode()->begin()); } /*! \return end iterator */ iterator end() const { return iterator(GetMapNode()->end()); } /*! \return find the key and returns the associated iterator */ iterator find(const K& key) const { return iterator(GetMapNode()->find(key)); } /*! \return The value associated with the key, NullOpt if not found */ Optional<V> Get(const K& key) const { MapNode::iterator iter = GetMapNode()->find(key); if (iter == GetMapNode()->end()) { return NullOptType{}; } return DowncastNoCheck<V>(iter->second); } void erase(const K& key) { CopyOnWrite()->erase(key); } /*! 
   * \brief copy on write semantics
   * Do nothing if current handle is the unique copy of the array.
   * Otherwise make a new copy of the array to ensure the current handle
   * holds a unique copy.
   *
   * \return Handle to the internal node container (which is guaranteed to be unique)
   */
  MapNode* CopyOnWrite() {
    if (data_.get() == nullptr) {
      data_ = MapNode::Empty();
    } else if (!data_.unique()) {
      data_ = MapNode::CopyFrom(GetMapNode());
    }
    return GetMapNode();
  }
  /*! \brief specify container node */
  using ContainerType = MapNode;

  /*! \brief Iterator of the hash map */
  class iterator {
   public:
    using iterator_category = std::bidirectional_iterator_tag;
    using difference_type = int64_t;
    using value_type = const std::pair<K, V>;
    using pointer = value_type*;
    using reference = value_type;

    iterator() : itr() {}

    /*! \brief Compare iterators */
    bool operator==(const iterator& other) const { return itr == other.itr; }
    /*! \brief Compare iterators */
    bool operator!=(const iterator& other) const { return itr != other.itr; }
    /*! \brief De-reference iterators is not allowed */
    pointer operator->() const = delete;
    /*! \brief De-reference iterators: returns a fresh pair by value */
    reference operator*() const {
      auto& kv = *itr;
      return std::make_pair(DowncastNoCheck<K>(kv.first), DowncastNoCheck<V>(kv.second));
    }
    /*! \brief Prefix self increment, e.g. ++iter */
    iterator& operator++() {
      ++itr;
      return *this;
    }
    /*! \brief Suffix self increment */
    iterator operator++(int) {
      iterator copy = *this;
      ++(*this);
      return copy;
    }

   private:
    iterator(const MapNode::iterator& itr)  // NOLINT(*)
        : itr(itr) {}

    template <typename, typename, typename, typename>
    friend class Map;

    /*! \brief The underlying MapNode iterator being wrapped */
    MapNode::iterator itr;
  };

 private:
  /*! \brief Return data_ as type of pointer of MapNode */
  MapNode* GetMapNode() const { return static_cast<MapNode*>(data_.get()); }
};

/*!
 * \brief Merge two Maps.
 * \param lhs the first Map to merge.
 * \param rhs the second Map to merge.
 * \return The merged Map. Original Maps are kept unchanged.
 */
template <typename K, typename V,
          typename = typename std::enable_if<std::is_base_of<ObjectRef, K>::value>::type,
          typename = typename std::enable_if<std::is_base_of<ObjectRef, V>::value>::type>
inline Map<K, V> Merge(Map<K, V> lhs, const Map<K, V>& rhs) {
  // lhs is taken by value, so mutating it does not affect the caller's map;
  // entries from rhs overwrite duplicates in lhs.
  for (const auto& p : rhs) {
    lhs.Set(p.first, p.second);
  }
  return std::move(lhs);
}

}  // namespace runtime

// expose the functions to the root namespace.
using runtime::Map;
using runtime::MapNode;
}  // namespace tvm
#endif  // TVM_RUNTIME_CONTAINER_MAP_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/container/optional.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*!
 * \file tvm/runtime/container/optional.h
 * \brief Runtime Optional container types.
 */
#ifndef TVM_RUNTIME_CONTAINER_OPTIONAL_H_
#define TVM_RUNTIME_CONTAINER_OPTIONAL_H_

#include <utility>

#include "./base.h"

namespace tvm {
namespace runtime {

/*! \brief Helper to represent nullptr for optional. */
struct NullOptType {};

/*!
 * \brief Optional container that to represent to a Nullable variant of T.
 * \tparam T The original ObjectRef.
 *
 * \code
 *
 *  Optional<String> opt0 = nullptr;
 *  Optional<String> opt1 = String("xyz");
 *  ICHECK(opt0 == nullptr);
 *  ICHECK(opt1 == "xyz");
 *
 * \endcode
 */
template <typename T>
class Optional : public ObjectRef {
 public:
  using ContainerType = typename T::ContainerType;
  static_assert(std::is_base_of<ObjectRef, T>::value, "Optional is only defined for ObjectRef.");
  // default constructors.
  Optional() = default;
  Optional(const Optional<T>&) = default;
  Optional(Optional<T>&&) = default;
  Optional<T>& operator=(const Optional<T>&) = default;
  Optional<T>& operator=(Optional<T>&&) = default;
  /*!
   * \brief Construct from an ObjectPtr
   *        whose type already matches the ContainerType.
   * \param ptr
   */
  explicit Optional(ObjectPtr<Object> ptr) : ObjectRef(ptr) {}
  /*!
 \brief Nullopt handling */
  Optional(NullOptType) {}  // NOLINT(*)
  // nullptr handling.
  // disallow implicit conversion as 0 can be implicitly converted to nullptr_t
  explicit Optional(std::nullptr_t) {}
  Optional<T>& operator=(std::nullptr_t) {
    data_ = nullptr;
    return *this;
  }
  // normal value handling.
  Optional(T other)  // NOLINT(*)
      : ObjectRef(std::move(other)) {}
  Optional<T>& operator=(T other) {
    ObjectRef::operator=(std::move(other));
    return *this;
  }
  // delete the int constructor
  // since Optional<Integer>(0) is ambiguous
  // 0 can be implicitly casted to nullptr_t
  explicit Optional(int val) = delete;
  Optional<T>& operator=(int val) = delete;
  /*!
   * \return A not-null container value in the optional.
   * \note This function performs not-null checking.
   */
  T value() const {
    ICHECK(data_ != nullptr);
    return T(data_);
  }
  /*!
   * \return The internal object pointer with container type of T.
   * \note This function do not perform not-null checking.
   */
  const ContainerType* get() const { return static_cast<ContainerType*>(data_.get()); }
  /*!
   * \return The contained value if the Optional is not null
   *         otherwise return the default_value.
   */
  T value_or(T default_value) const { return data_ != nullptr ? T(data_) : default_value; }
  /*! \return Whether the container is not nullptr.*/
  explicit operator bool() const { return *this != nullptr; }
  // operator overloadings
  bool operator==(std::nullptr_t) const { return data_ == nullptr; }
  bool operator!=(std::nullptr_t) const { return data_ != nullptr; }
  auto operator==(const Optional<T>& other) const {
    // support case where sub-class returns a symbolic ref type.
    using RetType = decltype(value() == other.value());
    if (same_as(other)) return RetType(true);
    if (*this != nullptr && other != nullptr) {
      return value() == other.value();
    } else {
      // one of them is nullptr.
      return RetType(false);
    }
  }
  auto operator!=(const Optional<T>& other) const {
    // support case where sub-class returns a symbolic ref type.
using RetType = decltype(value() != other.value()); if (same_as(other)) return RetType(false); if (*this != nullptr && other != nullptr) { return value() != other.value(); } else { // one of them is nullptr. return RetType(true); } } auto operator==(const T& other) const { using RetType = decltype(value() == other); if (same_as(other)) return RetType(true); if (*this != nullptr) return value() == other; return RetType(false); } auto operator!=(const T& other) const { return !(*this == other); } template <typename U> auto operator==(const U& other) const { using RetType = decltype(value() == other); if (*this == nullptr) return RetType(false); return value() == other; } template <typename U> auto operator!=(const U& other) const { using RetType = decltype(value() != other); if (*this == nullptr) return RetType(true); return value() != other; } static constexpr bool _type_is_nullable = true; }; } // namespace runtime // expose the functions to the root namespace. using runtime::Optional; constexpr runtime::NullOptType NullOpt{}; } // namespace tvm #endif // TVM_RUNTIME_CONTAINER_OPTIONAL_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/container/shape_tuple.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/runtime/container/shape_tuple.h * \brief Runtime ShapeTuple container types. */ #ifndef TVM_RUNTIME_CONTAINER_SHAPE_TUPLE_H_ #define TVM_RUNTIME_CONTAINER_SHAPE_TUPLE_H_ #include <utility> #include <vector> #include "./base.h" namespace tvm { namespace runtime { /*! \brief An object representing a shape tuple. */ class ShapeTupleObj : public Object { public: /*! \brief The type of shape index element. */ using index_type = int64_t; /*! \brief The pointer to shape tuple data. */ index_type* data; /*! \brief The size of the shape tuple object. */ uint64_t size; static constexpr const uint32_t _type_index = runtime::TypeIndex::kRuntimeShapeTuple; static constexpr const char* _type_key = "runtime.ShapeTuple"; TVM_DECLARE_FINAL_OBJECT_INFO(ShapeTupleObj, Object); private: /*! \brief ShapeTuple object which is moved from std::vector container. */ class FromStd; friend class ShapeTuple; }; /*! \brief An object representing shape tuple moved from std::vector. */ class ShapeTupleObj::FromStd : public ShapeTupleObj { public: /*! \brief The type of shape index element. */ using index_type = ShapeTupleObj::index_type; /*! 
* \brief Construct a new FromStd object * * \param other The moved/copied std::vector object * * \note If user passes const reference, it will trigger copy. If it's rvalue, * it will be moved into other. */ explicit FromStd(std::vector<index_type> other) : data_container{other} {} private: /*! \brief Container that holds the memory. */ std::vector<index_type> data_container; friend class ShapeTuple; }; /*! * \brief Reference to shape tuple objects. */ class ShapeTuple : public ObjectRef { public: /*! \brief The type of shape index element. */ using index_type = ShapeTupleObj::index_type; /*! * \brief Construct an empty shape tuple. */ ShapeTuple() : ShapeTuple(std::vector<index_type>()) {} /*! * \brief Constructor from iterator * \param begin begin of iterator * \param end end of iterator * \tparam IterType The type of iterator */ template <typename IterType> ShapeTuple(IterType begin, IterType end) : ShapeTuple(std::vector<index_type>(begin, end)) {} /*! * \brief constructor from initializer list * \param shape The initializer list */ ShapeTuple(std::initializer_list<index_type> shape) : ShapeTuple(shape.begin(), shape.end()) {} /*! * \brief Construct a new ShapeTuple object * * \param shape The moved/copied std::vector object * * \note If user passes const reference, it will trigger copy. If it's rvalue, * it will be moved into other. */ ShapeTuple(std::vector<index_type> shape); // NOLINT(*) /*! * \brief Return the data pointer * * \return const index_type* data pointer */ const index_type* data() const { return get()->data; } /*! * \brief Return the size of the shape tuple * * \return size_t shape tuple size */ size_t size() const { return get()->size; } /*! * \brief Immutably read i-th element from the shape tuple. * \param idx The index * \return the i-th element. */ index_type operator[](size_t idx) const { ICHECK(idx < this->size()) << "IndexError: indexing " << idx << " on an array of size " << this->size(); return this->data()[idx]; } /*! 
* \brief Immutably read i-th element from the shape tuple. * \param idx The index * \return the i-th element. */ index_type at(size_t idx) const { return this->operator[](idx); } /*! \return Whether shape tuple is empty */ bool empty() const { return size() == 0; } /*! \return The first element of the shape tuple */ index_type front() const { return this->at(0); } /*! \return The last element of the shape tuple */ index_type back() const { return this->at(this->size() - 1); } /*! \return begin iterator */ const index_type* begin() const { return get()->data; } /*! \return end iterator */ const index_type* end() const { return (get()->data + size()); } TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(ShapeTuple, ObjectRef, ShapeTupleObj); }; inline ShapeTuple::ShapeTuple(std::vector<index_type> shape) { auto ptr = make_object<ShapeTupleObj::FromStd>(std::move(shape)); ptr->size = ptr->data_container.size(); ptr->data = ptr->data_container.data(); data_ = std::move(ptr); } } // namespace runtime // expose the functions to the root namespace. using runtime::ShapeTuple; using runtime::ShapeTupleObj; } // namespace tvm #endif // TVM_RUNTIME_CONTAINER_SHAPE_TUPLE_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/container/string.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/runtime/container/string.h * \brief Runtime String container types. */ #ifndef TVM_RUNTIME_CONTAINER_STRING_H_ #define TVM_RUNTIME_CONTAINER_STRING_H_ #include <dmlc/logging.h> #include <tvm/runtime/container/base.h> #include <tvm/runtime/logging.h> #include <tvm/runtime/memory.h> #include <tvm/runtime/object.h> #include <algorithm> #include <cstddef> #include <cstring> #include <initializer_list> #include <memory> #include <string> #include <string_view> #include <type_traits> #include <unordered_map> #include <utility> #include <vector> namespace tvm { namespace runtime { // Forward declare TVMArgValue class TVMArgValue; /*! \brief An object representing string. It's POD type. */ class StringObj : public Object { public: /*! \brief The pointer to string data. */ const char* data; /*! \brief The length of the string object. */ uint64_t size; static constexpr const uint32_t _type_index = TypeIndex::kRuntimeString; static constexpr const char* _type_key = "runtime.String"; TVM_DECLARE_FINAL_OBJECT_INFO(StringObj, Object); private: /*! \brief String object which is moved from std::string container. */ class FromStd; friend class String; }; /*! * \brief Reference to string objects. 
* * \code * * // Example to create runtime String reference object from std::string * std::string s = "hello world"; * * // You can create the reference from existing std::string * String ref{std::move(s)}; * * // You can rebind the reference to another string. * ref = std::string{"hello world2"}; * * // You can use the reference as hash map key * std::unordered_map<String, int32_t> m; * m[ref] = 1; * * // You can compare the reference object with other string objects * assert(ref == "hello world", true); * * // You can convert the reference to std::string again * string s2 = (string)ref; * * \endcode */ class String : public ObjectRef { public: /*! * \brief Construct an empty string. */ String() : String(std::string()) {} /*! * \brief Construct a new String object * * \param other The moved/copied std::string object * * \note If user passes const reference, it will trigger copy. If it's rvalue, * it will be moved into other. */ String(std::string other); // NOLINT(*) /*! * \brief Construct a new String object * * \param other a char array. */ String(const char* other) // NOLINT(*) : String(std::string(other)) {} /*! * \brief Construct a new null object */ String(std::nullptr_t) // NOLINT(*) : ObjectRef(nullptr) {} /*! * \brief Change the value the reference object points to. * * \param other The value for the new String * */ inline String& operator=(std::string other); /*! * \brief Change the value the reference object points to. * * \param other The value for the new String */ inline String& operator=(const char* other); /*! * \brief Compares this String object to other * * \param other The String to compare with. * * \return zero if both char sequences compare equal. negative if this appear * before other, positive otherwise. */ int compare(const String& other) const { return memncmp(data(), other.data(), size(), other.size()); } /*! * \brief Compares this String object to other * * \param other The string to compare with. 
* * \return zero if both char sequences compare equal. negative if this appear * before other, positive otherwise. */ int compare(const std::string& other) const { return memncmp(data(), other.data(), size(), other.size()); } /*! * \brief Compares this to other * * \param other The character array to compare with. * * \return zero if both char sequences compare equal. negative if this appear * before other, positive otherwise. */ int compare(const char* other) const { return memncmp(data(), other, size(), std::strlen(other)); } /*! * \brief Returns a pointer to the char array in the string. * * \return const char* */ const char* c_str() const { return get()->data; } /*! * \brief Return the length of the string * * \return size_t string length */ size_t size() const { const auto* ptr = get(); return ptr->size; } /*! * \brief Return the length of the string * * \return size_t string length */ size_t length() const { return size(); } /*! * \brief Retun if the string is empty * * \return true if empty, false otherwise. */ bool empty() const { return size() == 0; } /*! * \brief Read an element. * \param pos The position at which to read the character. * * \return The char at position */ char at(size_t pos) const { if (pos < size()) { return data()[pos]; } else { throw std::out_of_range("tvm::String index out of bounds"); } } /*! * \brief Return the data pointer * * \return const char* data pointer */ const char* data() const { return get()->data; } /*! * \brief Convert String to an std::string object * * \return std::string */ operator std::string() const { return std::string{get()->data, size()}; } /*! * \brief Check if a TVMArgValue can be converted to String, i.e. it can be std::string or String * \param val The value to be checked * \return A boolean indicating if val can be converted to String */ inline static bool CanConvertFrom(const TVMArgValue& val); /*! * \brief Hash the binary bytes * \param data The data pointer * \param size The size of the bytes. 
* \return the hash value. */ static size_t HashBytes(const char* data, size_t size) { // This function falls back to string copy with c++11 compiler and is // recommended to be compiled with c++14 return std::hash<std::string_view>()(std::string_view(data, size)); } TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(String, ObjectRef, StringObj); private: /*! * \brief Compare two char sequence * * \param lhs Pointers to the char array to compare * \param rhs Pointers to the char array to compare * \param lhs_count Length of the char array to compare * \param rhs_count Length of the char array to compare * \return int zero if both char sequences compare equal. negative if this * appear before other, positive otherwise. */ static int memncmp(const char* lhs, const char* rhs, size_t lhs_count, size_t rhs_count); /*! * \brief Concatenate two char sequences * * \param lhs Pointers to the lhs char array * \param lhs_size The size of the lhs char array * \param rhs Pointers to the rhs char array * \param rhs_size The size of the rhs char array * * \return The concatenated char sequence */ static String Concat(const char* lhs, size_t lhs_size, const char* rhs, size_t rhs_size) { std::string ret(lhs, lhs_size); ret.append(rhs, rhs_size); return String(ret); } // Overload + operator friend String operator+(const String& lhs, const String& rhs); friend String operator+(const String& lhs, const std::string& rhs); friend String operator+(const std::string& lhs, const String& rhs); friend String operator+(const String& lhs, const char* rhs); friend String operator+(const char* lhs, const String& rhs); friend struct tvm::runtime::ObjectEqual; }; /*! \brief An object representing string moved from std::string. */ class StringObj::FromStd : public StringObj { public: /*! * \brief Construct a new FromStd object * * \param other The moved/copied std::string object * * \note If user passes const reference, it will trigger copy. If it's rvalue, * it will be moved into other. 
*/ explicit FromStd(std::string other) : data_container{other} {} private: /*! \brief Container that holds the memory. */ std::string data_container; friend class String; }; inline String::String(std::string other) { auto ptr = make_object<StringObj::FromStd>(std::move(other)); ptr->size = ptr->data_container.size(); ptr->data = ptr->data_container.data(); data_ = std::move(ptr); } inline String& String::operator=(std::string other) { String replace{std::move(other)}; data_.swap(replace.data_); return *this; } inline String& String::operator=(const char* other) { return operator=(std::string(other)); } inline String operator+(const String& lhs, const String& rhs) { size_t lhs_size = lhs.size(); size_t rhs_size = rhs.size(); return String::Concat(lhs.data(), lhs_size, rhs.data(), rhs_size); } inline String operator+(const String& lhs, const std::string& rhs) { size_t lhs_size = lhs.size(); size_t rhs_size = rhs.size(); return String::Concat(lhs.data(), lhs_size, rhs.data(), rhs_size); } inline String operator+(const std::string& lhs, const String& rhs) { size_t lhs_size = lhs.size(); size_t rhs_size = rhs.size(); return String::Concat(lhs.data(), lhs_size, rhs.data(), rhs_size); } inline String operator+(const char* lhs, const String& rhs) { size_t lhs_size = std::strlen(lhs); size_t rhs_size = rhs.size(); return String::Concat(lhs, lhs_size, rhs.data(), rhs_size); } inline String operator+(const String& lhs, const char* rhs) { size_t lhs_size = lhs.size(); size_t rhs_size = std::strlen(rhs); return String::Concat(lhs.data(), lhs_size, rhs, rhs_size); } // Overload < operator inline bool operator<(const String& lhs, const std::string& rhs) { return lhs.compare(rhs) < 0; } inline bool operator<(const std::string& lhs, const String& rhs) { return rhs.compare(lhs) > 0; } inline bool operator<(const String& lhs, const String& rhs) { return lhs.compare(rhs) < 0; } inline bool operator<(const String& lhs, const char* rhs) { return lhs.compare(rhs) < 0; } inline bool 
operator<(const char* lhs, const String& rhs) { return rhs.compare(lhs) > 0; } // Overload > operator inline bool operator>(const String& lhs, const std::string& rhs) { return lhs.compare(rhs) > 0; } inline bool operator>(const std::string& lhs, const String& rhs) { return rhs.compare(lhs) < 0; } inline bool operator>(const String& lhs, const String& rhs) { return lhs.compare(rhs) > 0; } inline bool operator>(const String& lhs, const char* rhs) { return lhs.compare(rhs) > 0; } inline bool operator>(const char* lhs, const String& rhs) { return rhs.compare(lhs) < 0; } // Overload <= operator inline bool operator<=(const String& lhs, const std::string& rhs) { return lhs.compare(rhs) <= 0; } inline bool operator<=(const std::string& lhs, const String& rhs) { return rhs.compare(lhs) >= 0; } inline bool operator<=(const String& lhs, const String& rhs) { return lhs.compare(rhs) <= 0; } inline bool operator<=(const String& lhs, const char* rhs) { return lhs.compare(rhs) <= 0; } inline bool operator<=(const char* lhs, const String& rhs) { return rhs.compare(lhs) >= 0; } // Overload >= operator inline bool operator>=(const String& lhs, const std::string& rhs) { return lhs.compare(rhs) >= 0; } inline bool operator>=(const std::string& lhs, const String& rhs) { return rhs.compare(lhs) <= 0; } inline bool operator>=(const String& lhs, const String& rhs) { return lhs.compare(rhs) >= 0; } inline bool operator>=(const String& lhs, const char* rhs) { return lhs.compare(rhs) >= 0; } inline bool operator>=(const char* lhs, const String& rhs) { return rhs.compare(rhs) <= 0; } // Overload == operator inline bool operator==(const String& lhs, const std::string& rhs) { return lhs.compare(rhs) == 0; } inline bool operator==(const std::string& lhs, const String& rhs) { return rhs.compare(lhs) == 0; } inline bool operator==(const String& lhs, const String& rhs) { return lhs.compare(rhs) == 0; } inline bool operator==(const String& lhs, const char* rhs) { return lhs.compare(rhs) == 0; } 
inline bool operator==(const char* lhs, const String& rhs) { return rhs.compare(lhs) == 0; } // Overload != operator inline bool operator!=(const String& lhs, const std::string& rhs) { return lhs.compare(rhs) != 0; } inline bool operator!=(const std::string& lhs, const String& rhs) { return rhs.compare(lhs) != 0; } inline bool operator!=(const String& lhs, const String& rhs) { return lhs.compare(rhs) != 0; } inline bool operator!=(const String& lhs, const char* rhs) { return lhs.compare(rhs) != 0; } inline bool operator!=(const char* lhs, const String& rhs) { return rhs.compare(lhs) != 0; } inline std::ostream& operator<<(std::ostream& out, const String& input) { out.write(input.data(), input.size()); return out; } inline int String::memncmp(const char* lhs, const char* rhs, size_t lhs_count, size_t rhs_count) { if (lhs == rhs && lhs_count == rhs_count) return 0; for (size_t i = 0; i < lhs_count && i < rhs_count; ++i) { if (lhs[i] < rhs[i]) return -1; if (lhs[i] > rhs[i]) return 1; } if (lhs_count < rhs_count) { return -1; } else if (lhs_count > rhs_count) { return 1; } else { return 0; } } inline size_t ObjectHash::operator()(const ObjectRef& a) const { if (const auto* str = a.as<StringObj>()) { return String::HashBytes(str->data, str->size); } return ObjectPtrHash()(a); } inline bool ObjectEqual::operator()(const ObjectRef& a, const ObjectRef& b) const { if (a.same_as(b)) { return true; } if (const auto* str_a = a.as<StringObj>()) { if (const auto* str_b = b.as<StringObj>()) { return String::memncmp(str_a->data, str_b->data, str_a->size, str_b->size) == 0; } } return false; } } // namespace runtime // expose the functions to the root namespace. 
using runtime::String; using runtime::StringObj; } // namespace tvm namespace std { template <> struct hash<::tvm::runtime::String> { std::size_t operator()(const ::tvm::runtime::String& str) const { return ::tvm::runtime::String::HashBytes(str.data(), str.size()); } }; } // namespace std #endif // TVM_RUNTIME_CONTAINER_STRING_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/contrib/libtorch_runtime.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*!
 * \brief runtime implementation for LibTorch/TorchScript.
 */
#ifndef TVM_RUNTIME_CONTRIB_LIBTORCH_RUNTIME_H_
#define TVM_RUNTIME_CONTRIB_LIBTORCH_RUNTIME_H_

#include <tvm/runtime/module.h>

#include <string>

namespace tvm {
namespace runtime {
namespace contrib {

/*!
 * \brief Create a LibTorch/TorchScript runtime module.
 * \param symbol_name The name the module is registered under.
 * \param serialized_function The serialized function payload
 *        (presumably a serialized TorchScript function — confirm with the
 *        implementation in the corresponding .cc file).
 * \return The created runtime module.
 */
runtime::Module TorchRuntimeCreate(const String& symbol_name,
                                   const std::string& serialized_function);

}  // namespace contrib
}  // namespace runtime
}  // namespace tvm

#endif  // TVM_RUNTIME_CONTRIB_LIBTORCH_RUNTIME_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/contrib/papi.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*!
 * \brief Performance counters for profiling via the PAPI library.
 */
#ifndef TVM_RUNTIME_CONTRIB_PAPI_H_
#define TVM_RUNTIME_CONTRIB_PAPI_H_

#include <tvm/runtime/container/array.h>
#include <tvm/runtime/container/map.h>
#include <tvm/runtime/profiling.h>

namespace tvm {
namespace runtime {
namespace profiling {

/*! \brief Construct a metric collector that collects data from hardware
 * performance counters using the Performance Application Programming Interface
 * (PAPI).
 *
 * \param metrics A mapping from a device type to the metrics that should be
 * collected on that device. You can find the names of available metrics by
 * running `papi_native_avail`.
 * \return The constructed MetricCollector.
 */
TVM_DLL MetricCollector CreatePAPIMetricCollector(Map<DeviceWrapper, Array<String>> metrics);
}  // namespace profiling
}  // namespace runtime
}  // namespace tvm

#endif  // TVM_RUNTIME_CONTRIB_PAPI_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/crt/aot_executor.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file aot_executor.h
 * \brief AoT Executor
 */
#ifndef TVM_RUNTIME_CRT_AOT_EXECUTOR_H_
#define TVM_RUNTIME_CRT_AOT_EXECUTOR_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <dlpack/dlpack.h>
#include <tvm/runtime/crt/internal/common/ndarray.h>
#include <tvm/runtime/metadata_types.h>

typedef struct TVMMetadata TVMMetadata;

/*! \brief State for a single AoT executor instance. */
typedef struct TVMAotExecutor {
  /*! \brief The top-level metadata structure supplied by the generated code */
  const TVMMetadata* metadata;
  /*! \brief The code module that contains the compiled model */
  TVMModuleHandle module_handle;
  /*! \brief The device type */
  DLDevice device;
  /*! \brief List of allocated arguments, input(s), output(s), and pool(s)*/
  TVMNDArray* args;
  /*! \brief Number of entries in `args` */
  int64_t num_args;
} TVMAotExecutor;

/*!
 * \brief Allocate a new AotExecutor with TVMPlatformMemoryAllocate and initialize it.
 *
 * \param module_handle TVM Module that exposes the functions to call.
 * \param device Runtime execution device, only supports device type kDLCPU, index 0.
 * \param executor Pointer which receives a pointer to the newly-created instance.
 * \param module_name TVM Module name prefix, typically "default".
 * \return 0 if successful.
 */
int TVMAotExecutor_Create(TVMModuleHandle module_handle, const DLDevice device,
                          TVMAotExecutor** executor, const char* module_name);

/*!
 * \brief Release the AoT executor created by TVMAotExecutor_Create().
 *
 * \param executor Pointer to executor instance, created by TVMAotExecutor_Create().
 * \param device Runtime execution device, only supports device type kDLCPU, index 0.
 * \return 0 if successful.
 */
int TVMAotExecutor_Release(TVMAotExecutor* executor, const DLDevice device);

/*!
 * \brief Return the number of inputs.
 *
 * \param executor Pointer to executor instance, created by TVMAotExecutor_Create().
 * \return Number of inputs.
 */
int TVMAotExecutor_GetNumInputs(TVMAotExecutor* executor);

/*!
 * \brief Return the number of outputs.
 *
 * \param executor Pointer to executor instance, created by TVMAotExecutor_Create().
 * \return Number of outputs.
 */
int TVMAotExecutor_GetNumOutputs(TVMAotExecutor* executor);

/*!
 * \brief Return the input index of the specified input name
 *
 * \param executor Pointer to executor instance, created by TVMAotExecutor_Create().
 * \param name Input name for retrieving index.
 * \return Input index.
 */
int TVMAotExecutor_GetInputIndex(TVMAotExecutor* executor, const char* name);

/*!
 * \brief Run the generated program.
 *
 * \param executor Pointer to executor instance, created by TVMAotExecutor_Create().
 * \return 0 if successful.
 */
int TVMAotExecutor_Run(TVMAotExecutor* executor);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // TVM_RUNTIME_CRT_AOT_EXECUTOR_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/crt/aot_executor_module.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file aot_executor_module.h
 * \brief Tiny AoT executor
 */
/* NOTE(review): the original \file tag said "graph_executor.h", an apparent
 * copy-paste error; corrected to match this header. */
#ifndef TVM_RUNTIME_CRT_AOT_EXECUTOR_MODULE_H_
#define TVM_RUNTIME_CRT_AOT_EXECUTOR_MODULE_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <tvm/runtime/crt/error_codes.h>

/*!
 * \brief Register the "tvm.aot_executor.create" constructor PackedFunc.
 * \return An error code indicating the outcome of registration.
 */
tvm_crt_error_t TVMAotExecutorModule_Register();

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // TVM_RUNTIME_CRT_AOT_EXECUTOR_MODULE_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/crt/crt.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/runtime/crt/crt.h
 * \brief Defines core life cycle functions used by CRT.
 */

#ifndef TVM_RUNTIME_CRT_CRT_H_
#define TVM_RUNTIME_CRT_CRT_H_

#include <inttypes.h>
#include <tvm/runtime/crt/error_codes.h>

#ifdef __cplusplus
extern "C" {
#endif

/*!
 * \brief Initialize various data structures used by the runtime.
 * Prior to calling this, any initialization needed to support TVMPlatformMemory* functions should
 * be completed.
 * \return An error code describing the outcome of initialization. Generally, initialization
 *     is only expected to fail due to a misconfiguration.
 */
tvm_crt_error_t TVMInitializeRuntime();

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // TVM_RUNTIME_CRT_CRT_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/crt/error_codes.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file include/tvm/runtime/crt/error_codes.h
 * \brief Defines integral error codes returned by the CRT.
 */
#ifndef TVM_RUNTIME_CRT_ERROR_CODES_H_
#define TVM_RUNTIME_CRT_ERROR_CODES_H_

#ifdef __cplusplus
extern "C" {
#endif

/* Error values pack an 8-bit category and an 8-bit per-category code. */
#define TVM_CRT_ERROR_CATEGORY_Pos 8
#define TVM_CRT_ERROR_CATEGORY_Msk (0xff << TVM_CRT_ERROR_CATEGORY_Pos)
#define TVM_CRT_ERROR_CODE_Pos 0
#define TVM_CRT_ERROR_CODE_Msk (0xff << TVM_CRT_ERROR_CODE_Pos)

/* Build a tvm_crt_error_t value from a category and a code. */
#define DEFINE_TVM_CRT_ERROR(category, code) \
  (((category) << TVM_CRT_ERROR_CATEGORY_Pos) | ((code) << TVM_CRT_ERROR_CODE_Pos))

/*! \brief Error categories; stored in the high byte of an error value. */
typedef enum {
  kTvmErrorCategoryFunctionRegistry = 1,
  kTvmErrorCategoryFraming = 2,
  kTvmErrorCategoryWriteStream = 3,
  kTvmErrorCategorySession = 4,
  kTvmErrorCategoryPlatform = 5,
  kTvmErrorCategoryGenerated = 6,
  kTvmErrorCategoryExecutor = 7,
  kTvmErrorCategoryFunctionCall = 8,
  kTvmErrorCategoryTimeEvaluator = 9,
} tvm_crt_error_category_t;

/*! \brief Integral error codes returned by CRT functions. */
typedef enum {
  kTvmErrorNoError = 0,

  // Function Registry
  kTvmErrorFunctionNameNotFound = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryFunctionRegistry, 0),
  kTvmErrorFunctionIndexInvalid = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryFunctionRegistry, 1),
  kTvmErrorFunctionRegistryFull = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryFunctionRegistry, 2),
  kTvmErrorFunctionAlreadyDefined = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryFunctionRegistry, 3),
  kTvmErrorBufferTooSmall = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryFunctionRegistry, 4),

  // Framing
  kTvmErrorFramingInvalidState = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryFraming, 0),
  kTvmErrorFramingShortPacket = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryFraming, 1),
  kTvmErrorFramingInvalidEscape = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryFraming, 2),
  kTvmErrorFramingPayloadOverflow = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryFraming, 3),
  kTvmErrorFramingPayloadIncomplete = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryFraming, 4),

  // Write stream
  kTvmErrorWriteStreamShortWrite = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryWriteStream, 0),
  kTvmErrorWriteStreamLongWrite = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryWriteStream, 1),

  // Session
  kTvmErrorSessionInvalidState = DEFINE_TVM_CRT_ERROR(kTvmErrorCategorySession, 0),
  kTvmErrorSessionReceiveBufferBusy = DEFINE_TVM_CRT_ERROR(kTvmErrorCategorySession, 1),
  kTvmErrorSessionReceiveBufferShortWrite = DEFINE_TVM_CRT_ERROR(kTvmErrorCategorySession, 2),

  // Platform
  kTvmErrorPlatformCheckFailure = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryPlatform, 0),
  kTvmErrorPlatformMemoryManagerInitialized = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryPlatform, 1),
  kTvmErrorPlatformShutdown = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryPlatform, 2),
  kTvmErrorPlatformNoMemory = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryPlatform, 3),
  kTvmErrorPlatformTimerBadState = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryPlatform, 4),
  kTvmErrorPlatformStackAllocBadFree = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryPlatform, 5),

  // Common error codes returned from generated functions.
  kTvmErrorGeneratedInvalidStorageId = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryGenerated, 0),

  // Graph or AoT executor
  kTvmErrorExecutorModuleAlreadyCreated = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryExecutor, 0),
  kTvmErrorExecutorModuleBadContext = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryExecutor, 1),
  kTvmErrorExecutorModuleNoSuchInput = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryExecutor, 2),

  // Function Calls - common problems encountered calling functions.
  kTvmErrorFunctionCallNumArguments = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryFunctionCall, 0),
  kTvmErrorFunctionCallWrongArgType = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryFunctionCall, 1),
  kTvmErrorFunctionCallNotImplemented = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryFunctionCall, 2),
  kTvmErrorFunctionCallInvalidArg = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryFunctionCall, 3),

  // Time Evaluator - times functions for use with debug runtime.
  kTvmErrorTimeEvaluatorBadHandle = DEFINE_TVM_CRT_ERROR(kTvmErrorCategoryTimeEvaluator, 0),

  // System errors are always negative integers; this mask indicates presence of a system error.
  // Cast tvm_crt_error_t to a signed integer to interpret the negative error code.
  kTvmErrorSystemErrorMask = (1 << (sizeof(int) * 8 - 1)),
} tvm_crt_error_t;

#ifdef __cplusplus
}
#endif

#endif  // TVM_RUNTIME_CRT_ERROR_CODES_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/crt/func_registry.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file include/tvm/runtime/crt/func_registry.h
 * \brief Defines generic string-based function lookup structs
 */
#ifndef TVM_RUNTIME_CRT_FUNC_REGISTRY_H_
#define TVM_RUNTIME_CRT_FUNC_REGISTRY_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <tvm/runtime/c_backend_api.h>
#include <tvm/runtime/crt/error_codes.h>

/*! \brief 0-based index of a function within a TVMFuncRegistry. */
typedef uint16_t tvm_function_index_t;

/*! \brief 0-based index of a registered module. */
typedef uint16_t tvm_module_index_t;

/*!
 * \brief A data structure that facilitates function lookup by C-string name.
 */
typedef struct TVMFuncRegistry {
  /*! \brief Names of registered functions, concatenated together and separated by \0.
   * An additional \0 is present at the end of the concatenated blob to mark the end.
   *
   * Byte 0 and 1 are the number of functions in `funcs`.
   */
  const char* names;

  /*! \brief Function pointers, in the same order as their names in `names`. */
  const TVMBackendPackedCFunc* funcs;
} TVMFuncRegistry;

/*!
 * \brief Get the number of functions from the registry.
 *
 * \param reg TVMFuncRegistry instance that contains the functions.
 * \return The number of functions in the registry.
 */
uint16_t TVMFuncRegistry_GetNumFuncs(const TVMFuncRegistry* reg);

/*!
 * \brief Set the number of functions in the registry.
 *
 * The count is encoded in the first two bytes of `names`.
 *
 * \param reg TVMFuncRegistry instance that contains the functions.
 * \param num_funcs The number of functions
 * \return 0 when successful.
 */
int TVMFuncRegistry_SetNumFuncs(const TVMFuncRegistry* reg, const uint16_t num_funcs);

/*!
 * \brief Get the name of the 0th function in the registry.
 *
 * \param reg TVMFuncRegistry instance that contains the functions.
 * \return Pointer to the name of the 0th function in the registry (i.e. just past the
 *         two-byte function count at the start of `names`).
 */
const char* TVMFuncRegistry_Get0thFunctionName(const TVMFuncRegistry* reg);

/*!
 * \brief Get packed function from registry by name.
 *
 * \param reg TVMFuncRegistry instance that contains the function.
 * \param name The function name
 * \param function_index Pointer to receive the 0-based index of the function in the registry, if it
 * was found. Unmodified otherwise.
 * \return kTvmErrorNoError when successful. kTvmErrorFunctionNameNotFound when no function matched
 * `name`.
 */
tvm_crt_error_t TVMFuncRegistry_Lookup(const TVMFuncRegistry* reg, const char* name,
                                       tvm_function_index_t* function_index);

/*!
 * \brief Fetch TVMBackendPackedCFunc given a function index
 *
 * \param reg TVMFuncRegistry instance that contains the function.
 * \param index Index of the function.
 * \param out_func Pointer which receives the function pointer at `index`, if a valid
 * index was given. Unmodified otherwise.
 * \return kTvmErrorNoError when successful. kTvmErrorFunctionIndexInvalid when index was out of
 * range.
 */
tvm_crt_error_t TVMFuncRegistry_GetByIndex(const TVMFuncRegistry* reg, tvm_function_index_t index,
                                           TVMBackendPackedCFunc* out_func);

/*!
 * \brief A TVMFuncRegistry that supports adding and changing the functions.
 */
typedef struct TVMMutableFuncRegistry {
  TVMFuncRegistry registry;

  /*! \brief maximum number of functions in this registry. */
  size_t max_functions;
} TVMMutableFuncRegistry;

// Defined to work around compiler limitations.
#define TVM_AVERAGE_FUNCTION_NAME_STRLEN_BYTES 10

/*!
 * \brief Size of an average function name in a TVMMutableFuncRegistry, in bytes.
 *
 * This is just an assumption made by the runtime for ease of use.
 */
static const size_t kTvmAverageFunctionNameStrlenBytes = TVM_AVERAGE_FUNCTION_NAME_STRLEN_BYTES;

/*!
 * \brief Size of an average entry in a TVMMutableFuncRegistry, in bytes.
 *
 * Assumes a constant average function name length (name + trailing \0 + function pointer).
 */
static const size_t kTvmAverageFuncEntrySizeBytes =
    TVM_AVERAGE_FUNCTION_NAME_STRLEN_BYTES + 1 + sizeof(void*);

/*!
 * \brief Create a new mutable function registry from a block of memory.
 *
 * \param reg TVMMutableFuncRegistry to create.
 * \param buffer Backing memory available for this function registry.
 * \param buffer_size_bytes Number of bytes available in buffer.
 * \return kTvmErrorNoError when successful. kTvmErrorBufferTooSmall when buffer_size_bytes is so
 * small that a single function cannot be registered.
 */
tvm_crt_error_t TVMMutableFuncRegistry_Create(TVMMutableFuncRegistry* reg, uint8_t* buffer,
                                              size_t buffer_size_bytes);

/*!
 * \brief Add or set a function in the registry.
 *
 * \param reg The mutable function registry to affect.
 * \param name Name of the function.
 * \param func The function pointer.
 * \param override non-zero if an existing entry should be overridden.
 * \return kTvmErrorNoError when successful. kTvmErrorFunctionRegistryFull when `reg` already
 * contains `max_functions` entries. kTvmErrorFunctionAlreadyDefined when a function named `name`
 * is already present in the registry, and `override` == 0.
 */
tvm_crt_error_t TVMMutableFuncRegistry_Set(TVMMutableFuncRegistry* reg, const char* name,
                                           TVMBackendPackedCFunc func, int override);

#ifdef __cplusplus
}
#endif

#endif  // TVM_RUNTIME_CRT_FUNC_REGISTRY_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/crt/graph_executor.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file graph_executor.h * \brief Tiny graph executor that can run graph containing only tvm PackedFunc. */ #ifndef TVM_RUNTIME_CRT_GRAPH_EXECUTOR_H_ #define TVM_RUNTIME_CRT_GRAPH_EXECUTOR_H_ #ifdef __cplusplus extern "C" { #endif #include <dlpack/dlpack.h> #include <tvm/runtime/c_runtime_api.h> #include <tvm/runtime/crt/packed_func.h> struct TVMModule; /*! \brief operator attributes about tvm op */ typedef struct TVMOpParam { char func_name[TVM_CRT_MAX_STRLEN_FUNCTION_NAME]; uint32_t num_inputs; uint32_t num_outputs; uint32_t flatten_data; } TVMOpParam; // Graph attribute typedef struct TVMGraphExecutorGraphAttr { uint32_t storage_num_not_alloctaed; uint32_t* storage_id; uint32_t* device_index; char* dltype; // "int8", "int16", "float32" uint32_t dltype_count; int64_t* shape; uint32_t* ndim; uint32_t shape_count; } TVMGraphExecutorGraphAttr; typedef struct TVMGraphExecutor TVMGraphExecutor; // public functions /*! * \brief Allocate a new GraphExecutor with TVMPlatformMemoryAllocate and initialize it. * * \param sym_json JSON-encoded graph. * \param module_handle TVM Module that exposes the functions to call. * \param devices runtime execution device. 
* \param executor Pointer which receives a pointer to the newly-created instance. * \return 0 if successful. */ int TVMGraphExecutor_Create(const char* sym_json, TVMModuleHandle module_handle, const DLDevice* devices, TVMGraphExecutor** executor); int TVMGraphExecutor_GetInputIndex(TVMGraphExecutor* executor, const char* name); /*! * \brief get number of input tensors allocated. * \return integer number of tensors available to use. */ int TVMGraphExecutor_GetNumInputs(); /*! * \brief set input to the graph based on name. * \param executor The graph executor. * \param name The name of the input. * \param data_in The input data. */ void TVMGraphExecutor_SetInput(TVMGraphExecutor* executor, const char* name, DLTensor* data_in); /*! * \brief get number of output tensors allocated. * \return integer number of output tensors allocated. */ int TVMGraphExecutor_GetNumOutputs(); /*! * \brief Return NDArray for given output index. * \param executor The graph executor. * \param index The output index. * \param out The DLTensor corresponding to given output node index. * \return The result of this function execution. */ int TVMGraphExecutor_GetOutput(TVMGraphExecutor* executor, const int32_t index, DLTensor* out); /*! * \brief Load parameters from parameter blob. * \param executor The graph executor. * \param param_blob A binary blob of parameter. * \param param_size The parameter size. * \return The result of this function execution. */ int TVMGraphExecutor_LoadParams(TVMGraphExecutor* executor, const char* param_blob, const uint32_t param_size); /*! * \brief Execute the graph. * \param executor The graph executor. */ void TVMGraphExecutor_Run(TVMGraphExecutor* executor); /*! * \brief Release memory associated with the graph executor. * \param executor Pointer to graph executor. * \return 0 if successful */ int TVMGraphExecutor_Release(TVMGraphExecutor** executor); #ifdef __cplusplus } // extern "C" #endif #endif // TVM_RUNTIME_CRT_GRAPH_EXECUTOR_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/crt/graph_executor_module.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file graph_executor_module.h * \brief Tiny graph executor that can run graph containing only tvm PackedFunc. */ #ifndef TVM_RUNTIME_CRT_GRAPH_EXECUTOR_MODULE_H_ #define TVM_RUNTIME_CRT_GRAPH_EXECUTOR_MODULE_H_ #ifdef __cplusplus extern "C" { #endif #include <tvm/runtime/crt/error_codes.h> /*! * \brief Register the "tvm.graph_executor.create" constructor PackedFunc. */ tvm_crt_error_t TVMGraphExecutorModule_Register(); #ifdef __cplusplus } // extern "C" #endif #endif // TVM_RUNTIME_CRT_GRAPH_EXECUTOR_MODULE_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/crt/logging.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file runtime/crt/logging.h * \brief A replacement of the dmlc logging system that avoids * the usage of GLOG and C++ headers */ #ifndef TVM_RUNTIME_CRT_LOGGING_H_ #define TVM_RUNTIME_CRT_LOGGING_H_ #include <tvm/runtime/crt/platform.h> #define TVM_CRT_LOG_LEVEL_DEBUG 3 #define TVM_CRT_LOG_LEVEL_INFO 2 #define TVM_CRT_LOG_LEVEL_WARN 1 #define TVM_CRT_LOG_LEVEL_ERROR 0 #ifdef __cplusplus extern "C" { #endif #if defined(_MSC_VER) void TVMLogf(const char* fmt, ...); #else void __attribute__((format(printf, 1, 2))) TVMLogf(const char* fmt, ...); #endif #define LOG(level, x, ...) \ if (TVM_CRT_LOG_LEVEL >= level) { \ TVMLogf(x, ##__VA_ARGS__); \ } #define LOG_ERROR(x, ...) LOG(TVM_CRT_LOG_LEVEL_ERROR, x, ##__VA_ARGS__) #define LOG_WARN(x, ...) LOG(TVM_CRT_LOG_LEVEL_WARN, x, ##__VA_ARGS__) #define LOG_INFO(x, ...) LOG(TVM_CRT_LOG_LEVEL_INFO, x, ##__VA_ARGS__) #define LOG_DEBUG(x, ...) LOG(TVM_CRT_LOG_LEVEL_DEBUG, x, ##__VA_ARGS__) #ifndef CHECK #define CHECK(x) \ do { \ if (!(x)) { \ LOG_ERROR(__FILE__ ":%d: Check failed: %s\n", __LINE__, #x); \ TVMPlatformAbort(kTvmErrorPlatformCheckFailure); \ } \ } while (0) #endif #ifndef CHECK_BINARY_OP #define CHECK_BINARY_OP(op, x, y, fmt, ...) 
\ do { \ if (!(x op y)) { \ LOG_ERROR(__FILE__ ":%d: Check failed: %s %s %s: " fmt "\n", __LINE__, #x, #op, #y, \ ##__VA_ARGS__); \ TVMPlatformAbort(kTvmErrorPlatformCheckFailure); \ } \ } while (0) #endif #ifndef CHECK_LT #define CHECK_LT(x, y, fmt, ...) CHECK_BINARY_OP(<, x, y, fmt, ##__VA_ARGS__) #endif #ifndef CHECK_GT #define CHECK_GT(x, y, fmt, ...) CHECK_BINARY_OP(>, x, y, fmt, ##__VA_ARGS__) #endif #ifndef CHECK_LE #define CHECK_LE(x, y, fmt, ...) CHECK_BINARY_OP(<=, x, y, fmt, ##__VA_ARGS__) #endif #ifndef CHECK_GE #define CHECK_GE(x, y, fmt, ...) CHECK_BINARY_OP(>=, x, y, fmt, ##__VA_ARGS__) #endif #ifndef CHECK_EQ #define CHECK_EQ(x, y, fmt, ...) CHECK_BINARY_OP(==, x, y, fmt, ##__VA_ARGS__) #endif #ifndef CHECK_NE #define CHECK_NE(x, y, fmt, ...) CHECK_BINARY_OP(!=, x, y, fmt, ##__VA_ARGS__) #endif #ifdef __cplusplus } // extern "C" #endif #endif // TVM_RUNTIME_CRT_LOGGING_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/crt/microtvm_rpc_server.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file microtvm_rpc_server.h
 * \brief MicroTVM RPC Server
 */
#ifndef TVM_RUNTIME_CRT_MICROTVM_RPC_SERVER_H_
#define TVM_RUNTIME_CRT_MICROTVM_RPC_SERVER_H_

#include <stdlib.h>
#include <sys/types.h>
#include <tvm/runtime/crt/error_codes.h>

#ifdef __cplusplus
extern "C" {
#endif

/*! \brief TVM RPC channel write function.
 *
 * Tries to write `num_bytes` from `data` to the underlying channel.
 * \param context Opaque pointer supplied at server init and passed back on each call.
 * \param data Pointer to data to write.
 * \param num_bytes Number of bytes available in data.
 * \return The number of bytes written.
 */
typedef ssize_t (*microtvm_rpc_channel_write_t)(void* context, const uint8_t* data,
                                                size_t num_bytes);

/*! \brief Opaque pointer type to TVM RPC Server. */
typedef void* microtvm_rpc_server_t;

/*! \brief Initialize the TVM RPC Server.
 *
 * Call this on device startup before calling any other microtvm_rpc_server_ functions.
 *
 * \param write_func A callback function invoked by the TVM RPC Server to write data back to the
 *                   host. Internally, the TVM RPC Server will block until all data in a reply
 *                   packet has been written.
 * \param write_func_ctx An opaque pointer passed to write_func when it is called.
 * \return A pointer to the TVM RPC Server. The pointer is allocated in the same memory space as
 *         the TVM workspace.
 */
microtvm_rpc_server_t MicroTVMRpcServerInit(microtvm_rpc_channel_write_t write_func,
                                            void* write_func_ctx);

/*! \brief Do any tasks suitable for the main thread, and maybe process new incoming data.
 *
 * \param server The TVM RPC Server pointer.
 * \param new_data If not nullptr, a pointer to a buffer pointer, which should point at new input
 *     data to process. On return, updated to point past data that has been consumed.
 * \param new_data_size_bytes Points to the number of valid bytes in `new_data`. On return,
 *     updated to the number of unprocessed bytes remaining in `new_data` (usually 0).
 * \return An error code indicating the outcome of the server main loop iteration.
 */
tvm_crt_error_t MicroTVMRpcServerLoop(microtvm_rpc_server_t server, uint8_t** new_data,
                                      size_t* new_data_size_bytes);

#ifdef __cplusplus
}
#endif

#endif  // TVM_RUNTIME_CRT_MICROTVM_RPC_SERVER_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/crt/module.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file include/tvm/runtime/crt/module.h * \brief Runtime container of the functions */ #ifndef TVM_RUNTIME_CRT_MODULE_H_ #define TVM_RUNTIME_CRT_MODULE_H_ #include <tvm/runtime/c_backend_api.h> #include <tvm/runtime/crt/func_registry.h> #ifdef __cplusplus extern "C" { #endif /*! * \brief Module container of TVM. */ typedef struct TVMModule { /*! \brief The function registry associated with this module. */ const TVMFuncRegistry* registry; } TVMModule; /*! * \brief Create a new module handle from the given TVMModule instance. * \param mod The module instance to register. * \param out_handle Pointer to receive the newly-minted handle for this module. * \return 0 on success, non-zero on error. */ int TVMModCreateFromCModule(const TVMModule* mod, TVMModuleHandle* out_handle); /*! \brief Entry point for the system lib module. */ const TVMModule* TVMSystemLibEntryPoint(void); #ifdef __cplusplus } #endif #endif // TVM_RUNTIME_CRT_MODULE_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/crt/packed_func.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/runtime/crt/packed_func.h
 * \brief Type-erased function used across TVM API.
 *
 * NOTE(review): unlike the sibling CRT headers, this header has no
 * `extern "C"` guards — presumably the CRT is always built as C here;
 * confirm before including from C++ translation units.
 */
#ifndef TVM_RUNTIME_CRT_PACKED_FUNC_H_
#define TVM_RUNTIME_CRT_PACKED_FUNC_H_

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/crt/module.h>
#include <tvm/runtime/crt/platform.h>

#include "crt_config.h"

/*! \brief Parse a dtype string (e.g. "float32") into a DLDataType. */
DLDataType String2DLDataType(const char* s);

/*! \brief Fixed-capacity argument pack for a packed function call. */
typedef struct TVMArgs {
  /*! \brief Argument values; only the first `values_count` entries are valid. */
  TVMValue values[TVM_CRT_MAX_ARGS];
  /*! \brief Per-argument type codes. */
  int tcodes[TVM_CRT_MAX_ARGS]; /* Data type should be identical to type_codes in TVMPackedCFunc */
  /*! \brief Number of valid entries in `values` and `tcodes`. */
  uint32_t values_count;
} TVMArgs;

/*! \brief Build a TVMArgs by copying `values_count` values and type codes. */
TVMArgs TVMArgs_Create(TVMValue* values, uint32_t* tcodes, uint32_t values_count);

/*! \brief A bound, callable packed function plus its argument/return storage. */
typedef struct TVMPackedFunc {
  /*! \brief Function name used for lookup; fixed-size, NUL-terminated. */
  char name[200];
  /*! \brief Handle to the underlying function implementation. */
  TVMFunctionHandle fexec;
  /*! \brief Arguments to pass on the next Call. */
  TVMArgs args;
  /*! \brief Storage for the value returned by the last Call. */
  TVMArgs ret_value;
  /*! \brief Invoke the function with the currently-set args. */
  int (*Call)(struct TVMPackedFunc* pf);
  /*! \brief Replace the argument pack. */
  void (*SetArgs)(struct TVMPackedFunc* pf, const struct TVMArgs* args);
} TVMPackedFunc;

/*! \brief Bind `pf` to the global function registered under `name`. */
int TVMPackedFunc_InitGlobalFunc(TVMPackedFunc* pf, const char* name, const TVMArgs* args);
/*! \brief Bind `pf` to function `name` exported by `module`. */
int TVMPackedFunc_InitModuleFunc(TVMPackedFunc* pf, TVMModuleHandle module, const char* name,
                                 const TVMArgs* args);

/*! \brief Invoke `pf` with its currently-set arguments. */
int TVMPackedFunc_Call(TVMPackedFunc* pf);

/*! \brief Copy `args` into `pf->args`. */
void TVMPackedFunc_SetArgs(TVMPackedFunc* pf, const TVMArgs* args);

/*! \brief Extract argument `index` as a module handle.
 *
 * Aborts the platform (rather than returning an error) when `index` is out of
 * range or the argument's type code is not kTVMModuleHandle.
 */
inline TVMModuleHandle TVMArgs_AsModuleHandle(const TVMArgs* args, size_t index) {
  if (index >= args->values_count) {
    TVMPlatformAbort((tvm_crt_error_t)-1);
  }

  if (args->tcodes[index] != kTVMModuleHandle) {
    TVMPlatformAbort((tvm_crt_error_t)-1);
  }

  return args->values[index].v_handle;
}

/*! \brief Global table of resolved packed functions (defined in the CRT implementation). */
extern TVMPackedFunc* g_fexecs;
/*! \brief Number of valid entries in g_fexecs. */
extern uint32_t g_fexecs_count;

#endif  // TVM_RUNTIME_CRT_PACKED_FUNC_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/crt/page_allocator.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/runtime/crt/page_allocator.h
 * \brief An implementation of a dynamic memory allocator for microcontrollers.
 */
#ifndef TVM_RUNTIME_CRT_PAGE_ALLOCATOR_H_
#define TVM_RUNTIME_CRT_PAGE_ALLOCATOR_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdlib.h>
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/crt/error_codes.h>

/*! \brief Used in testing; global allocated-object counter. */
extern int vleak_size;

typedef struct MemoryManagerInterface MemoryManagerInterface;

/* NOTE(review): the parameter name `interface` collides with a macro of the
 * same name defined by some Windows SDK headers — verify if MSVC support is
 * needed for this header. */
struct MemoryManagerInterface {
  /*!
   * \brief Allocate a chunk of memory.
   * \param interface Pointer to this structure.
   * \param num_bytes Number of bytes requested.
   * \param dev Execution device that will be used with the allocated memory. Must be {kDLCPU, 0}.
   * \param out_ptr A pointer to which is written a pointer to the newly-allocated memory.
   * \return kTvmErrorNoError if successful; a descriptive error code otherwise.
   */
  tvm_crt_error_t (*Allocate)(MemoryManagerInterface* interface, size_t num_bytes, DLDevice dev,
                              void** out_ptr);

  /*!
   * \brief Free a chunk of previously-used memory.
   *
   * \param interface Pointer to this structure.
   * \param ptr A pointer returned from TVMPlatformMemoryAllocate which should be free'd.
   * \param dev Execution device passed to TVMPlatformMemoryAllocate. Fixed to {kDLCPU, 0}.
   * \return kTvmErrorNoError if successful; a descriptive error code otherwise.
   */
  tvm_crt_error_t (*Free)(MemoryManagerInterface* interface, void* ptr, DLDevice dev);

  /*! \brief Used in testing; the number of allocated objects. */
  int vleak_size;
};

/*!
 * Exposed for testing.
 *
 * \param manager Pointer, initialized with the new MemoryManager.
 * \param memory_pool Pointer to the global memory pool used by the CRT.
 * \param memory_pool_size_bytes Size of `memory_pool`, in bytes.
 * \param page_size_bytes_log2 log2 of the page size, in bytes.
 * \return kTvmErrorNoError on success.
 */
tvm_crt_error_t PageMemoryManagerCreate(MemoryManagerInterface** manager, uint8_t* memory_pool,
                                        size_t memory_pool_size_bytes,
                                        size_t page_size_bytes_log2);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // TVM_RUNTIME_CRT_PAGE_ALLOCATOR_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/crt/platform.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/runtime/crt/platform.h * \brief The virtual memory manager for micro-controllers */ #ifndef TVM_RUNTIME_CRT_PLATFORM_H_ #define TVM_RUNTIME_CRT_PLATFORM_H_ #include <stdarg.h> #include <stddef.h> #include <tvm/runtime/c_runtime_api.h> #include <tvm/runtime/crt/error_codes.h> #ifdef __cplusplus extern "C" { #endif /*! \brief Called when an internal error occurs and execution cannot continue. * * The platform should ideally restart or hang at this point. * * \param code An error code. */ #if defined(_MSC_VER) __declspec(noreturn) void TVMPlatformAbort(tvm_crt_error_t code); #else void __attribute__((noreturn)) TVMPlatformAbort(tvm_crt_error_t code); #endif /*! \brief Called by the microTVM RPC server to implement TVMLogf. * * Not required to be implemented when the RPC server is not linked into the binary. This * function's signature matches that of vsnprintf, so trivial implementations can just call * vsnprintf. * * \param out_buf A char buffer where the formatted string should be written. * \param out_buf_size_bytes Number of bytes available for writing in out_buf. * \param fmt The printf-style formatstring. * \param args extra arguments to be formatted. 
* \return number of bytes written. */ size_t TVMPlatformFormatMessage(char* out_buf, size_t out_buf_size_bytes, const char* fmt, va_list args); /*! * \brief Allocate memory for use by TVM. * * When this function returns something other than kTvmErrorNoError, *out_ptr should not be modified * and the caller is not obligated to call TVMPlatformMemoryFree in order to avoid a memory leak. * * \param num_bytes Number of bytes requested. * \param dev Execution device that will be used with the allocated memory. Fixed to {kDLCPU, 0}. * \param out_ptr A pointer to which is written a pointer to the newly-allocated memory. * \return kTvmErrorNoError if successful; a descriptive error code otherwise. */ tvm_crt_error_t TVMPlatformMemoryAllocate(size_t num_bytes, DLDevice dev, void** out_ptr); /*! * \brief Free memory used by TVM. * * \param ptr A pointer returned from TVMPlatformMemoryAllocate which should be free'd. * \param dev Execution device passed to TVMPlatformMemoryAllocate. Fixed to {kDLCPU, 0}. * \return kTvmErrorNoError if successful; a descriptive error code otherwise. */ tvm_crt_error_t TVMPlatformMemoryFree(void* ptr, DLDevice dev); /*! \brief Start a device timer. * * The device timer used must not be running. * * \return kTvmErrorNoError if successful; a descriptive error code otherwise. */ tvm_crt_error_t TVMPlatformTimerStart(); /*! \brief Stop the running device timer and get the elapsed time (in microseconds). * * The device timer used must be running. * * \param elapsed_time_seconds Pointer to write elapsed time into. * * \return kTvmErrorNoError if successful; a descriptive error code otherwise. */ tvm_crt_error_t TVMPlatformTimerStop(double* elapsed_time_seconds); /*! \brief Platform-specific before measurement call. * * A function which is called before calling TVMFuncCall in the TimeEvaluator. * Can be used, for example, to initialize reset global state which may affect the results of * measurement. 
* * \return kTvmErrorNoError if successful; a descriptive error code otherwise. */ tvm_crt_error_t TVMPlatformBeforeMeasurement(); /*! \brief Platform-specific after measurement call. * * A function which is called after calling TVMFuncCall in the TimeEvaluator. * It is the counterpart of the TVMPlatformBeforeMeasurement function. * * \return kTvmErrorNoError if successful; a descriptive error code otherwise. */ tvm_crt_error_t TVMPlatformAfterMeasurement(); /*! \brief Fill a buffer with random data. * * Cryptographically-secure random data is NOT required. This function is intended for use * cases such as filling autotuning input tensors and choosing the nonce used for microTVM RPC. * * This function does not need to be implemented for inference tasks. It is used only by * AutoTVM and the RPC server. When not implemented, an internal weak-linked stub is provided. * * Please take care that across successive resets, this function returns different sequences of * values. If e.g. the random number generator is seeded with the same value, it may make it * difficult for a host to detect device resets during autotuning or host-driven inference. * * \param buffer Pointer to the 0th byte to write with random data. `num_bytes` of random data * should be written here. * \param num_bytes Number of bytes to write. * \return kTvmErrorNoError if successful; a descriptive error code otherwise. */ tvm_crt_error_t TVMPlatformGenerateRandom(uint8_t* buffer, size_t num_bytes); #ifdef __cplusplus } // extern "C" #endif #endif // TVM_RUNTIME_CRT_PLATFORM_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/crt/rpc_common/frame_buffer.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/runtime/crt/rpc_common/frame_buffer.h
 * \brief Defines a buffer for use by the RPC framing layer.
 */
#ifndef TVM_RUNTIME_CRT_RPC_COMMON_FRAME_BUFFER_H_
#define TVM_RUNTIME_CRT_RPC_COMMON_FRAME_BUFFER_H_

#include <inttypes.h>
#include <stdlib.h>

namespace tvm {
namespace runtime {
namespace micro_rpc {

/*!
 * \brief A fixed-capacity byte buffer with a write end and a read cursor,
 * backed by caller-provided memory. Does not own `data`.
 */
class FrameBuffer {
 public:
  /*!
   * \brief Wrap caller-provided memory; starts empty with the read cursor at 0.
   * \param data Backing storage; must outlive this FrameBuffer.
   * \param data_size_bytes Capacity of `data`, in bytes.
   */
  FrameBuffer(uint8_t* data, size_t data_size_bytes)
      : data_{data}, capacity_{data_size_bytes}, num_valid_bytes_{0}, read_cursor_{0} {}

  /*! \brief Append up to data_size_bytes from data; returns bytes actually written. */
  size_t Write(const uint8_t* data, size_t data_size_bytes);

  /*! \brief Copy out up to data_size_bytes, advancing the read cursor; returns bytes read. */
  size_t Read(uint8_t* data, size_t data_size_bytes);

  /*! \brief Like Read, but does not advance the read cursor. */
  size_t Peek(uint8_t* data, size_t data_size_bytes);

  /*! \brief Discard all buffered data and reset the read cursor. */
  void Clear();

  /*! \brief Number of bytes buffered but not yet consumed by Read. */
  size_t ReadAvailable() const { return num_valid_bytes_ - read_cursor_; }

  /*! \brief Total number of valid bytes currently stored (read and unread). */
  size_t Size() const { return num_valid_bytes_; }

 private:
  /*! \brief pointer to data buffer. */
  uint8_t* data_;

  /*! \brief The total number of bytes available in data_. Always a power of 2. */
  size_t capacity_;

  /*! \brief Number of valid bytes currently stored in data_, starting at index 0.
   * Write appends at this offset; the buffer is full when it equals capacity_.
   */
  size_t num_valid_bytes_;

  /*! \brief Read cursor position: index into data_ of the next byte Read will return. */
  size_t read_cursor_;
};

}  // namespace micro_rpc
}  // namespace runtime
}  // namespace tvm

#endif  // TVM_RUNTIME_CRT_RPC_COMMON_FRAME_BUFFER_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/crt/rpc_common/framing.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file framing.h
 * \brief Framing for RPC.
 */

#ifndef TVM_RUNTIME_CRT_RPC_COMMON_FRAMING_H_
#define TVM_RUNTIME_CRT_RPC_COMMON_FRAMING_H_

#include <inttypes.h>
#include <stddef.h>
#include <tvm/runtime/crt/error_codes.h>
#include <tvm/runtime/crt/rpc_common/write_stream.h>

namespace tvm {
namespace runtime {
namespace micro_rpc {

/*!
 * \brief Compute a CRC-16 over a block of data, optionally continuing a prior CRC.
 * \param data Bytes to checksum.
 * \param data_size_bytes Number of valid bytes in \p data.
 * \param previous_crc Pointer to a running CRC value to continue from, or NULL
 *        (exact NULL semantics defined in the implementation — confirm there).
 * \return The updated CRC value.
 */
uint16_t crc16_compute(const uint8_t* data, size_t data_size_bytes, uint16_t* previous_crc);

/*! \brief Special byte values used by the framing layer's escape protocol. */
enum class Escape : uint8_t { kEscapeStart = 0xff, kEscapeNop = 0xfe, kPacketStart = 0xfd };

/*! \brief Sizes, in bytes, of the fixed fields in a framed packet. */
class PacketFieldSizeBytes {
 public:
  /*! \brief Size of the payload-length field. */
  static constexpr const size_t kPayloadLength = sizeof(uint32_t);
  /*! \brief Size of the CRC field. */
  static constexpr const size_t kCrc = sizeof(uint16_t);
};

/*!
 * \brief Decodes framed packets from a byte stream, forwarding payloads downstream.
 *
 * Implemented as a state machine that scans for the packet-start marker, then
 * consumes the length, payload, and CRC fields, unescaping as it goes.
 */
class Unframer {
 public:
  /*!
   * \brief Construct an Unframer.
   * \param stream Downstream sink for unframed payload bytes; must outlive this object.
   */
  explicit Unframer(WriteStream* stream)
      : stream_{stream},
        state_{State::kFindPacketStart},
        saw_escape_start_{false},
        num_buffer_bytes_valid_{0} {}

  /*!
   * \brief Push data into unframer and try to decode one packet.
   *
   * This function will return when exactly one packet has been decoded. It may not consume all of
   * `data` in this case, and valid bytes may remain at the end of data.
   *
   * \param data The new data to unframe and send downstream.
   * \param data_size_bytes The number of valid bytes in data.
   * \param bytes_consumed Pointer written with the number of bytes consumed from data.
   * \return
   *      - kTvmErrorNoError when successful -- continue writing data.
   *      - kTvmErrorFramingInvalidState when the Unframer was in or enters an invalid state
   *        (probably indicates memory corruption).
   *      - kTvmErrorFramingShortPacket when a new packet started before the current one ended.
   *      - kTvmErrorFramingInvalidEscape when an invalid escape sequence was seen
   */
  tvm_crt_error_t Write(const uint8_t* data, size_t data_size_bytes, size_t* bytes_consumed);

  /*! \brief Reset unframer to initial state. */
  void Reset();

  /*! \brief Return an underestimate of the number of bytes needed from the wire. */
  size_t BytesNeeded();

 private:
  /*! \brief Scan input for the packet-start marker. */
  tvm_crt_error_t FindPacketStart();
  /*! \brief Consume the payload-length field. */
  tvm_crt_error_t FindPacketLength();
  /*! \brief Consume the payload, then the CRC field. */
  tvm_crt_error_t FindPacketCrc();
  /*! \brief Validate the received CRC and finish the packet. */
  tvm_crt_error_t FindCrcEnd();

  /*! \brief Return true when buffer_ holds at least buffer_full_bytes valid bytes. */
  bool IsBufferFull(size_t buffer_full_bytes) {
    return num_buffer_bytes_valid_ >= buffer_full_bytes;
  }

  /*! \brief Consume input into buffer_ until buffer_ has buffer_full_bytes. */
  tvm_crt_error_t AddToBuffer(size_t buffer_full_bytes, bool update_crc);

  /*! \brief Discard any bytes accumulated in buffer_. */
  void ClearBuffer();

  /*! \brief Unescape and consume input bytes, storing into buffer.
   *
   * \param buffer A buffer to fill with consumed, unescaped bytes.
   * \param buffer_size_bytes Size of buffer, in bytes.
   * \param bytes_filled A pointer to an accumulator to which is added the number of bytes written
   *      to `buffer`.
   * \param update_crc true when the CRC should be updated with the escaped bytes.
   * \return
   *      - kTvmErrorNoError if successful
   *      - kTvmErrorFramingShortPacket if a start-of-packet escape code was encountered. If so,
   *        *bytes_filled indicates the number of bytes before the Escape::kEscapeStart byte.
   *      - kTvmErrorFramingInvalidEscape if an invalid escape sequence was seen.
   *      - kTvmErrorWriteStreamShortWrite if the WriteStream passed to constructor's Write()
   *        function returns 0.
   *      - kTvmErrorWriteStreamShortWrite if the WriteStream passed to constructor's Write()
   *        function returns an invalid positive number.
   *      - Any negative value (i.e. with bits in kTvmErrorSystemErrorMask set) returned by the
   *        WriteStream's Write() function.
   */
  tvm_crt_error_t ConsumeInput(uint8_t* buffer, size_t buffer_size_bytes, size_t* bytes_filled,
                               bool update_crc);

  /*! \brief Downstream sink for unframed payload bytes. */
  WriteStream* stream_;

  /*! \brief Decode state machine states, in the order fields appear on the wire. */
  enum class State : uint8_t {
    kFindPacketStart = 0,
    kFindPacketLength = 1,
    kFindPacketCrc = 2,
    kFindCrcEnd = 3,
  };
  State state_;

  /*! \brief Current input chunk being consumed (set during Write()). */
  const uint8_t* input_;
  /*! \brief Number of valid bytes remaining in input_. */
  size_t input_size_bytes_;

  /*! \brief True when the previous consumed byte was Escape::kEscapeStart. */
  bool saw_escape_start_;

  /*! \brief unframe buffer, sized to the longest framing field. */
  uint8_t buffer_[128];

  /*! \brief number of bytes in buffer that are currently valid. */
  size_t num_buffer_bytes_valid_;

  /*! \brief number of payload bytes left to receive before the CRC begins. */
  size_t num_payload_bytes_remaining_;

  /*! \brief Running CRC value. */
  uint16_t crc_;
};

/*!
 * \brief Escapes and frames outgoing packets, writing them to a WriteStream.
 */
class Framer {
 public:
  // NOTE(review): WriteFunc appears unused in this header — confirm before removing.
  typedef ssize_t (*WriteFunc)(const uint8_t* data, size_t data_size_bytes);

  /*!
   * \brief Construct a Framer.
   * \param stream Sink that receives framed bytes; must outlive this object.
   */
  explicit Framer(WriteStream* stream)
      : stream_{stream}, state_{State::kReset}, num_payload_bytes_remaining_{0} {}

  /*! \brief Frame and write a full packet.
   * \param payload The entire packet payload.
   * \param payload_size_bytes Number of bytes in the packet.
   * \return
   *      - kTvmErrorNoError when no error occurs
   *      - kTvmErrorWriteStreamShortWrite if the WriteStream passed to constructor's Write()
   *        function returns 0.
   *      - kTvmErrorWriteStreamShortWrite if the WriteStream passed to constructor's Write()
   *        function returns an invalid positive number.
   *      - Any negative value (i.e. with bits in kTvmErrorSystemErrorMask set) returned by the
   *        WriteStream's Write() function.
   */
  tvm_crt_error_t Write(const uint8_t* payload, size_t payload_size_bytes);

  /*! \brief Start framing and writing a new packet to the wire.
   *
   * When transmitting payloads that are too large to be buffered, call this function first to send
   * the packet header and length fields.
   *
   * \param payload_size_bytes Number of payload bytes included as part of this packet.
   * \return
   *      - kTvmErrorNoError when no error occurs
   *      - kTvmErrorWriteStreamShortWrite if the WriteStream passed to constructor's Write()
   *        function returns 0.
   *      - kTvmErrorWriteStreamShortWrite if the WriteStream passed to constructor's Write()
   *        function returns an invalid positive number.
   *      - Any negative value (i.e. with bits in kTvmErrorSystemErrorMask set) returned by the
   *        WriteStream's Write() function.
   */
  tvm_crt_error_t StartPacket(size_t payload_size_bytes);

  /*! \brief Write payload data to the wire.
   *
   * When transmitting payloads that are too large to be buffered, call this function after calling
   * StartPacket to escape and transmit framed payloads. This function can be called multiple times
   * for a single packet.
   *
   * \param payload_chunk A piece of the packet payload.
   * \param payload_chunk_size_bytes Number of valid bytes in payload_chunk.
   * \return
   *      - kTvmErrorNoError when no error occurs
   *      - kTvmErrorFramingInvalidState when StartPacket() has not been called.
   *      - kTvmErrorFramingPayloadOverflow when more bytes were requested to be written than were
   *        declared in the payload_size_bytes parameter given to StartPacket().
   *      - kTvmErrorWriteStreamShortWrite if the WriteStream passed to constructor's Write()
   *        function returns 0.
   *      - kTvmErrorWriteStreamShortWrite if the WriteStream passed to constructor's Write()
   *        function returns an invalid positive number.
   *      - Any negative value (i.e. with bits in kTvmErrorSystemErrorMask set) returned by the
   *        WriteStream's Write() function.
   */
  tvm_crt_error_t WritePayloadChunk(const uint8_t* payload_chunk, size_t payload_chunk_size_bytes);

  /*! \brief Finish writing one packet by sending the CRC.
   *
   * When transmitting payloads that are too large to be buffered, call this function after sending
   * the entire payload using WritePayloadChunk.
   *
   * \return
   *      - kTvmErrorNoError when no error occurs
   *      - kTvmErrorFramingInvalidState when StartPacket() has not been called.
   *      - kTvmErrorFramingPayloadIncomplete when less bytes were written using WritePayloadChunk()
   *        than were declared in the payload_size_bytes parameter given to StartPacket().
   *      - kTvmErrorWriteStreamShortWrite if the WriteStream passed to constructor's Write()
   *        function returns 0.
   *      - kTvmErrorWriteStreamShortWrite if the WriteStream passed to constructor's Write()
   *        function returns an invalid positive number.
   *      - Any negative value (i.e. with bits in kTvmErrorSystemErrorMask set) returned by the
   *        WriteStream's Write() function.
   */
  tvm_crt_error_t FinishPacket();

  /*! \brief Reset state of the Framer. */
  void Reset();

 private:
  /*! \brief Maximum size of stack-based buffer. */
  static constexpr const size_t kMaxStackBufferSizeBytes = 128;

  enum class State : uint8_t {
    /*! \brief State entered at construction time or after write error, before first packet sent. */
    kReset = 0,

    /*! \brief State entered after a packet has successfully finished transmitting. */
    kIdle = 1,

    /*! \brief State entered when a packet payload or CRC needs to be transmitted. */
    kTransmitPacketPayload = 2,
  };

  /*!
   * \brief Escape data and write the result to wire, and update crc_.
   *
   * \param data Unescaped data to write.
   * \param data_size_bytes Number of valid bytes in data.
   * \param escape true if escaping should be applied.
   * \param update_crc true if the running CRC (crc_) should be updated with this data.
   * \return kTvmErrorNoError on success, negative value on error.
   */
  tvm_crt_error_t WriteAndCrc(const uint8_t* data, size_t data_size_bytes, bool escape,
                              bool update_crc);

  /*! \brief Called to write framed data to the transport. */
  WriteStream* stream_;

  /*! \brief State of the Framer. */
  State state_;

  /*! \brief When state_ == kTransmitPacketPayload, number of payload bytes left to transmit. */
  size_t num_payload_bytes_remaining_;

  /*! \brief Running CRC value. */
  uint16_t crc_;
};

}  // namespace micro_rpc
}  // namespace runtime
}  // namespace tvm

#endif  // TVM_RUNTIME_CRT_RPC_COMMON_FRAMING_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/crt/rpc_common/session.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file session.h
 * \brief RPC Session
 */

#ifndef TVM_RUNTIME_CRT_RPC_COMMON_SESSION_H_
#define TVM_RUNTIME_CRT_RPC_COMMON_SESSION_H_

#include <inttypes.h>
#include <tvm/runtime/crt/error_codes.h>
#include <tvm/runtime/crt/rpc_common/frame_buffer.h>
#include <tvm/runtime/crt/rpc_common/framing.h>
#include <tvm/runtime/crt/rpc_common/write_stream.h>

namespace tvm {
namespace runtime {
namespace micro_rpc {

/*! \brief Session-layer message types carried in each SessionHeader. */
enum class MessageType : uint8_t {
  kStartSessionInit = 0x00,
  kStartSessionReply = 0x01,
  kTerminateSession = 0x02,
  kLog = 0x03,
  kNormal = 0x10,
};

// SessionHeader is sent on the wire, so it must be packed (no padding) on all compilers.
#if defined(_MSC_VER)
#pragma pack(push, 1)
typedef struct SessionHeader {
  uint16_t session_id;
  MessageType message_type;
} SessionHeader;
#pragma pack(pop)
#else
typedef struct SessionHeader {
  uint16_t session_id;
  MessageType message_type;
} __attribute__((packed)) SessionHeader;
#endif

/*!
 * \brief CRT communication session management class.
 * Assumes the following properties provided by the underlying transport:
 *  - in-order delivery.
 *  - reliable delivery.
 *
 * Specifically, designed for use with UARTs. Will probably work over semihosting, USB, and TCP;
 * will probably not work reliably enough over UDP.
 */
class Session {
 public:
  /*! \brief Callback invoked when a full message is received.
   *
   * This function is called in the following situations:
   *  - When a new session is established (this typically indicates the remote end reset).
   *    In this case, buf is NULL.
   *  - When a log message or normal traffic is received. In this case, buf points to a
   *    valid buffer containing the message content.
   *
   * \param context The value of `message_received_func_context` passed to the constructor.
   * \param message_type The type of session message received. Currently, this is always
   *      either kNormal or kLog.
   * \param buf When message_type is kLog or kNormal, a FrameBuffer whose read cursor is
   *      at the first byte of the message payload. Otherwise (i.e. for session-start
   *      notifications), NULL.
   */
  typedef void (*MessageReceivedFunc)(void* context, MessageType message_type, FrameBuffer* buf);

  /*! \brief An invalid nonce value that typically indicates an unknown nonce. */
  static constexpr const uint8_t kInvalidNonce = 0;

  /*!
   * \brief Construct a Session.
   * \param framer Framer used to send outgoing messages; must outlive this object.
   * \param receive_buffer Buffer for incoming message payloads. May be nullptr when the
   *      Session is used only for startup logging (see note in the body).
   * \param message_received_func Callback invoked when a full message is received.
   * \param message_received_func_context Opaque context forwarded to the callback.
   */
  Session(Framer* framer, FrameBuffer* receive_buffer, MessageReceivedFunc message_received_func,
          void* message_received_func_context)
      : local_nonce_{kInvalidNonce},
        session_id_{0},
        state_{State::kReset},
        receiver_{this},
        framer_{framer},
        receive_buffer_{receive_buffer},
        receive_buffer_has_complete_message_{false},
        message_received_func_{message_received_func},
        message_received_func_context_{message_received_func_context} {
    // Session can be used for system startup logging, before the RPC server is instantiated. In
    // this case, allow receive_buffer_ to be nullptr. The instantiator agrees not to use
    // Receiver().
    if (receive_buffer_ != nullptr) {
      receive_buffer_->Clear();
    }
  }

  /*!
   * \brief Send a session terminate message, usually done at startup to interrupt a hanging remote.
   * \param initial_session_nonce Initial nonce that should be used on the first session start
   *     message. Callers should ensure this is different across device resets.
   * \return kTvmErrorNoError on success, or an error code otherwise.
   */
  tvm_crt_error_t Initialize(uint8_t initial_session_nonce);

  /*!
   * \brief Terminate any previously-established session.
   * \return kTvmErrorNoError on success, or an error code otherwise.
   */
  tvm_crt_error_t TerminateSession();

  /*!
   * \brief Start a new session regardless of state. Sends a kStartSessionInit message.
   *
   * Generally speaking, this function should be called once per device reset by exactly one side
   * in the system. No traffic can flow until this function is called.
   *
   * \return kTvmErrorNoError on success, or an error code otherwise.
   */
  tvm_crt_error_t StartSession();

  /*!
   * \brief Obtain a WriteStream implementation for use by the framing layer.
   * \return A WriteStream to which received data should be written. Owned by this class.
   */
  WriteStream* Receiver() { return &receiver_; }

  /*!
   * \brief Send a full message including header, payload, and CRC footer.
   * \param message_type One of MessageType; distinguishes the type of traffic at the session layer.
   * \param message_data The data contained in the message.
   * \param message_size_bytes The number of valid bytes in message_data.
   * \return kTvmErrorNoError on success, or an error code otherwise.
   */
  tvm_crt_error_t SendMessage(MessageType message_type, const uint8_t* message_data,
                              size_t message_size_bytes);

  /*!
   * \brief Send the framing and session layer headers.
   *
   * This function allows messages to be sent in pieces.
   *
   * \param message_type One of MessageType; distinguishes the type of traffic at the session layer.
   * \param message_size_bytes The size of the message body, in bytes. Excludes the framing and
   *     session layer headers.
   * \return kTvmErrorNoError on success, or an error code otherwise.
   */
  tvm_crt_error_t StartMessage(MessageType message_type, size_t message_size_bytes);

  /*!
   * \brief Send a part of the message body.
   *
   * This function allows messages to be sent in pieces.
   *
   * \param chunk_data The data contained in this message body chunk.
   * \param chunk_size_bytes The number of valid bytes in chunk_data.
   * \return kTvmErrorNoError on success, or an error code otherwise.
   */
  tvm_crt_error_t SendBodyChunk(const uint8_t* chunk_data, size_t chunk_size_bytes);

  /*!
   * \brief Finish sending the message by sending the framing layer footer.
   * \return kTvmErrorNoError on success, or an error code otherwise.
   */
  tvm_crt_error_t FinishMessage();

  /*! \brief Returns true if the session is in the established state. */
  bool IsEstablished() const { return state_ == State::kSessionEstablished; }

  /*!
   * \brief Clear the receive buffer and prepare to receive next message.
   *
   * Call this function after MessageReceivedFunc is invoked. Any SessionReceiver::Write() calls
   * made will return errors until this function is called to prevent them from corrupting the
   * valid message in the receive buffer.
   */
  void ClearReceiveBuffer();

  /*! \brief A version number used to check compatibility of the remote session implementation. */
  static const constexpr uint8_t kVersion = 0x01;

 private:
  /*! \brief WriteStream adapter that feeds unframed bytes into the owning Session. */
  class SessionReceiver : public WriteStream {
   public:
    explicit SessionReceiver(Session* session) : session_{session} {}
    virtual ~SessionReceiver() {}

    ssize_t Write(const uint8_t* data, size_t data_size_bytes) override;
    void PacketDone(bool is_valid) override;

   private:
    void operator delete(void*) noexcept {}  // NOLINT(readability/casting)
    Session* session_;
  };

  /*! \brief Session handshake states. */
  enum class State : uint8_t {
    kReset = 0,
    kNoSessionEstablished = 1,
    kStartSessionSent = 2,
    kSessionEstablished = 3,
  };

  /*! \brief Pick a new local nonce. */
  void RegenerateNonce();

  /*! \brief Common send path used by the public Send* functions. */
  tvm_crt_error_t SendInternal(MessageType message_type, const uint8_t* message_data,
                               size_t message_size_bytes);

  void SendSessionStartReply(const SessionHeader& header);

  void ProcessStartSessionInit(const SessionHeader& header);

  void ProcessStartSessionReply(const SessionHeader& header);

  void OnSessionEstablishedMessage();

  void OnSessionTerminatedMessage();

  // The session id packs the initiator's nonce in the low byte and the
  // responder's nonce in the high byte.
  void SetSessionId(uint8_t initiator_nonce, uint8_t responder_nonce) {
    session_id_ = initiator_nonce | (((uint16_t)responder_nonce) << 8);
  }

  uint8_t InitiatorNonce(uint16_t session_id) { return session_id & 0xff; }

  uint8_t ResponderNonce(uint16_t session_id) { return (session_id >> 8) & 0xff; }

  /*! \brief This side's nonce for the current/next session. */
  uint8_t local_nonce_;
  /*! \brief Identifier of the established session; see SetSessionId(). */
  uint16_t session_id_;
  /*! \brief Current handshake state. */
  State state_;
  /*! \brief Adapter handed to the framing layer via Receiver(). */
  SessionReceiver receiver_;
  /*! \brief Framer used to transmit outgoing messages. Not owned. */
  Framer* framer_;
  /*! \brief Buffer for incoming payloads; may be nullptr (see constructor). Not owned. */
  FrameBuffer* receive_buffer_;
  /*! \brief True while receive_buffer_ holds a complete, unconsumed message. */
  bool receive_buffer_has_complete_message_;
  /*! \brief User callback for received messages. */
  MessageReceivedFunc message_received_func_;
  /*! \brief Opaque context forwarded to message_received_func_. */
  void* message_received_func_context_;
};

}  // namespace micro_rpc
}  // namespace runtime
}  // namespace tvm

#endif  // TVM_RUNTIME_CRT_RPC_COMMON_SESSION_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/crt/rpc_common/write_stream.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file write_stream.h
 * \brief Write-stream interface used by the RPC framing layer.
 */

#ifndef TVM_RUNTIME_CRT_RPC_COMMON_WRITE_STREAM_H_
#define TVM_RUNTIME_CRT_RPC_COMMON_WRITE_STREAM_H_

#include <inttypes.h>
#include <stddef.h>
#include <sys/types.h>
#include <tvm/runtime/crt/error_codes.h>

#include "../../../../../src/support/ssize.h"

namespace tvm {
namespace runtime {
namespace micro_rpc {

/*!
 * \brief Abstract sink for bytes produced by the framing layer.
 *
 * Implementations receive (possibly partial) writes via Write() and are
 * notified of packet boundaries via PacketDone().
 */
class WriteStream {
 public:
  virtual ~WriteStream();

  /*!
   * \brief Write bytes to the stream.
   * \param data Bytes to write.
   * \param data_size_bytes Number of valid bytes in \p data.
   * \return Number of bytes written, or a negative error code (see callers in
   *         framing.h for how return values are interpreted).
   */
  virtual ssize_t Write(const uint8_t* data, size_t data_size_bytes) = 0;

  /*!
   * \brief Called when a full packet has been delivered.
   * \param is_valid True when the packet passed validation (e.g. CRC check).
   */
  virtual void PacketDone(bool is_valid) = 0;

  /*!
   * \brief Repeatedly call Write() until all of \p data is consumed or an error occurs.
   * \param data Bytes to write.
   * \param data_size_bytes Number of valid bytes in \p data.
   * \param bytes_consumed Written with the number of bytes successfully consumed.
   * \return kTvmErrorNoError on success, or an error code otherwise.
   */
  tvm_crt_error_t WriteAll(uint8_t* data, size_t data_size_bytes, size_t* bytes_consumed);
};

}  // namespace micro_rpc
}  // namespace runtime
}  // namespace tvm

#endif  // TVM_RUNTIME_CRT_RPC_COMMON_WRITE_STREAM_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/crt/stack_allocator.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

// LINT_C_FILE

#ifndef TVM_RUNTIME_CRT_STACK_ALLOCATOR_H_
#define TVM_RUNTIME_CRT_STACK_ALLOCATOR_H_
#include <stddef.h>
#include <stdint.h>

#include "crt_config.h"
#include "error_codes.h"

/* Tag word written next to each allocation when the LIFO check is enabled. */
#define STACK_ALLOCATOR_TAG 0xabcd1234
#define STACK_ALLOCATOR_TAG_SIZE_BYTES 4

/*! Memory alignment for allocator */
#ifndef TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES
#define TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES 16
#endif

#ifdef __cplusplus
extern "C" {
#endif

/*! State of the stack-based (bump-pointer) workspace allocator. */
typedef struct {
  uint8_t* next_alloc;    // Pointer to the next block of TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES
  uint8_t* workspace;     // Pointer to start of the workspace
  size_t workspace_size;  // Total number of bytes in the workspace
} tvm_workspace_t;

/*!
 * \brief Initialize the stack-based memory manager
 *
 * \param tvm_runtime_workspace The tvm_workspace_t struct containing state
 * \param g_aot_memory The memory buffer used to allocate within
 * \param workspace_size The total size of the workspace buffer workspace
 * \return kTvmErrorNoError on success, or an error code otherwise.
 */
tvm_crt_error_t StackMemoryManager_Init(tvm_workspace_t* tvm_runtime_workspace,
                                        uint8_t* g_aot_memory, size_t workspace_size);

/*!
 * \brief The intended user-facing function to allocate within the buffer. It wraps
 * StackMemoryManager_Allocate_Body, enabling or disabling the LIFO check that is useful for
 * debugging the AoT codegen.
 *
 * \param tvm_runtime_workspace The tvm_workspace_t struct containing state
 * \param nbytes The number of bytes required for the allocation
 * \param current_alloc The pointer-to-pointer to be populated with the allocated address
 * \return kTvmErrorNoError on success, or an error code otherwise.
 */
tvm_crt_error_t StackMemoryManager_Allocate(tvm_workspace_t* tvm_runtime_workspace, int32_t nbytes,
                                            void** current_alloc);

/*!
 * \brief The internal function that accepts allocate inputs and an extra byte that says whether to
 * enable the LIFO check, which is useful for debugging the AoT codegen.
 *
 * \param tvm_runtime_workspace The tvm_workspace_t struct containing state
 * \param nbytes The number of bytes required for the allocation
 * \param current_alloc The pointer-to-pointer to be populated with the allocated address
 * \param do_lifo_check This being non-zero indicates to perform a check LIFO pattern Allocs/Frees
 * \return kTvmErrorNoError on success, or an error code otherwise.
 */
tvm_crt_error_t StackMemoryManager_Allocate_Body(tvm_workspace_t* tvm_runtime_workspace,
                                                 int32_t nbytes, void** current_alloc,
                                                 uint8_t do_lifo_check);

/*!
 * \brief The intended user-facing function to free a tensor within the buffer. It wraps
 * StackMemoryManager_Free_Body, enabling or disabling the LIFO check.
 *
 * \param tvm_runtime_workspace The tvm_workspace_t struct containing state
 * \param ptr The base pointer of the tensor to be free'd
 * \return kTvmErrorNoError on success, or an error code otherwise.
 */
tvm_crt_error_t StackMemoryManager_Free(tvm_workspace_t* tvm_runtime_workspace, void* ptr);

/*!
 * \brief The internal function that accepts free inputs and an extra byte that says whether to
 * enable the LIFO check, which is useful for debugging the AoT codegen.
 *
 * \param tvm_runtime_workspace The tvm_workspace_t struct containing state
 * \param ptr The base pointer of tensor to be free'd within the workspace buffer
 * \param do_lifo_check This being non-zero indicates to perform a check LIFO pattern Allocs/Frees
 * \return kTvmErrorNoError on success, or an error code otherwise.
 */
tvm_crt_error_t StackMemoryManager_Free_Body(tvm_workspace_t* tvm_runtime_workspace, void* ptr,
                                             uint8_t do_lifo_check);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // TVM_RUNTIME_CRT_STACK_ALLOCATOR_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/data_type.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * \file tvm/runtime/data_type.h * \brief Primitive runtime data type. */ // Acknowledgement: DataType structure design originates from Halide. #ifndef TVM_RUNTIME_DATA_TYPE_H_ #define TVM_RUNTIME_DATA_TYPE_H_ #include <tvm/runtime/c_runtime_api.h> #include <tvm/runtime/logging.h> #include <string> #include <type_traits> namespace tvm { namespace runtime { /*! * \brief Runtime primitive data type. * * This class is a thin wrapper of DLDataType. * We also make use of DataType in compiler to store quick hint */ class DataType { public: /*! * \brief Type code for the DataType. * * DLPack consistency: * 1) kInt is consistent with kDLInt * 2) kUInt is consistent with kDLUInt * 3) kFloat is consistent with kDLFloat */ enum TypeCode { kInt = kDLInt, kUInt = kDLUInt, kFloat = kDLFloat, kHandle = TVMArgTypeCode::kTVMOpaqueHandle, kBFloat = kDLBfloat, kCustomBegin = 129 }; /*! \brief default constructor */ DataType() { data_ = DataType::Void(); } /*! * \brief Constructor * \param dtype The DLDataType */ explicit DataType(DLDataType dtype) : data_(dtype) {} /*! * \brief Constructor * \param code The type code. * \param bits The number of bits in the type. * \param lanes The number of lanes. 
*/ DataType(int code, int bits, int lanes) { data_.code = static_cast<uint8_t>(code); data_.bits = static_cast<uint8_t>(bits); data_.lanes = static_cast<uint16_t>(lanes); if (code == kBFloat) { ICHECK_EQ(bits, 16); } } /*! \return The type code. */ int code() const { return static_cast<int>(data_.code); } /*! \return number of bits in the data. */ int bits() const { return static_cast<int>(data_.bits); } /*! \return number of bytes to store each scalar. */ int bytes() const { return (bits() + 7) / 8; } /*! \return number of lanes in the data. */ int lanes() const { return static_cast<int>(data_.lanes); } /*! \return whether type is a scalar type. */ bool is_scalar() const { return lanes() == 1; } /*! \return whether type is a scalar type. */ bool is_bool() const { return code() == DataType::kUInt && bits() == 1; } /*! \return whether type is a float type. */ bool is_float() const { return code() == DataType::kFloat; } /*! \return whether type is a float16 type. */ bool is_float16() const { return is_float() && bits() == 16; } /*! \return whether type is a bfloat16 type. */ bool is_bfloat16() const { return code() == DataType::kBFloat && bits() == 16; } /*! \return whether type is an int type. */ bool is_int() const { return code() == DataType::kInt; } /*! \return whether type is an uint type. */ bool is_uint() const { return code() == DataType::kUInt; } /*! \return whether type is a handle type. */ bool is_handle() const { return code() == DataType::kHandle && !is_void(); } /*! \return whether type is a vector type. */ bool is_vector() const { return lanes() > 1; } /*! \return whether type is a bool vector type. */ bool is_vector_bool() const { return is_vector() && bits() == 1; } /*! \return whether type is a Void type. */ bool is_void() const { return code() == DataType::kHandle && bits() == 0 && lanes() == 0; } /*! * \brief Create a new data type by change lanes to a specified value. * \param lanes The target number of lanes. * \return the result type. 
*/ DataType with_lanes(int lanes) const { return DataType(data_.code, data_.bits, lanes); } /*! * \brief Create a new data type by change bits to a specified value. * \param bits The target number of bits. * \return the result type. */ DataType with_bits(int bits) const { return DataType(data_.code, bits, data_.lanes); } /*! * \brief Get the scalar version of the type. * \return the result type. */ DataType element_of() const { return with_lanes(1); } /*! * \brief Assignment operator. */ DataType& operator=(const DataType& rhs) { if (this == &rhs) { return *this; } data_ = rhs.data_; return *this; } /*! * \brief Equal comparator. * \param other The data type to compare against. * \return The comparison result. */ bool operator==(const DataType& other) const { return data_.code == other.data_.code && data_.bits == other.data_.bits && data_.lanes == other.data_.lanes; } /*! * \brief NotEqual comparator. * \param other The data type to compare against. * \return The comparison result. */ bool operator!=(const DataType& other) const { return !operator==(other); } /*! * \brief Converter to DLDataType * \return the result. */ operator DLDataType() const { return data_; } /*! * \brief Construct an int type. * \param bits The number of bits in the type. * \param lanes The number of lanes. * \return The constructed data type. */ static DataType Int(int bits, int lanes = 1) { return DataType(kDLInt, bits, lanes); } /*! * \brief Construct an uint type. * \param bits The number of bits in the type. * \param lanes The number of lanes * \return The constructed data type. */ static DataType UInt(int bits, int lanes = 1) { return DataType(kDLUInt, bits, lanes); } /*! * \brief Construct an float type. * \param bits The number of bits in the type. * \param lanes The number of lanes * \return The constructed data type. */ static DataType Float(int bits, int lanes = 1) { return DataType(kDLFloat, bits, lanes); } /*! * \brief Construct an bfloat type. 
* \param bits The number of bits in the type. * \param lanes The number of lanes * \return The constructed data type. */ static DataType BFloat(int bits, int lanes = 1) { return DataType(kDLBfloat, bits, lanes); } /*! * \brief Construct a bool type. * \param lanes The number of lanes * \return The constructed data type. */ static DataType Bool(int lanes = 1) { return DataType::UInt(1, lanes); } /*! * \brief Construct a handle type. * \param bits The number of bits in the type. * \param lanes The number of lanes * \return The constructed data type. */ static DataType Handle(int bits = 64, int lanes = 1) { return DataType(kHandle, bits, lanes); } /*! * \brief Construct a Void type. * \return The constructed data type. */ static DataType Void() { return DataType(kHandle, 0, 0); } /*! * \brief Get the corresponding type of TVMShapeIndex. * \return The type of TVM shape index. */ static DataType ShapeIndex() { if (std::is_signed<tvm_index_t>::value) { return DataType::Int(sizeof(tvm_index_t) * 8); } else { return DataType::UInt(sizeof(tvm_index_t) * 8); } } private: DLDataType data_; }; /*! * \brief Get the number of bytes needed in a vector. * \param dtype The data type. * \return Number of bytes needed. */ inline int GetVectorBytes(DataType dtype) { int data_bits = dtype.bits() * dtype.lanes(); // allow bool to exist if (dtype == DataType::Bool() || dtype == DataType::Int(4) || dtype == DataType::UInt(4) || dtype == DataType::Int(1)) { return 1; } ICHECK_EQ(data_bits % 8, 0U) << "Need to load/store by multiple of bytes"; return data_bits / 8; } /*! * \brief Check whether type matches the given spec. * \param t The type * \param code The type code. * \param bits The number of bits to be matched. * \param lanes The number of lanes in the type. */ inline bool TypeMatch(DLDataType t, int code, int bits, int lanes = 1) { return t.code == code && t.bits == bits && t.lanes == lanes; } /*! * \brief Check whether two types are equal . * \param lhs The left operand. 
 * \param rhs The right operand.
 */
inline bool TypeEqual(DLDataType lhs, DLDataType rhs) {
  return lhs.code == rhs.code && lhs.bits == rhs.bits && lhs.lanes == rhs.lanes;
}

/*!
 * \brief Runtime utility for getting custom type name from code
 * \param type_code Custom type code
 * \return Custom type name
 */
TVM_DLL std::string GetCustomTypeName(uint8_t type_code);

/*!
 * \brief Runtime utility for checking whether custom type is registered
 * \param type_code Custom type code
 * \return Bool representing whether type is registered
 */
TVM_DLL bool GetCustomTypeRegistered(uint8_t type_code);

/*!
 * \brief Runtime utility for parsing string of the form "custom[<typename>]"
 * \param s String to parse
 * \param scan pointer to parsing pointer, which is scanning across s
 * \return type code of custom type parsed
 */
TVM_DLL uint8_t ParseCustomDatatype(const std::string& s, const char** scan);

/*!
 * \brief Convert type code to its name.
 * \param type_code The type code.
 * \return The name of type code.
 */
inline const char* DLDataTypeCode2Str(DLDataTypeCode type_code);

/*!
 * \brief convert a string to TVM type.
 * \param s The string to be converted.
 * \return The corresponding tvm type.
 */
inline DLDataType String2DLDataType(std::string s);

/*!
 * \brief convert a TVM type to string.
 * \param t The type to be converted.
 * \return The corresponding tvm type in string.
 */
inline std::string DLDataType2String(DLDataType t);

// implementation details
inline const char* DLDataTypeCode2Str(DLDataTypeCode type_code) {
  switch (static_cast<int>(type_code)) {
    case kDLInt:
      return "int";
    case kDLUInt:
      return "uint";
    case kDLFloat:
      return "float";
    case DataType::kHandle:
      return "handle";
    case kDLBfloat:
      return "bfloat";
    default:
      LOG(FATAL) << "unknown type_code=" << static_cast<int>(type_code);
      return "";
  }
}

inline std::ostream& operator<<(std::ostream& os, DLDataType t) {  // NOLINT(*)
  // A single-lane 1-bit uint is printed as "bool" rather than "uint1".
  if (t.bits == 1 && t.lanes == 1 && t.code == kDLUInt) {
    os << "bool";
    return os;
  }
  if (DataType(t).is_void()) {
    return os << "void";
  }
  // Codes at or beyond kCustomBegin are user-registered custom types.
  if (t.code < DataType::kCustomBegin) {
    os << DLDataTypeCode2Str(static_cast<DLDataTypeCode>(t.code));
  } else {
    os << "custom[" << GetCustomTypeName(t.code) << "]";
  }
  // Opaque handles are printed without a bit width or lane count.
  if (t.code == kTVMOpaqueHandle) return os;
  os << static_cast<int>(t.bits);
  if (t.lanes != 1) {
    os << 'x' << static_cast<int>(t.lanes);
  }
  return os;
}

inline std::ostream& operator<<(std::ostream& os, const DataType& dtype) {  // NOLINT(*)
  // Delegate to the DLDataType printer via the conversion operator.
  return os << dtype.operator DLDataType();
}

inline std::string DLDataType2String(DLDataType t) {
  // bits == 0 denotes the void type; render it as the empty string.
  if (t.bits == 0) return "";
  std::ostringstream os;
  os << t;
  return os.str();
}

inline DLDataType String2DLDataType(std::string s) {
  DLDataType t;
  // handle void type
  if (s.length() == 0) {
    t = DataType::Void();
    return t;
  }
  t.bits = 32;  // default bit width when the string carries none (e.g. "int")
  t.lanes = 1;
  const char* scan;
  // Dispatch on the type-name prefix; `scan` is left pointing at the
  // optional "<bits>x<lanes>" suffix that follows the name.
  if (s.substr(0, 3) == "int") {
    t.code = kDLInt;
    scan = s.c_str() + 3;
  } else if (s.substr(0, 4) == "uint") {
    t.code = kDLUInt;
    scan = s.c_str() + 4;
  } else if (s.substr(0, 5) == "float") {
    t.code = kDLFloat;
    scan = s.c_str() + 5;
  } else if (s.substr(0, 6) == "handle") {
    t.code = kTVMOpaqueHandle;
    t.bits = 64;  // handle uses 64 bit by default.
    scan = s.c_str() + 6;
  } else if (s == "bool") {
    // "bool" is represented as uint1x1; no suffix is allowed.
    t.code = kDLUInt;
    t.bits = 1;
    t.lanes = 1;
    return t;
  } else if (s.substr(0, 6) == "bfloat") {
    t.code = DataType::kBFloat;
    scan = s.c_str() + 6;
  } else if (s.substr(0, 6) == "custom") {
    // ParseCustomDatatype advances `scan` past the "custom[<name>]" part.
    t.code = ParseCustomDatatype(s, &scan);
  } else {
    scan = s.c_str();
    LOG(FATAL) << "unknown type " << s;
  }
  char* xdelim;  // emulate sscanf("%ux%u", bits, lanes)
  uint8_t bits = static_cast<uint8_t>(strtoul(scan, &xdelim, 10));
  if (bits != 0) t.bits = bits;
  char* endpt = xdelim;
  if (*xdelim == 'x') {
    t.lanes = static_cast<uint16_t>(strtoul(xdelim + 1, &endpt, 10));
  }
  // The suffix must consume the remainder of the string exactly.
  ICHECK(endpt == s.c_str() + s.length()) << "unknown type " << s;
  return t;
}

}  // namespace runtime

using DataType = runtime::DataType;

}  // namespace tvm

namespace std {
template <>
struct hash<tvm::DataType> {
  // Cantor pairing: a bijection from pairs of non-negative ints to a single int,
  // used to fold (code, bits, lanes) into one hash value.
  inline int cantor_pairing_function(int a, int b) const { return (a + b) * (a + b + 1) / 2 + b; }
  std::size_t operator()(tvm::DataType const& dtype) const {
    int a = dtype.code();
    int b = dtype.bits();
    int c = dtype.lanes();
    int d = cantor_pairing_function(a, b);
    return cantor_pairing_function(c, d);
  }
};
}  // namespace std
#endif  // TVM_RUNTIME_DATA_TYPE_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/debug.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/runtime/debug.h * \brief Helpers for debugging at runtime. */ #ifndef TVM_RUNTIME_DEBUG_H_ #define TVM_RUNTIME_DEBUG_H_ #include <tvm/runtime/container/adt.h> #include <tvm/runtime/ndarray.h> #include <ostream> #include <string> namespace tvm { namespace runtime { /*! * \brief Helpers to describe runtime objects in human-friendly form. For \p nd_arrays we show their * shapes and dtypes, but also their contents if 'small' and on the \p host_device (mostly so that * we can see dynamic shapes as they are computed). For \p adts we show the ADT fields. For * \p objects we dispatch to one of the above as appropriate. */ void AppendNDArray(std::ostream& os, const NDArray& nd_array, const DLDevice& host_device, bool show_content = true); void AppendADT(std::ostream& os, const ADT& adt, const DLDevice& host_device, bool show_content = true); void AppendRuntimeObject(std::ostream& os, const ObjectRef& object, const DLDevice& host_device, bool show_content = true); std::string RuntimeObject2String(const ObjectRef& object, const DLDevice& host_device, bool show_content = true); } // namespace runtime } // namespace tvm #endif // TVM_RUNTIME_DEBUG_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/device_api.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/runtime/device_api.h
 * \brief Abstract device memory management API
 */
#ifndef TVM_RUNTIME_DEVICE_API_H_
#define TVM_RUNTIME_DEVICE_API_H_

#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/ndarray.h>
#include <tvm/runtime/packed_func.h>

#include <string>

namespace tvm {
namespace runtime {
/*!
 * \brief the query type into GetAttr
 */
enum DeviceAttrKind : int {
  kExist = 0,
  kMaxThreadsPerBlock = 1,
  kWarpSize = 2,
  kMaxSharedMemoryPerBlock = 3,
  kComputeVersion = 4,
  kDeviceName = 5,
  kMaxClockRate = 6,
  kMultiProcessorCount = 7,
  kMaxThreadDimensions = 8,
  kMaxRegistersPerBlock = 9,
  kGcnArch = 10,
  kApiVersion = 11,
  kDriverVersion = 12
};

/*! \brief Number of bytes each allocation must align to */
constexpr int kAllocAlignment = 64;

/*! \brief Number of bytes each allocation must align to in temporary allocation */
constexpr int kTempAllocaAlignment = 64;

/*! \brief Maximum size that can be allocated on stack */
constexpr int kMaxStackAlloca = 1024;

/*! \brief Number of bytes each allocation must align to by default in the workspace buffer to
 * service intermediate tensors */
constexpr int kDefaultWorkspaceAlignment = 1;

/*!
 * \brief TVM Runtime Device API, abstracts the device
 *  specific interface for memory management.
 */
class TVM_DLL DeviceAPI {
 public:
  /*! \brief virtual destructor */
  virtual ~DeviceAPI() {}
  /*!
   * \brief Set the environment device id to device
   * \param dev The device to be set.
   */
  virtual void SetDevice(Device dev) = 0;
  /*!
   * \brief Get attribute of specified device.
   * \param dev The device to query.
   * \param kind The result kind
   * \param rv The return value.
   * \sa DeviceAttrKind
   */
  virtual void GetAttr(Device dev, DeviceAttrKind kind, TVMRetValue* rv) = 0;
  /*!
   * \brief Query the device for specified properties.
   *
   * This is used to expand "-from_device=N" in the target string to
   * all properties that can be determined from that device.
   */
  virtual void GetTargetProperty(Device dev, const std::string& property, TVMRetValue* rv) {}
  /*!
   * \brief Allocate a data space on device.
   * \param dev The device to perform operation.
   * \param nbytes The number of bytes in memory.
   * \param alignment The alignment of the memory.
   * \param type_hint The type of elements. Only needed by certain backends such
   * as OpenGL, as nbytes & alignment are sufficient for most backends.
   * \return The allocated device pointer.
   */
  virtual void* AllocDataSpace(Device dev, size_t nbytes, size_t alignment,
                               DLDataType type_hint) = 0;
  /*!
   * \brief Allocate a data space on device with memory scope support.
   * \param dev The device to perform operation.
   * \param ndim The number of dimension of allocated tensor.
   * \param shape The shape of allocated tensor.
   * \param dtype The type of elements.
   * \param mem_scope The memory scope of allocated tensor.
   * \return The allocated device pointer.
   */
  virtual void* AllocDataSpace(Device dev, int ndim, const int64_t* shape, DLDataType dtype,
                               Optional<String> mem_scope = NullOpt);
  /*!
   * \brief Free a data space on device.
   * \param dev The device to perform operation.
   * \param ptr The data space.
   */
  virtual void FreeDataSpace(Device dev, void* ptr) = 0;
  /*!
   * \brief copy data from one place to another
   * \note This API is designed to support special memory with shape dependent layout.
   *       We pass in DLTensor* with shape information to support these cases.
   * \param from The source array.
   * \param to The target array.
   * \param stream Optional stream object.
   */
  virtual void CopyDataFromTo(DLTensor* from, DLTensor* to, TVMStreamHandle stream);
  /*!
   * \brief Create a new stream of execution.
   *
   * \param dev The device of allocation.
   */
  virtual TVMStreamHandle CreateStream(Device dev);
  /*!
   * \brief Free a stream of execution
   *
   * \param dev The device of the stream
   * \param stream The pointer to be freed.
   */
  virtual void FreeStream(Device dev, TVMStreamHandle stream);
  /*!
   * \brief Synchronize the stream
   * \param dev The device to perform operation.
   * \param stream The stream to be sync.
   */
  virtual void StreamSync(Device dev, TVMStreamHandle stream) = 0;
  /*!
   * \brief Set the stream
   * \param dev The device to set stream.
   * \param stream The stream to be set.
   */
  virtual void SetStream(Device dev, TVMStreamHandle stream) {}
  /*!
   * \brief Synchronize 2 streams of execution.
   *
   * An event is created in the event_src stream that the event_dst
   * stream then waits on. Neither event_src nor event_dst need to be of
   * the same device ID as the device, but they must be of the same
   * device type.
   *
   * \param dev The device of the streams.
   * \param event_src The source stream to synchronize.
   * \param event_dst The destination stream to synchronize.
   */
  virtual void SyncStreamFromTo(Device dev, TVMStreamHandle event_src, TVMStreamHandle event_dst);
  /*!
   * \brief Allocate temporal workspace for backend execution.
   *
   *  \note We have the following assumption about backend temporal
   *   workspace allocation, and backend will optimize for such assumption:
   *
   *  - Only a few allocation will happen, and space will be released after use.
   *  - The release order is usually in reverse order of allocate (stack style).
   *  - Repetitive pattern of same allocations over different runs.
   *  - Workspace should not overlap between different threads (i.e. be thread-local).
   *
   * \param dev The device of allocation.
   * \param nbytes The size to be allocated.
   * \param type_hint The type of elements. Only needed by certain backends such
   * as OpenGL, as nbytes is sufficient for most backends.
   */
  virtual void* AllocWorkspace(Device dev, size_t nbytes, DLDataType type_hint = {});
  /*!
   * \brief Free temporal workspace in backend execution.
   *
   * \param dev The device of allocation.
   * \param ptr The pointer to be freed.
   */
  virtual void FreeWorkspace(Device dev, void* ptr);
  /*!
   * \brief Get device API based on device.
   * \param dev The device
   * \param allow_missing Whether allow missing
   * \return The corresponding device API.
   */
  static DeviceAPI* Get(Device dev, bool allow_missing = false);
  /*!
   * \brief Whether a certain device type requires set device
   *        before launching the kernel function.
   * \param device_type The device type.
   */
  static bool NeedSetDevice(int device_type) {
    return device_type != kDLCPU && device_type != kDLMicroDev;
  }

 protected:
  /*!
   * \brief copy data from one place to another
   * \param from The source array.
   * \param from_offset The byte offset in the from.
   * \param to The target array.
   * \param to_offset The byte offset in the to.
   * \param num_bytes The size of the memory in bytes
   * \param dev_from The source device
   * \param dev_to The target device
   * \param type_hint The type of elements, only needed by certain backends.
   *                  can be useful for cross device endian conversion.
   * \param stream Optional stream object.
   */
  virtual void CopyDataFromTo(const void* from, size_t from_offset, void* to, size_t to_offset,
                              size_t num_bytes, Device dev_from, Device dev_to,
                              DLDataType type_hint, TVMStreamHandle stream);
};

/*! \brief A device type bigger than this belongs to an RPC session. */
constexpr int kRPCSessMask = 128;
static_assert(kRPCSessMask >= TVMDeviceExtType_End);

/*!
 * \brief The name of Device API factory.
 * \param type The device type.
 * \return the device name.
 */
inline const char* DeviceName(int type) {
  switch (type) {
    case kDLCPU:
      return "cpu";
    case kDLCUDA:
      return "cuda";
    case kDLCUDAHost:
      return "cuda_host";
    case kDLCUDAManaged:
      return "cuda_managed";
    case kDLOpenCL:
      return "opencl";
    case kDLSDAccel:
      return "sdaccel";
    case kDLAOCL:
      return "aocl";
    case kDLVulkan:
      return "vulkan";
    case kDLMetal:
      return "metal";
    case kDLVPI:
      return "vpi";
    case kDLROCM:
      return "rocm";
    case kDLROCMHost:
      return "rocm_host";
    case kDLExtDev:
      return "ext_dev";
    case kDLOneAPI:
      return "oneapi";
    case kDLWebGPU:
      return "webgpu";
    case kDLHexagon:
      return "hexagon";
    case kOpenGL:
      return "opengl";
    case kDLMicroDev:
      return "microdev";
    default:
      LOG(FATAL) << "unknown type =" << type;
      return "Unknown";
  }
}

/*!
 * \brief Return true if a Device is owned by an RPC session.
 */
inline bool IsRPCSessionDevice(Device dev) { return (dev.device_type / kRPCSessMask) > 0; }

/*!
 * \brief Return the RPCSessTable index of the RPC Session that owns this device.
 * \return the table index.
 */
inline int GetRPCSessionIndex(Device dev) {
  ICHECK(IsRPCSessionDevice(dev)) << "GetRPCSessionIndex: dev has no RPC session";
  return dev.device_type / kRPCSessMask - 1;
}

/*!
 * \brief Remove the RPC session mask from a Device.
 * RPC clients typically do this when encoding a Device for transmission to an RPC remote.
 * On the wire, RPC devices are expected to be valid on the server without interpretation.
 * \param dev A Device with non-zero RPC Session mask, valid on the RPC client.
 * \return A Device without any RPC Session mask, valid on the RPC server.
 */
inline Device RemoveRPCSessionMask(Device dev) {
  dev.device_type = static_cast<DLDeviceType>(dev.device_type % kRPCSessMask);
  return dev;
}

inline std::ostream& operator<<(std::ostream& os, DLDevice dev) {  // NOLINT(*)
  // Remote devices print as "remote[<session index>]-<name>(<id>)".
  if (tvm::runtime::IsRPCSessionDevice(dev)) {
    os << "remote[" << tvm::runtime::GetRPCSessionIndex(dev) << "]-";
    dev = tvm::runtime::RemoveRPCSessionMask(dev);
  }
  os << tvm::runtime::DeviceName(static_cast<int>(dev.device_type)) << "(" << dev.device_id << ")";
  return os;
}

/*!
 * \brief Add a RPC session mask to a Device.
 * RPC clients typically do this when decoding a Device received from a RPC remote.
 * \param dev A Device without any RPC Session mask, valid on the RPC server.
 * \param session_table_index Numeric index of the RPC session in the session table.
 * \return A Device with RPC session mask added, valid on the RPC client.
 */
inline Device AddRPCSessionMask(Device dev, int session_table_index) {
  CHECK(!IsRPCSessionDevice(dev)) << "AddRPCSessionMask: dev already non-zero RPCSessionIndex: "
                                  << dev;
  dev.device_type =
      static_cast<DLDeviceType>(dev.device_type | (kRPCSessMask * (session_table_index + 1)));
  return dev;
}

}  // namespace runtime
}  // namespace tvm

#endif  // TVM_RUNTIME_DEVICE_API_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/executor_info.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file executor_info.h * \brief Executor information */ #ifndef TVM_RUNTIME_EXECUTOR_INFO_H_ #define TVM_RUNTIME_EXECUTOR_INFO_H_ namespace tvm { namespace runtime { /*! \brief Value used to indicate the graph executor. */ static constexpr const char* kTvmExecutorGraph = "graph"; /*! \brief Value used to indicate the aot executor. */ static constexpr const char* kTvmExecutorAot = "aot"; } // namespace runtime } // namespace tvm #endif // TVM_RUNTIME_EXECUTOR_INFO_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/hexagon/ops/conv2d.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/device_api.h>

#include <cassert>

#ifndef TVM_RUNTIME_HEXAGON_OPS_CONV2D_H_
#define TVM_RUNTIME_HEXAGON_OPS_CONV2D_H_

namespace tvm {
namespace runtime {
namespace hexagon {
// Fixed device handle used for every allocation in this header: Hexagon, device id 0.
static constexpr auto hexagon_device = DLDevice{static_cast<DLDeviceType>(kDLHexagon), 0};

// Standalone DLTensor: the standalone-ness means that this object owns the shape
// (as opposed to a DLTensor, whose `shape` points at externally-owned storage).
template <size_t NDIM>
class SDLTensor : public DLTensor {
 public:
  // Construct from a C array of NDIM dimension sizes (copied into `dims`).
  SDLTensor(void* data_ptr, DLDataType data_type, void* data_space, const int64_t* data_dims)
      : SDLTensor(data_ptr, data_type, data_space) {
    for (size_t i = 0; i < NDIM; ++i) dims[i] = data_dims[i];
  }

  // Convenience overload taking a braced list of dimension sizes.
  // NOTE(review): assumes data_dims has exactly NDIM elements — not checked here.
  SDLTensor(void* data_ptr, DLDataType data_type, void* data_space,
            std::initializer_list<int64_t> data_dims)
      : SDLTensor(data_ptr, data_type, data_space, data_dims.begin()) {}

  void* GetDataSpace() const { return data_space; }

 private:
  /**
   * @brief Construct SDLTensor
   *
   * @param data_ptr Either points to the same memory as data_space or an array of pointers to the
   * start of each chunk of weight. Since weights can be of varying sizes, this array could contain
   * the pointer to each chunk of memory
   * @param data_type data type of the elements in Tensor
   * @param data_space is meant to store the pointer returned from AllocDataSpace and can be freed
   * by passing it to FreeDataSpace
   */
  SDLTensor(void* data_ptr, DLDataType data_type, void* data_space) : data_space(data_space) {
    data = data_ptr;
    device = hexagon_device;
    ndim = NDIM;
    dtype = data_type;
    shape = dims;
    strides = nullptr;
    byte_offset = 0;
  }

  void* data_space = nullptr;  // owning pointer from AllocDataSpace; released via FreeDataSpace
  int64_t dims[NDIM];          // owned copy of the shape; DLTensor::shape points here
};

inline void* to_ptr(uintptr_t v) { return reinterpret_cast<void*>(v); }

inline uintptr_t to_uint(void* ptr) { return reinterpret_cast<uintptr_t>(ptr); }

constexpr int xyc_to_sm_16b(int y, int x, int c) {
  // Map y,x,c coordinates within a block to the offset (in 16-bit elements)
  // from the beginning of the block in spatial-major layout.
  // 10-bit spatial mask: yyyxcccccx
  assert(y >= 0 && x >= 0 && c >= 0);
  return y << 7 | (x & 2) << 5 | c << 1 | (x & 1);
}

constexpr int hwio_to_sm_16b(int width, int y, int x, int i, int o) {
  // Map y,x,i,o coordinates within a chunk (assuming the origin at the
  // top-left spatial corner) to the offset (in 16-bit elements) from the
  // beginning of the chunk in spatial-major layout.
  // Spatial mask: p..piiiioooooi, where p..p are position bits.
  assert(width >= 1);
  assert(y >= 0 && x >= 0 && i >= 0 && o >= 0);
  // x is mirrored within the row (width - 1 - x): the thinnest chunk of
  // width is stored first (see chunkify_hwio_16b below).
  int p = y * width + (width - 1 - x);
  return p << 10 | (i & 0x1e) << 5 | o << 1 | (i & 1);
}

// Round v up to the next multiple of p2. p2 must be a power of two.
inline constexpr int round_up(int v, int p2) { return (v + p2 - 1) & -p2; }

// Returns the block address at the given index
// Assumptions
// - The data type of tensor is fp16
// - There is only one batch, and hence n==0
// Out-of-range y yields a null block pointer (vertical padding); x and c are
// not range-checked.
inline uintptr_t nhwc_at(const DLTensor& a, int n, int y, int x, int c) {
  if (y < 0 || y >= a.shape[1]) return uintptr_t(0);
  auto p = static_cast<uintptr_t*>(a.data);
  assert(n == 0);
  return p[y * a.shape[2] * a.shape[3] + x * a.shape[3] + c];
}

// Returns the address of the chunk stored at given index
// Assumptions
// - The data type of tensor is fp16
inline uintptr_t hwio_at(const DLTensor& f, int y, int x, int i, int o) {
  auto p = static_cast<uintptr_t*>(f.data);
  return p[y * f.shape[1] * f.shape[2] * f.shape[3] + x * f.shape[2] * f.shape[3] +
           i * f.shape[3] + o];
}

/**
 * @brief Function to "blockize" the flat input data
 * The term "blockize" is used to mention that the data is stored in non-contiguous blocks
 *
 * The input is mapped into the below mentioned layout (notation similar to index map used for
 * transform layout):
 *
 * lambda n, h, w, c: n, h//8, w//4, c//32, AXIS_SEPARATOR, h%8, (w%4)//2, c%32, w%2
 *
 * where AXIS_SEPARATOR represents split up in the physical layout
 *
 * @param out Pre-allocated output memory pointer
 * @param inp_flat Flat input data pointer
 * @param height
 * @param width
 * @param depth
 */
void blockize_hwc_16b(void* out, void* inp_flat, int height, int width, int depth);

/**
 * @brief Convert back from non-contiguous layout to a flat layout
 *
 * @param out_flat Pre-allocated output memory pointer
 * @param inp Blockized input data pointer
 * @param height
 * @param width
 * @param depth
 */
void deblockize_hwc_16b(void* out_flat, void* inp, int height, int width, int depth);

/**
 * @brief Convert the layout of weights from flat to "chunked". The term chunked is explained below:
 *
 * Weights are packed into the below mentioned layout (notation similar to index map):
 * Since weights cannot be exactly represented into a index map notation, the
 * base split up is mentioned below with a few gotchas
 *
 * lambda h, w, i, o: h//8, w//4, o//32, i//32, h%8, w%4, (i%32)//2, o%32, i%2
 *
 * The gotchas are:
 * - (w%4) is actually stored in the right to left order, as in 3,2,1,0 instead of 0,1,2,3
 * - The h%8 and (w%4) dimensions are not padded up, leading to chunks of different sizes
 *   (thereby the name "chunked" instead of packed)
 * - The thinnest chunk of width is stored first. For example, if a kernel is 5x5, the first
 *   chunk along the width has size 1 (representing index 0) and then next one has size 4
 *   representing indices (1,2,3,4)
 *
 * @param out_ptr Base pointer table to be filled with the list of pointers to the first addresses
 * of the "chunked" weights
 * @param out_ptr_size The number of chunks
 * @param out Pointer to pre-allocated output memory
 * @param inp Pointer to flat input data
 * @param height
 * @param width
 * @param idepth
 * @param odepth
 */
void chunkify_hwio_16b(void** out_ptr, int out_ptr_size, void* out, void* inp, int height,
                       int width, int idepth, int odepth);

// Allocate (and optionally fill) the blockized NHWC tensor corresponding to nhwc_flat.
SDLTensor<4> prepare_nhwc(tvm::runtime::DeviceAPI* device_api, const DLTensor* nhwc_flat,
                          bool copy_data);

// Number of weight chunks for an HWIO shape (4 int64 extents).
int calculate_num_weight_chunks(int64_t* shape_hwio);

// Allocate and chunkify the HWIO weights; ptr_table receives the chunk pointers.
SDLTensor<4> prepare_hwio(tvm::runtime::DeviceAPI* device_api, const DLTensor* hwio_flat,
                          int num_chunks, void** ptr_table);

// Free the tensor's owned data space (if any) through the device API.
template <size_t N>
void release(tvm::runtime::DeviceAPI* device_api, const SDLTensor<N>& tensor) {
  if (auto* data_space = tensor.GetDataSpace()) {
    device_api->FreeDataSpace(hexagon_device, data_space);
  }
}

}  // namespace hexagon
}  // namespace runtime
}  // namespace tvm

#endif  // TVM_RUNTIME_HEXAGON_OPS_CONV2D_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/logging.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/runtime/logging.h
 * \brief logging utilities
 *
 * We define our own CHECK and LOG macros to replace those from dmlc-core.
 * These macros are then injected into dmlc-core via the
 * DMLC_USE_LOGGING_LIBRARY define. dmlc-core will #include this file wherever
 * it needs logging.
 */
#ifndef TVM_RUNTIME_LOGGING_H_
#define TVM_RUNTIME_LOGGING_H_

#include <dmlc/common.h>
#include <dmlc/thread_local.h>
#include <tvm/runtime/c_runtime_api.h>

#include <ctime>
#include <iomanip>
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>

/*!
 * \brief Macro helper to force a function not to be inlined.
 * It is only used in places that we know not inlining is good,
 * e.g. some logging functions.
 */
#if defined(_MSC_VER)
#define TVM_NO_INLINE __declspec(noinline)
#else
#define TVM_NO_INLINE __attribute__((noinline))
#endif

/*!
 * \brief Macro helper to force a function to be inlined.
 * It is only used in places that we know inline is important,
 * e.g. some template expansion cases.
 */
#ifdef _MSC_VER
#define TVM_ALWAYS_INLINE __forceinline
#else
#define TVM_ALWAYS_INLINE inline __attribute__((always_inline))
#endif

/*!
 * \brief Macro helper for exception throwing.
 */
#define TVM_THROW_EXCEPTION noexcept(false)

/*!
 * \brief Whether or not enable backtrace logging during a
 *        fatal error.
 *
 * \note TVM won't depend on LIBBACKTRACE or other exec_info
 *       library when this option is disabled.
 */
#ifndef TVM_LOG_STACK_TRACE
#define TVM_LOG_STACK_TRACE 1
#endif

/*!
 * \brief Whether or not use libbacktrace library
 *        for getting backtrace information
 */
#ifndef TVM_USE_LIBBACKTRACE
#define TVM_USE_LIBBACKTRACE 0
#endif

/*!
 * \brief Whether or not customize the logging output.
 *  If log customize is enabled, the user must implement
 *  tvm::runtime::detail::LogFatalImpl and tvm::runtime::detail::LogMessageImpl.
 */
#ifndef TVM_LOG_CUSTOMIZE
#define TVM_LOG_CUSTOMIZE 0
#endif

// a technique that enables overriding macro names on the number of parameters. This is used
// to define other macros below
#define GET_MACRO(_1, _2, _3, _4, _5, NAME, ...) NAME

/*!
 * \brief COND_X calls COND_X_N where N is the number of parameters passed to COND_X
 * X can be any of CHECK_GE, CHECK_EQ, CHECK, or LOG COND_X (but not COND_X_N)
 * are supposed to be used outside this file.
 * The first parameter of COND_X (and therefore, COND_X_N), which we call 'quit_on_assert',
 * is a boolean. The rest of the parameters of COND_X is the same as the parameters of X.
 * quit_on_assert determines the overall behavior of COND_X. If it's true COND_X
 * quits the program on assertion failure. If it's false, then it moves on and somehow reports
 * the assertion failure back to the macro caller in an appropriate manner (e.g, 'return false'
 * in a function, or 'continue' or 'break' in a loop)
 * The default behavior when quit_on_assertion is false, is to 'return false'. If this is not
 * desirable, the macro caller can pass one more last parameter to COND_X to tell COND_X what
 * to do when quit_on_assertion is false and the assertion fails.
 *
 * Rationale: These macros were designed to implement functions that have two behaviors
 * in a concise way. Those behaviors are quitting on assertion failures, or trying to
 * move on from assertion failures. Note that these macros hide lots of control flow in them,
 * and therefore, makes the logic of the whole code slightly harder to understand. However,
 * in pieces of code that use these macros frequently, it will significantly shorten the
 * amount of code needed to be read, and we won't need to clutter the main logic of the
 * function by repetitive control flow structure. The first problem
 * mentioned will be improved over time as the developer gets used to the macro.
 *
 * Here is an example of how to use it
 * \code
 * bool f(..., bool quit_on_assertion) {
 *   int a = 0, b = 0;
 *   ...
 *   a = ...
 *   b = ...
 *   // if quit_on_assertion is true, if a==b, continue, otherwise quit.
 *   // if quit_on_assertion is false, if a==b, continue, otherwise 'return false'
 *   //   (default behaviour)
 *   COND_CHECK_EQ(quit_on_assertion, a, b) << "some error message when quitting"
 *   ...
 *   for (int i = 0; i < N; i++) {
 *     a = ...
 *     b = ...
 *     // if quit_on_assertion is true, if a==b, continue, otherwise quit.
 *     // if quit_on_assertion is false, if a==b, continue, otherwise 'break'
 *     //   (non-default behaviour, therefore, has to be explicitly specified)
 *     COND_CHECK_EQ(quit_on_assertion, a, b, break) << "some error message when quitting"
 *   }
 * }
 * \endcode
 */
#define COND_CHECK_GE(...) \
  GET_MACRO(__VA_ARGS__, COND_CHECK_GE_5, COND_CHECK_GE_4, COND_CHECK_GE_3)(__VA_ARGS__)
#define COND_CHECK_EQ(...) \
  GET_MACRO(__VA_ARGS__, COND_CHECK_EQ_5, COND_CHECK_EQ_4, COND_CHECK_EQ_3)(__VA_ARGS__)
#define COND_CHECK(...) \
  GET_MACRO(__VA_ARGS__, COND_CHECK_5, COND_CHECK_4, COND_CHECK_3, COND_CHECK_2)(__VA_ARGS__)
#define COND_LOG(...) \
  GET_MACRO(__VA_ARGS__, COND_LOG_5, COND_LOG_4, COND_LOG_3, COND_LOG_2)(__VA_ARGS__)

// Not supposed to be used by users directly.
#define COND_CHECK_OP(quit_on_assert, x, y, what, op) \
  if (!quit_on_assert) {                              \
    if (!((x)op(y))) what;                            \
  } else /* NOLINT(*) */                              \
    CHECK_##op(x, y)

#define COND_CHECK_EQ_4(quit_on_assert, x, y, what) COND_CHECK_OP(quit_on_assert, x, y, what, ==)
#define COND_CHECK_GE_4(quit_on_assert, x, y, what) COND_CHECK_OP(quit_on_assert, x, y, what, >=)

#define COND_CHECK_3(quit_on_assert, x, what) \
  if (!quit_on_assert) {                      \
    if (!(x)) what;                           \
  } else /* NOLINT(*) */                      \
    CHECK(x)

#define COND_LOG_3(quit_on_assert, x, what) \
  if (!quit_on_assert) {                    \
    what;                                   \
  } else /* NOLINT(*) */                    \
    LOG(x)

#define COND_CHECK_EQ_3(quit_on_assert, x, y) COND_CHECK_EQ_4(quit_on_assert, x, y, return false)
#define COND_CHECK_GE_3(quit_on_assert, x, y) COND_CHECK_GE_4(quit_on_assert, x, y, return false)
#define COND_CHECK_2(quit_on_assert, x) COND_CHECK_3(quit_on_assert, x, return false)
#define COND_LOG_2(quit_on_assert, x) COND_LOG_3(quit_on_assert, x, return false)

namespace tvm {
namespace runtime {

/*!
 * \brief Generate a backtrace when called.
 * \return A multiline string of the backtrace. There will be either one or two lines per frame.
 */
TVM_DLL std::string Backtrace();

/*! \brief Base error type for TVM. Wraps a string message. */
class Error : public ::dmlc::Error {  // for backwards compatibility
 public:
  /*!
   * \brief Construct an error.
   * \param s The message to be displayed with the error.
   */
  explicit Error(const std::string& s) : ::dmlc::Error(s) {}
};

/*!
 * \brief Error message already set in frontend env.
 *
 *  This error can be thrown by EnvCheckSignals to indicate
 *  that there is an error set in the frontend environment (e.g.
 *  python interpreter). The TVM FFI should catch this error
 *  and return a proper code to tell the frontend caller about
 *  this fact.
 */
class EnvErrorAlreadySet : public ::dmlc::Error {
 public:
  /*!
   * \brief Construct an error.
   * \param s The message to be displayed with the error.
   */
  explicit EnvErrorAlreadySet(const std::string& s) : ::dmlc::Error(s) {}
};

/*!
* \brief Error type for errors from CHECK, ICHECK, and LOG(FATAL). This error * contains a backtrace of where it occurred. */ class InternalError : public Error { public: /*! \brief Construct an error. Not recommended to use directly. Instead use LOG(FATAL). * * \param file The file where the error occurred. * \param lineno The line number where the error occurred. * \param message The error message to display. * \param time The time at which the error occurred. This should be in local time. * \param backtrace Backtrace from when the error occurred. */ InternalError(std::string file, int lineno, std::string message, std::time_t time = std::time(nullptr), std::string backtrace = Backtrace()) : Error(""), file_(file), lineno_(lineno), message_(message), time_(time), backtrace_(backtrace) { std::ostringstream s; // XXX: Do not change this format, otherwise all error handling in python will break (because it // parses the message to reconstruct the error type). // TODO(tkonolige): Convert errors to Objects, so we can avoid the mess of formatting/parsing // error messages correctly. s << "[" << std::put_time(std::localtime(&time), "%H:%M:%S") << "] " << file << ":" << lineno << ": " << message << std::endl; if (backtrace.size() > 0) { s << backtrace << std::endl; } full_message_ = s.str(); } /*! \return The file in which the error occurred. */ const std::string& file() const { return file_; } /*! \return The message associated with this error. */ const std::string& message() const { return message_; } /*! \return Formatted error message including file, linenumber, backtrace, and message. */ const std::string& full_message() const { return full_message_; } /*! \return The backtrace from where this error occurred. */ const std::string& backtrace() const { return backtrace_; } /*! \return The time at which this error occurred. */ const std::time_t& time() const { return time_; } /*! \return The line number at which this error occurred. 
*/ int lineno() const { return lineno_; } virtual const char* what() const noexcept { return full_message_.c_str(); } private: std::string file_; int lineno_; std::string message_; std::time_t time_; std::string backtrace_; std::string full_message_; // holds the full error string }; /*! \brief Internal implementation */ namespace detail { // Provide support for customized logging. #if TVM_LOG_CUSTOMIZE /*! * \brief Custom implementations of LogFatal. * * \sa TVM_LOG_CUSTOMIZE */ [[noreturn]] TVM_DLL void LogFatalImpl(const std::string& file, int lineno, const std::string& message); /*! * \brief Custom implementations of LogMessage. * * \sa TVM_LOG_CUSTOMIZE */ TVM_DLL void LogMessageImpl(const std::string& file, int lineno, int level, const std::string& message); /*! * \brief Class to accumulate an error message and throw it. Do not use * directly, instead use LOG(FATAL). */ class LogFatal { public: LogFatal(const std::string& file, int lineno) : file_(file), lineno_(lineno) {} #ifdef _MSC_VER #pragma disagnostic push #pragma warning(disable : 4722) #endif [[noreturn]] ~LogFatal() TVM_THROW_EXCEPTION { LogFatalImpl(file_, lineno_, stream_.str()); } #ifdef _MSC_VER #pragma disagnostic pop #endif std::ostringstream& stream() { return stream_; } private: std::ostringstream stream_; std::string file_; int lineno_; }; /*! * \brief Class to accumulate an log message. Do not use directly, instead use * LOG(INFO), LOG(WARNING), LOG(ERROR). */ class LogMessage { public: LogMessage(const std::string& file, int lineno, int level) : file_(file), lineno_(lineno), level_(level) {} ~LogMessage() { LogMessageImpl(file_, lineno_, level_, stream_.str()); } std::ostringstream& stream() { return stream_; } private: std::string file_; int lineno_; int level_; std::ostringstream stream_; }; #else /*! * \brief Class to accumulate an error message and throw it. Do not use * directly, instead use LOG(FATAL). 
* \note The `LogFatal` class is designed to be an empty class to reduce stack size usage. * To play this trick, we use the thread-local storage to store its internal data. */ class LogFatal { public: TVM_NO_INLINE LogFatal(const char* file, int lineno) { GetEntry().Init(file, lineno); } #ifdef _MSC_VER #pragma disagnostic push #pragma warning(disable : 4722) #endif ~LogFatal() TVM_THROW_EXCEPTION { GetEntry().Finalize(); } #ifdef _MSC_VER #pragma disagnostic pop #endif std::ostringstream& stream() { return GetEntry().stream_; } private: struct Entry { void Init(const char* file, int lineno) { this->stream_.str(""); this->file_ = file; this->lineno_ = lineno; } TVM_NO_INLINE dmlc::Error Finalize() { throw InternalError(file_, lineno_, stream_.str()); } std::ostringstream stream_; std::string file_; int lineno_; }; TVM_DLL TVM_NO_INLINE static Entry& GetEntry(); }; /*! * \brief Class to accumulate an log message. Do not use directly, instead use * LOG(INFO), LOG(WARNING), LOG(ERROR). */ class LogMessage { public: LogMessage(const std::string& file, int lineno, int level) { std::time_t t = std::time(nullptr); stream_ << "[" << std::put_time(std::localtime(&t), "%H:%M:%S") << "] " << file << ":" << lineno << level_strings_[level]; } TVM_NO_INLINE ~LogMessage() { std::cerr << stream_.str() << std::endl; } std::ostringstream& stream() { return stream_; } private: std::ostringstream stream_; TVM_DLL static const char* level_strings_[]; }; #endif // Below is from dmlc-core // This class is used to explicitly ignore values in the conditional // logging macros. This avoids compiler warnings like "value computed // is not used" and "statement has no effect". class LogMessageVoidify { public: LogMessageVoidify() {} // This has to be an operator with a precedence lower than << but // higher than "?:". See its usage. void operator&(std::ostream&) {} }; /*! \brief Captures the state of the \p TVM_LOG_DEBUG environment flag. */ class TvmLogDebugSettings { public: /*! 
* \brief Parses the \p TVM_LOG_DEBUG environment flag as per the specification given by * \p DebugLoggingEnabled and \p VerboseLoggingEnabled, and caches the result. */ inline static const TvmLogDebugSettings& FromFlag() { // Parse and cache the verbosity level map. static const auto* settings = new TvmLogDebugSettings(TvmLogDebugSettings::ParseSpec(std::getenv("TVM_LOG_DEBUG"))); return *settings; } /*! * \brief Parses \p opt_spec as per specification for \p TVM_LOG_DEBUG given by * \p DebugLoggingEnabled and \p VerboseLoggingEnabled. Throws if specification is ill-formed. */ static TvmLogDebugSettings ParseSpec(const char* opt_spec); /*! * \brief Implements \p VerboseLoggingEnabled below w.r.t. the already parsed \p TVM_LOG_DEBUG * environment variable. */ inline bool VerboseEnabled(const char* opt_filename, int level) const { if (opt_filename == nullptr || level < 0 || vlog_level_map_.empty()) { return false; } return VerboseEnabledImpl(opt_filename, level); } /*! \brief Returns true if \p DLOG statements should be executed. */ bool dlog_enabled() const { return dlog_enabled_; } private: // Slow path for VerboseEnabled. bool VerboseEnabledImpl(const std::string& filename, int level) const; /*! \brief If true, DLOG statements are enabled. */ bool dlog_enabled_ = false; /*! * \brief A map from canonicalized filenames to the maximum VLOG verbosity level for that file. * May also contain the 'wildcard' entry \p "DEFAULT" representing the level for all other files. */ std::unordered_map<std::string, int> vlog_level_map_; }; /*! * \brief Returns true if a DLOG statement is enabled by the \p TVM_LOG_DEBUG environment * variable. Requires: * \code * TVM_LOG_DEBUG=1 * \endcode * or a valid setting as described by \p VerboseLoggingEnabled below. */ // Also from dmlc-core inline bool DebugLoggingEnabled() { static int state = 0; if (state == 0) { state = TvmLogDebugSettings::FromFlag().dlog_enabled() ? 1 : -1; } return state == 1; } /*! 
 * \brief Returns true if a VLOG statement in \p filename is enabled by the \p TVM_LOG_DEBUG
 * environment variable for logging at verbosity \p level. Levels should be non-negative.
 *
 * Filenames are canonicalized to be w.r.t. the src/ dir of the TVM tree. (VLOG's should not
 * appear under include/).
 *
 * To enable file \p relay/foo.cc up to level 2 and \p ir/bar.cc for level 0 only set:
 * \code
 * TVM_LOG_DEBUG="relay/foo.cc=2,ir/bar.cc=0"
 * \endcode
 *
 * To enable all files up to level 3 but disable \p ir/bar.cc set:
 * \code
 * TVM_LOG_DEBUG="DEFAULT=2,ir/bar.cc=-1"
 * \endcode
 *
 * Any of these settings will also enable DLOG statements.
 */
inline bool VerboseLoggingEnabled(const char* opt_filename, int level) {
  return TvmLogDebugSettings::FromFlag().VerboseEnabled(opt_filename, level);
}

/*!
 * \brief A stack of VLOG context messages.
 *
 * For use by \p VLOG_CONTEXT macro only.
 * \note The stack holds non-owning pointers; each stream is owned by a
 * \p VLogContextEntry whose lifetime brackets its Push/Pop calls.
 */
class VLogContext {
 public:
  /*! \brief Push a (non-owned) context stream onto the stack. */
  void Push(std::stringstream* stream) { context_stack_.push_back(stream); }
  /*! \brief Pop the most recent context stream; a no-op on an empty stack. */
  void Pop() {
    if (!context_stack_.empty()) {
      context_stack_.pop_back();
    }
  }

  /*! \brief Render the concatenated context messages for use as a log prefix. */
  std::string str() const;

 private:
  std::vector<std::stringstream*> context_stack_;
};

/*! \brief Thread local \p VLogContext for tracking a stack of VLOG context messages. */
using ThreadLocalVLogContext = dmlc::ThreadLocalStore<VLogContext>;

/*!
 * \brief A RAII class to push/pop a VLOG context message onto the thread-local stack.
 *
 * For use by \p VLOG_CONTEXT macro only.
 */
class VLogContextEntry {
 public:
  VLogContextEntry() { ThreadLocalVLogContext::Get()->Push(&sstream_); }
  ~VLogContextEntry() { ThreadLocalVLogContext::Get()->Pop(); }
  std::ostream& stream() { return sstream_; }

 private:
  std::stringstream sstream_;
};

/*! \brief Banner prepended to every ICHECK failure message. */
constexpr const char* kTVM_INTERNAL_ERROR_MESSAGE =
    "\n"
    "---------------------------------------------------------------\n"
    "An error occurred during the execution of TVM.\n"
    "For more information, please see: https://tvm.apache.org/docs/errors.html\n"
    "---------------------------------------------------------------\n";

/*!
 * \brief Format the two operands of a failed comparison check as " (x vs. y) ".
 * \note Requires both types to be streamable to std::ostream.
 */
template <typename X, typename Y>
std::unique_ptr<std::string> LogCheckFormat(const X& x, const Y& y) {
  std::ostringstream os;
  os << " (" << x << " vs. " << y << ") ";
  // CHECK_XX(x, y) requires x and y can be serialized to
  // string. Use CHECK(x OP y) otherwise.
  return std::make_unique<std::string>(os.str());
}

// Inline _Pragma in macros does not work reliably on old version of MSVC and
// GCC. We wrap all comparisons in a function so that we can use #pragma to
// silence bad comparison warnings.
// Defines LogCheck_LT/_GT/... : returns nullptr on success, or the formatted
// operands on failure. The extra (int, int) overload pins the common case to a
// single instantiation.
#define TVM_CHECK_FUNC(name, op)                                                          \
  template <typename X, typename Y>                                                       \
  TVM_ALWAYS_INLINE std::unique_ptr<std::string> LogCheck##name(const X& x, const Y& y) { \
    if (x op y) return nullptr;                                                           \
    return LogCheckFormat(x, y);                                                          \
  }                                                                                       \
  TVM_ALWAYS_INLINE std::unique_ptr<std::string> LogCheck##name(int x, int y) {           \
    return LogCheck##name<int, int>(x, y);                                                \
  }

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-compare"
TVM_CHECK_FUNC(_LT, <)
TVM_CHECK_FUNC(_GT, >)
TVM_CHECK_FUNC(_LE, <=)
TVM_CHECK_FUNC(_GE, >=)
TVM_CHECK_FUNC(_EQ, ==)
TVM_CHECK_FUNC(_NE, !=)
#pragma GCC diagnostic pop
}  // namespace detail

// Numeric severity levels used by LOG(level); higher is more severe.
#define TVM_LOG_LEVEL_DEBUG 0
#define TVM_LOG_LEVEL_INFO 1
#define TVM_LOG_LEVEL_WARNING 2
#define TVM_LOG_LEVEL_ERROR 3
#define TVM_LOG_LEVEL_FATAL 4
// LOG(INFO) expands to LOG_INFO, etc.; each yields an ostream to append to.
#define LOG(level) LOG_##level
#define LOG_DEBUG \
  ::tvm::runtime::detail::LogMessage(__FILE__, __LINE__, TVM_LOG_LEVEL_DEBUG).stream()
#define LOG_FATAL ::tvm::runtime::detail::LogFatal(__FILE__, __LINE__).stream()
#define LOG_INFO ::tvm::runtime::detail::LogMessage(__FILE__, __LINE__, TVM_LOG_LEVEL_INFO).stream()
#define LOG_ERROR \
  ::tvm::runtime::detail::LogMessage(__FILE__, __LINE__, TVM_LOG_LEVEL_ERROR).stream()
#define LOG_WARNING \
  ::tvm::runtime::detail::LogMessage(__FILE__, __LINE__, TVM_LOG_LEVEL_WARNING).stream()

// On failure the LogFatal stream stays open so the caller's << message appends.
#define TVM_CHECK_BINARY_OP(name, op, x, y)                                \
  if (auto __tvm__log__err = ::tvm::runtime::detail::LogCheck##name(x, y)) \
  ::tvm::runtime::detail::LogFatal(__FILE__, __LINE__).stream()            \
      << "Check failed: " << #x " " #op " " #y << *__tvm__log__err << ": "

#define CHECK(x)                                                \
  if (!(x))                                                     \
  ::tvm::runtime::detail::LogFatal(__FILE__, __LINE__).stream() \
      << "Check failed: (" #x << ") is false: "

#define CHECK_LT(x, y) TVM_CHECK_BINARY_OP(_LT, <, x, y)
#define CHECK_GT(x, y) TVM_CHECK_BINARY_OP(_GT, >, x, y)
#define CHECK_LE(x, y) TVM_CHECK_BINARY_OP(_LE, <=, x, y)
#define CHECK_GE(x, y) TVM_CHECK_BINARY_OP(_GE, >=, x, y)
#define CHECK_EQ(x, y) TVM_CHECK_BINARY_OP(_EQ, ==, x, y)
#define CHECK_NE(x, y) TVM_CHECK_BINARY_OP(_NE, !=, x, y)
// Evaluates to (x) when non-null; otherwise logs fatally. Usable in expressions.
#define CHECK_NOTNULL(x)                                                          \
  ((x) == nullptr ? ::tvm::runtime::detail::LogFatal(__FILE__, __LINE__).stream() \
                        << "Check not null: " #x << ' ',                          \
   (x) : (x))  // NOLINT(*)

#define LOG_IF(severity, condition) \
  !(condition) ? (void)0 : ::tvm::runtime::detail::LogMessageVoidify() & LOG(severity)

#if TVM_LOG_DEBUG

#define LOG_DFATAL LOG_FATAL
#define DFATAL FATAL
#define DLOG(severity) LOG_IF(severity, ::tvm::runtime::detail::DebugLoggingEnabled())
#define DLOG_IF(severity, condition) \
  LOG_IF(severity, ::tvm::runtime::detail::DebugLoggingEnabled() && (condition))

/*!
 * \brief If the \p TVM_LOG_DEBUG build flag is enabled, push a context message onto an internal
 * stack. All VLOG messages will include this stack in their prefix to help with debugging. E.g.:
 * \code
 * VLOG_CONTEXT << "my context";
 * VLOG(1) << "my log message";
 * \endcode
 * Thread safe. No-op with no execution overhead if the \p TVM_LOG_DEBUG build flag is not enabled.
 */
#define VLOG_CONTEXT                                    \
  ::tvm::runtime::detail::VLogContextEntry vlog_entry_; \
  vlog_entry_.stream()

#else

#define LOG_DFATAL LOG_ERROR
#define DFATAL ERROR
#define DLOG(severity) true ? (void)0 : ::tvm::runtime::detail::LogMessageVoidify() & LOG(severity)
#define DLOG_IF(severity, condition) \
  (true || !(condition)) ? (void)0 : ::tvm::runtime::detail::LogMessageVoidify() & LOG(severity)
#define VLOG_CONTEXT true ? (void)0 : ::tvm::runtime::detail::LogMessageVoidify() & LOG(INFO)

#endif

/*!
 * \brief If the \p TVM_LOG_DEBUG build flag is enabled, and the containing file has been enabled
 * at \p level or greater in the \p TVM_LOG_DEBUG environment variable, then log a message at
 * \p INFO severity.
 *
 * See \p VerboseLoggingEnabled for the format of the \p TVM_LOG_DEBUG environment variable.
 * Thread safe. No-op with no execution overhead if the \p TVM_LOG_DEBUG build flag is not enabled.
 * No-op with some execution overhead if the \p TVM_LOG_DEBUG build flag is enabled but the
 * containing file is not enabled.
 */
#define VLOG(level)                                                               \
  DLOG_IF(INFO, ::tvm::runtime::detail::VerboseLoggingEnabled(__FILE__, (level))) \
      << ::tvm::runtime::detail::ThreadLocalVLogContext::Get()->str()

#if TVM_LOG_DEBUG
#define DCHECK(x) CHECK(x)
#define DCHECK_LT(x, y) CHECK((x) < (y))
#define DCHECK_GT(x, y) CHECK((x) > (y))
#define DCHECK_LE(x, y) CHECK((x) <= (y))
#define DCHECK_GE(x, y) CHECK((x) >= (y))
#define DCHECK_EQ(x, y) CHECK((x) == (y))
#define DCHECK_NE(x, y) CHECK((x) != (y))
#else
// `while (false)` keeps the operands syntactically valid (and type-checked)
// without ever evaluating them.
#define DCHECK(x) \
  while (false) CHECK(x)
#define DCHECK_LT(x, y) \
  while (false) CHECK((x) < (y))
#define DCHECK_GT(x, y) \
  while (false) CHECK((x) > (y))
#define DCHECK_LE(x, y) \
  while (false) CHECK((x) <= (y))
#define DCHECK_GE(x, y) \
  while (false) CHECK((x) >= (y))
#define DCHECK_EQ(x, y) \
  while (false) CHECK((x) == (y))
#define DCHECK_NE(x, y) \
  while (false) CHECK((x) != (y))
#endif

#define TVM_ICHECK_INDENT "  "

// ICHECK* mirror CHECK* but prepend the internal-error banner for bug reports.
#define ICHECK_BINARY_OP(name, op, x, y)                                   \
  if (auto __tvm__log__err = ::tvm::runtime::detail::LogCheck##name(x, y)) \
  ::tvm::runtime::detail::LogFatal(__FILE__, __LINE__).stream()            \
      << ::tvm::runtime::detail::kTVM_INTERNAL_ERROR_MESSAGE << std::endl  \
      << TVM_ICHECK_INDENT << "Check failed: " << #x " " #op " " #y << *__tvm__log__err << ": "

#define ICHECK(x)                                                                 \
  if (!(x))                                                                       \
  ::tvm::runtime::detail::LogFatal(__FILE__, __LINE__).stream()                   \
      << ::tvm::runtime::detail::kTVM_INTERNAL_ERROR_MESSAGE << TVM_ICHECK_INDENT \
      << "Check failed: (" #x << ") is false: "

#define ICHECK_LT(x, y) ICHECK_BINARY_OP(_LT, <, x, y)
#define ICHECK_GT(x, y) ICHECK_BINARY_OP(_GT, >, x, y)
#define ICHECK_LE(x, y) ICHECK_BINARY_OP(_LE, <=, x, y)
#define ICHECK_GE(x, y) ICHECK_BINARY_OP(_GE, >=, x, y)
#define ICHECK_EQ(x, y) ICHECK_BINARY_OP(_EQ, ==, x, y)
#define ICHECK_NE(x, y) ICHECK_BINARY_OP(_NE, !=, x, y)
#define ICHECK_NOTNULL(x)                                                         \
  ((x) == nullptr ? ::tvm::runtime::detail::LogFatal(__FILE__, __LINE__).stream() \
                        << ::tvm::runtime::detail::kTVM_INTERNAL_ERROR_MESSAGE    \
                        << TVM_ICHECK_INDENT << "Check not null: " #x << ' ',     \
   (x) : (x))  // NOLINT(*)

}  // namespace runtime
// Re-export error types
using runtime::Error;
using runtime::InternalError;
}  // namespace tvm
#endif  // TVM_RUNTIME_LOGGING_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/memory.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
/*!
 * \file tvm/runtime/memory.h
 * \brief Runtime memory management.
 */
#ifndef TVM_RUNTIME_MEMORY_H_
#define TVM_RUNTIME_MEMORY_H_

#include <tvm/runtime/object.h>

#include <cstdlib>
#include <type_traits>
#include <utility>

namespace tvm {
namespace runtime {
/*!
 * \brief Allocate an object using default allocator.
 * \param args arguments to the constructor.
 * \tparam T the node type.
 * \return The ObjectPtr to the allocated object.
 */
template <typename T, typename... Args>
inline ObjectPtr<T> make_object(Args&&... args);

// Detail implementations after this
//
// The current design allows swapping the
// allocator pattern when necessary.
//
// Possible future allocator optimizations:
// - Arena allocator that gives ownership of memory to arena (deleter_= nullptr)
// - Thread-local object pools: one pool per size and alignment requirement.
// - Can specialize by type of object to give the specific allocator to each object.

/*!
 * \brief Base class of object allocators that implements make.
 *  Use curiously recurring template pattern.
 *
 * \tparam Derived The derived class.
 */
template <typename Derived>
class ObjAllocatorBase {
 public:
  /*!
   * \brief Make a new object using the allocator.
   * \tparam T The type to be allocated.
   * \tparam Args The constructor signature.
   * \param args The arguments.
   */
  template <typename T, typename... Args>
  inline ObjectPtr<T> make_object(Args&&... args) {
    // Dispatch allocation/deallocation policy to the derived allocator's Handler.
    using Handler = typename Derived::template Handler<T>;
    static_assert(std::is_base_of<Object, T>::value, "make can only be used to create Object");
    T* ptr = Handler::New(static_cast<Derived*>(this), std::forward<Args>(args)...);
    // Stamp the runtime type index and the matching deleter before handing out
    // ownership, so ObjectPtr can destroy the object correctly later.
    ptr->type_index_ = T::RuntimeTypeIndex();
    ptr->deleter_ = Handler::Deleter();
    return ObjectPtr<T>(ptr);
  }

  /*!
   * \brief Make a new object whose trailing storage holds an inline array.
   * \tparam ArrayType The type to be allocated.
   * \tparam ElemType The type of array element.
   * \tparam Args The constructor signature.
   * \param num_elems The number of array elements.
   * \param args The arguments.
   */
  template <typename ArrayType, typename ElemType, typename... Args>
  inline ObjectPtr<ArrayType> make_inplace_array(size_t num_elems, Args&&... args) {
    using Handler = typename Derived::template ArrayHandler<ArrayType, ElemType>;
    static_assert(std::is_base_of<Object, ArrayType>::value,
                  "make_inplace_array can only be used to create Object");
    ArrayType* ptr =
        Handler::New(static_cast<Derived*>(this), num_elems, std::forward<Args>(args)...);
    ptr->type_index_ = ArrayType::RuntimeTypeIndex();
    ptr->deleter_ = Handler::Deleter();
    return ObjectPtr<ArrayType>(ptr);
  }
};

// Simple allocator that uses new/delete.
class SimpleObjAllocator : public ObjAllocatorBase<SimpleObjAllocator> {
 public:
  template <typename T>
  class Handler {
   public:
    using StorageType = typename std::aligned_storage<sizeof(T), alignof(T)>::type;

    template <typename... Args>
    static T* New(SimpleObjAllocator*, Args&&... args) {
      // NOTE: the first argument is not needed for SimpleObjAllocator
      // It is reserved for special allocators that needs to recycle
      // the object to itself (e.g. in the case of object pool).
      //
      // In the case of an object pool, an allocator needs to create
      // a special chunk memory that hides reference to the allocator
      // and call allocator's release function in the deleter.
      // NOTE2: Use inplace new to allocate
      // This is used to get rid of warning when deleting a virtual
      // class with non-virtual destructor.
      // We are fine here as we captured the right deleter during construction.
      // This is also the right way to get storage type for an object pool.
      StorageType* data = new StorageType();
      new (data) T(std::forward<Args>(args)...);
      return reinterpret_cast<T*>(data);
    }

    static Object::FDeleter Deleter() { return Deleter_; }

   private:
    static void Deleter_(Object* objptr) {
      // NOTE: this is important to cast back to T*
      // because objptr and tptr may not be the same
      // depending on how sub-class allocates the space.
      T* tptr = static_cast<T*>(objptr);
      // It is important to do tptr->T::~T(),
      // so that we explicitly call the specific destructor
      // instead of tptr->~T(), which could mean the intention
      // call a virtual destructor(which may not be available and is not required).
      tptr->T::~T();
      // Free the raw storage buffer allocated in New (destructor already ran above).
      delete reinterpret_cast<StorageType*>(tptr);
    }
  };

  // Array handler that uses new/delete.
  template <typename ArrayType, typename ElemType>
  class ArrayHandler {
   public:
    using StorageType = typename std::aligned_storage<sizeof(ArrayType), alignof(ArrayType)>::type;
    // for now only support elements that aligns with array header.
    static_assert(alignof(ArrayType) % alignof(ElemType) == 0 &&
                      sizeof(ArrayType) % alignof(ElemType) == 0,
                  "element alignment constraint");

    template <typename... Args>
    static ArrayType* New(SimpleObjAllocator*, size_t num_elems, Args&&... args) {
      // NOTE: the first argument is not needed for ArrayObjAllocator
      // It is reserved for special allocators that needs to recycle
      // the object to itself (e.g. in the case of object pool).
      //
      // In the case of an object pool, an allocator needs to create
      // a special chunk memory that hides reference to the allocator
      // and call allocator's release function in the deleter.
      // NOTE2: Use inplace new to allocate
      // This is used to get rid of warning when deleting a virtual
      // class with non-virtual destructor.
      // We are fine here as we captured the right deleter during construction.
      // This is also the right way to get storage type for an object pool.
      // Round the header + inline array size up to a whole number of
      // StorageType slots so the buffer covers the trailing elements.
      size_t unit = sizeof(StorageType);
      size_t requested_size = num_elems * sizeof(ElemType) + sizeof(ArrayType);
      size_t num_storage_slots = (requested_size + unit - 1) / unit;
      StorageType* data = new StorageType[num_storage_slots];
      new (data) ArrayType(std::forward<Args>(args)...);
      return reinterpret_cast<ArrayType*>(data);
    }

    static Object::FDeleter Deleter() { return Deleter_; }

   private:
    static void Deleter_(Object* objptr) {
      // NOTE: this is important to cast back to ArrayType*
      // because objptr and tptr may not be the same
      // depending on how sub-class allocates the space.
      ArrayType* tptr = static_cast<ArrayType*>(objptr);
      // It is important to do tptr->ArrayType::~ArrayType(),
      // so that we explicitly call the specific destructor
      // instead of tptr->~ArrayType(), which could mean the intention
      // call a virtual destructor(which may not be available and is not required).
      tptr->ArrayType::~ArrayType();
      // Must match the array-new in New: release with delete[].
      StorageType* p = reinterpret_cast<StorageType*>(tptr);
      delete[] p;
    }
  };
};

template <typename T, typename... Args>
inline ObjectPtr<T> make_object(Args&&... args) {
  return SimpleObjAllocator().make_object<T>(std::forward<Args>(args)...);
}

/*!
 * \brief Allocate an object with a trailing inline array using default allocator.
 * \param num_elems The number of trailing array elements.
 * \param args arguments to the constructor.
 * \return The ObjectPtr to the allocated object.
 */
template <typename ArrayType, typename ElemType, typename... Args>
inline ObjectPtr<ArrayType> make_inplace_array_object(size_t num_elems, Args&&... args) {
  return SimpleObjAllocator().make_inplace_array<ArrayType, ElemType>(num_elems,
                                                                      std::forward<Args>(args)...);
}

}  // namespace runtime
}  // namespace tvm
#endif  // TVM_RUNTIME_MEMORY_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/metadata.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/runtime/metadata.h
 * \brief Defines types which can be used in Metadata.
 */
#ifndef TVM_RUNTIME_METADATA_H_
#define TVM_RUNTIME_METADATA_H_

#include <dmlc/memory_io.h>
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/metadata_base.h>
#include <tvm/runtime/metadata_types.h>
#include <tvm/runtime/object.h>
#include <tvm/support/span.h>

#include <memory>
#include <string>
#include <vector>

// Version number recorded in emitted artifacts for runtime checking.
#define TVM_METADATA_VERSION 1

namespace tvm {
namespace runtime {
namespace metadata {
/*!
 * \brief Version of metadata emitted and understood by this compiler/runtime.
 * Should be populated into the `version` field of all TVMMetadata.
 */
static const constexpr int64_t kMetadataVersion = TVM_METADATA_VERSION;

class Metadata;
class TensorInfo;
class ConstantInfoMetadata;

/*!
 * \brief Object wrapper around the C-level ::TVMMetadata struct.
 * \note Holds a non-owning pointer to the underlying struct; accessors read
 * its fields directly without copying.
 */
class MetadataNode : public MetadataBaseNode {
 public:
  explicit MetadataNode(const struct ::TVMMetadata* data) : data_{data} {}
  static constexpr const char* _type_key = "metadata.MetadataNode";
  const char* get_c_struct_name() const override;
  /*! \brief Metadata format version recorded in the artifact. */
  inline int64_t version() const { return int64_t(data_->version); }
  inline int64_t num_inputs() const { return data_->num_inputs; }
  ArrayAccessor<struct TVMTensorInfo, TensorInfo> inputs();
  inline int64_t num_outputs() const { return data_->num_outputs; }
  ArrayAccessor<struct TVMTensorInfo, TensorInfo> outputs();
  inline int64_t num_workspace_pools() const { return data_->num_workspace_pools; }
  ArrayAccessor<struct TVMTensorInfo, TensorInfo> workspace_pools();
  inline ::tvm::runtime::String mod_name() const { return ::tvm::runtime::String(data_->mod_name); }
  /*! \brief Access the raw underlying C struct. */
  const struct ::TVMMetadata* data() const { return data_; }
  ArrayAccessor<struct TVMConstantInfo, ConstantInfoMetadata> constant_pools();
  inline int64_t num_constant_pools() const { return data_->num_constant_pools; }
  TVM_DECLARE_FINAL_OBJECT_INFO(MetadataNode, MetadataBaseNode);

 private:
  // Non-owning pointer to the C-level metadata struct.
  const struct ::TVMMetadata* data_;
};

/*! \brief Reference class for MetadataNode. */
class Metadata : public MetadataBase {
 public:
  explicit Metadata(const struct ::TVMMetadata* data);
  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(Metadata, MetadataBase, MetadataNode);
};

/*!
 * \brief Object wrapper around the C-level ::TVMTensorInfo struct
 * (name, shape, and dtype of one tensor).
 */
class TensorInfoNode : public MetadataBaseNode {
 public:
  explicit TensorInfoNode(const struct ::TVMTensorInfo* data) : data_{data} {}
  static constexpr const char* _type_key = "metadata.TensorInfoNode";
  const char* get_c_struct_name() const override;
  inline ::tvm::runtime::String name() const { return ::tvm::runtime::String(data_->name); }
  inline int64_t num_shape() const { return data_->num_shape; }
  /*! \brief View over the shape array; no copy is made. */
  inline ::tvm::support::Span<const int64_t, int64_t> shape() const {
    return ::tvm::support::Span<const int64_t, int64_t>(data_->shape,
                                                        data_->shape + data_->num_shape);
  }
  inline ::tvm::runtime::DataType dtype() const { return ::tvm::runtime::DataType(data_->dtype); }
  /*! \brief Access the raw underlying C struct. */
  const struct ::TVMTensorInfo* data() const { return data_; }
  TVM_DECLARE_FINAL_OBJECT_INFO(TensorInfoNode, MetadataBaseNode);

 private:
  // Non-owning pointer to the C-level tensor-info struct.
  const struct ::TVMTensorInfo* data_;
};

/*! \brief Reference class for TensorInfoNode. */
class TensorInfo : public MetadataBase {
 public:
  explicit TensorInfo(const struct ::TVMTensorInfo* data);
  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(TensorInfo, MetadataBase, TensorInfoNode);
};

/*!
 * \brief Object wrapper around the C-level ::TVMConstantInfo struct
 * (a named constant plus its serialized NDArray payload).
 */
class ConstantInfoMetadataNode : public MetadataBaseNode {
 public:
  explicit ConstantInfoMetadataNode(const struct ::TVMConstantInfo* data) : data_{data} {}
  // This name should match TVMConstantInfo after processing
  static constexpr const char* _type_key = "metadata.ConstantInfoNode";
  const char* get_c_struct_name() const override;
  inline ::tvm::runtime::String name_hint() const {
    return ::tvm::runtime::String(data_->name_hint);
  }
  inline size_t byte_offset() const { return data_->byte_offset; }
  /*!
   * \brief Deserialize the constant payload into an NDArray.
   * \return The loaded array, or a default-constructed NDArray when data_len is 0.
   */
  inline ::tvm::runtime::NDArray data() const {
    ::tvm::runtime::NDArray ndarray;
    if (data_->data_len) {
      dmlc::MemoryFixedSizeStream bytes(const_cast<void*>(data_->data_bytes), data_->data_len);
      ndarray.Load(&bytes);
    }
    return ndarray;
  }
  TVM_DECLARE_FINAL_OBJECT_INFO(ConstantInfoMetadataNode, MetadataBaseNode);

 protected:
  // Non-owning pointer to the C-level constant-info struct.
  const struct ::TVMConstantInfo* data_;
};

/*! \brief Reference class for ConstantInfoMetadataNode. */
class ConstantInfoMetadata : public MetadataBase {
 public:
  explicit ConstantInfoMetadata(const struct ::TVMConstantInfo* data);
  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(ConstantInfoMetadata, MetadataBase,
                                        ConstantInfoMetadataNode);
};

}  // namespace metadata
}  // namespace runtime
}  // namespace tvm

#endif  // TVM_RUNTIME_METADATA_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/metadata_base.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/runtime/metadata_base.h
 * \brief Defines types which can be used in Metadata.
 */
#ifndef TVM_RUNTIME_METADATA_BASE_H_
#define TVM_RUNTIME_METADATA_BASE_H_

#include <tvm/ir/expr.h>
#include <tvm/runtime/object.h>

#include <memory>
#include <string>
#include <utility>
#include <vector>

namespace tvm {
namespace runtime {
namespace metadata {

/*!
 * \brief Common base class for all Metadata.
 *
 * This class is used in the visitor classes as a internal check to ensure that verify that all
 * parts of the Metadata struct used in codegen are Metadata objects.
 */
class MetadataBaseNode : public ::tvm::runtime::Object {
 public:
  /*! \brief Name of the C struct this node wraps, used by codegen. */
  virtual const char* get_c_struct_name() const = 0;

  static constexpr const char* _type_key = "metadata.MetadataBaseNode";
  TVM_DECLARE_BASE_OBJECT_INFO(MetadataBaseNode, ::tvm::runtime::Object);
};

/*! \brief Reference class for the common MetadataBaseNode class. */
class MetadataBase : public ::tvm::runtime::ObjectRef {
 public:
  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(MetadataBase, ::tvm::runtime::ObjectRef, MetadataBaseNode);
};

template <typename C, class Ref>
class ArrayAccessor;

/*! \brief An iterator implementation that lazily instantiates the C++ wrapping Metadata class. */
template <typename C, class Ref>
class ArrayIterator {
 public:
  ArrayIterator(size_t index, const ArrayAccessor<C, Ref>* parent)
      : index_{index}, parent_{parent} {}

  inline Ref operator*() { return (*parent_)[index_]; }

  // Increment saturates at parent_->size() instead of running past the end.
  inline ArrayIterator<C, Ref>& operator++() {
    if (index_ < parent_->size()) {
      index_++;
    }
    return *this;
  }

  inline bool operator==(const ArrayIterator<C, Ref>& other) const {
    return parent_ == other.parent_ && index_ == other.index_;
  }

  inline bool operator!=(const ArrayIterator<C, Ref>& other) const { return !operator==(other); }

 private:
  size_t index_;
  // Non-owning pointer to the accessor being iterated.
  const ArrayAccessor<C, Ref>* parent_;
};

/*! \brief A span-like class which permits access to Array fields with complex elements.
 *  These array fields should be accessed from C++ using the Metadata wrapper classes. This class
 *  lazily instantiates those wrappers as they are accessed.
 */
template <typename C, class Ref>
class ArrayAccessor {
 public:
  using value_type = Ref;
  using iterator = ArrayIterator<C, Ref>;
  using const_iterator = iterator;

  // The default template argument SFINAEs this constructor out unless Ref is
  // an ObjectRef subclass (i.e. a Metadata wrapper).
  template <typename T = typename std::enable_if<std::is_base_of<ObjectRef, Ref>::value>::type>
  ArrayAccessor(const C* data, size_t num_data) : data_{data}, num_data_{num_data} {}

  inline size_t size() const { return num_data_; }

  /*!
   * \brief Lazily wrap element \p index in a Ref.
   * \throws std::runtime_error if \p index is out of range.
   */
  inline Ref operator[](size_t index) const {
    if (index >= num_data_) {
      throw std::runtime_error("Index out of range");
    }
    return Ref(&data_[index]);
  }

  inline ArrayIterator<C, Ref> begin() const { return ArrayIterator<C, Ref>{0, this}; }

  inline ArrayIterator<C, Ref> end() const { return ArrayIterator<C, Ref>{num_data_, this}; }

 private:
  // Non-owning pointer to the underlying C array.
  const C* data_;
  size_t num_data_;
};

/*! \brief A specialization of ArrayAccessor for String.
 *  This class is needed because the String constructor signature is different from the typical
 *  Metadata subclass.
 */
template <>
class ArrayAccessor<const char*, ::tvm::runtime::String> {
 public:
  using value_type = ::tvm::runtime::String;
  using iterator = ArrayIterator<const char*, ::tvm::runtime::String>;
  using const_iterator = iterator;

  ArrayAccessor(const char** data, size_t num_data) : data_{data}, num_data_{num_data} {}

  inline size_t size() const { return num_data_; }

  /*!
   * \brief Copy element \p index into a runtime String.
   * \throws std::runtime_error if \p index is out of range.
   */
  inline ::tvm::runtime::String operator[](size_t index) const {
    if (index >= num_data_) {
      throw std::runtime_error("Index out of range");
    }
    return ::tvm::runtime::String(data_[index]);
  }

  inline ArrayIterator<const char*, ::tvm::runtime::String> begin() const {
    return ArrayIterator<const char*, ::tvm::runtime::String>{0, this};
  }

  inline ArrayIterator<const char*, ::tvm::runtime::String> end() const {
    return ArrayIterator<const char*, ::tvm::runtime::String>{num_data_, this};
  }

 private:
  // Non-owning pointer to the underlying C string array.
  const char** data_;
  size_t num_data_;
};

/*! \brief Enumerates the primitive types which can be part of a Metadata instance.
 *
 *  These are separate from TIR DataType because TIR does not model structs.
 */
enum MetadataKind : uint8_t {
  kUint64 = 0,
  kInt64 = 1,
  kBool = 2,
  kString = 3,
  kHandle = 4,
  kMetadata = 5,
};

/*! \brief Container for arrays in the metadata.
 *
 * Type information is needed when emitting arrays. This container augments the data field with
 * the necessary typing information.
*/ class MetadataArrayNode : public MetadataBaseNode { public: MetadataArrayNode(Array<ObjectRef> array, MetadataKind kind, const char* type_key) : array(::std::move(array)), kind{kind}, type_key{type_key} {} const char* get_c_struct_name() const final; std::string get_element_c_struct_name() const { CHECK(kind == MetadataKind::kMetadata) << "cannot get struct name for MetadataArray with kind=" << kind; constexpr int prefix_size = sizeof("metadata.") - 1; constexpr int suffix_size = sizeof("Node") - 1; std::string type_key_str(type_key); return std::string("TVM") + type_key_str.substr(prefix_size, type_key_str.size() - prefix_size - suffix_size); } Array<ObjectRef> array; /*! \brief Describes the storage class of the emitted struct member. */ MetadataKind kind; /*! \brief When `kind` is Metadata, type_key of the MetadataBaseNode used with this array. */ const char* type_key; static constexpr const char* _type_key = "metadata.MetadataArrayNode"; TVM_DECLARE_BASE_OBJECT_INFO(MetadataArrayNode, MetadataBaseNode); }; /*! \brief Reference class for MetadataArray. */ class MetadataArray : public MetadataBase { public: MetadataArray(Array<ObjectRef> array, MetadataKind kind, const char* struct_name); TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(MetadataArray, MetadataBase, MetadataArrayNode); }; } // namespace metadata } // namespace runtime } // namespace tvm #endif // TVM_RUNTIME_METADATA_BASE_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/metadata_types.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

// LINT_C_FILE

/*!
 * \file tvm/runtime/metadata_types.h
 * \brief Defines types which can be used in metadata here which
 * are also shared between C and C++ code bases.
 */
#ifndef TVM_RUNTIME_METADATA_TYPES_H_
#define TVM_RUNTIME_METADATA_TYPES_H_

#include <inttypes.h>
#include <tvm/runtime/c_runtime_api.h>

#ifdef __cplusplus
extern "C" {
#endif

/*!
 * \brief Top-level metadata structure. Holds all other metadata types.
 *
 * Each pointer member below is a parallel array whose length is given by the
 * adjacent num_* field.
 */
struct TVMMetadata {
  /*! \brief Version identifier for this metadata. */
  int64_t version;
  /*! \brief Inputs to the AOT run_model function.
   * The order of the elements is the same as in the arguments to run_model. That is to say,
   * this array specifies the first `num_inputs` arguments to run_model.
   */
  const struct TVMTensorInfo* inputs;
  /*! \brief Number of elements in `inputs` array. */
  int64_t num_inputs;
  /*! \brief Outputs of the AOT run_model function.
   * The order of the elements is the same as in the arguments to run_model. That is to say,
   * this array specifies the last `num_outputs` arguments to run_model.
   */
  const struct TVMTensorInfo* outputs;
  /*! \brief Number of elements in `outputs` array. */
  int64_t num_outputs;
  /*! \brief Workspace Memory Pools needed by the AOT main function.
   * The order of the elements is the same as in the arguments to run_model. That is to say,
   * this array specifies the last `num_workspace_pools` arguments to run_model.
   * NOTE(review): "last ... arguments" matches the `outputs` doc above — looks copied;
   * confirm the actual argument ordering of workspace pools against the AOT codegen.
   */
  const struct TVMTensorInfo* workspace_pools;
  /*! \brief Number of elements in `workspace_pools` array. */
  int64_t num_workspace_pools;
  /*! \brief Constant pools needed by the AOT main function.
   */
  const struct TVMConstantInfo* constant_pools;
  /*! \brief Number of elements in `constant_pools` array. */
  int64_t num_constant_pools;
  /*! \brief Name of the model, as passed to tvm.relay.build. */
  const char* mod_name;
};

/*!
 * \brief Describes one tensor argument to `run_model`.
 * NOTE: while TIR allows for other types of arguments, such as scalars, the AOT run_model
 * function does not currently accept these. Therefore it's not possible to express those
 * in this metadata. A future patch may modify this.
 */
struct TVMTensorInfo {
  /*! \brief Name of the tensor, as specified in the Relay program. */
  const char* name;
  /*! \brief Shape of the tensor; `num_shape` elements. */
  const int64_t* shape;
  /*! \brief Rank of this tensor (length of `shape`). */
  int64_t num_shape;
  /*! \brief Data type of one element of this tensor. */
  DLDataType dtype;
};

/*!
 * \brief Describes one constant argument to `run_model`.
 *
 */
struct TVMConstantInfo {
  /*! \brief Name of the constant */
  const char* name_hint;
  /*! \brief Offset in bytes of the constant */
  int64_t byte_offset;
  /*! \brief length of the data_bytes field */
  int64_t data_len;
  /*! \brief data bytes of serialized NDArray */
  const void* data_bytes;
};

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // TVM_RUNTIME_METADATA_TYPES_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/micro/standalone/microtvm_runtime.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/runtime/micro/standalone/microtvm_runtime.h
 * \brief C ABI surface of the standalone microTVM runtime.
 *
 * All entry points are exported with default visibility and C linkage so the
 * runtime can be driven from a shared library without C++ name mangling.
 * Handles are opaque `void*`; pairing Create/Destroy calls is the caller's job.
 */
#ifndef TVM_RUNTIME_MICRO_STANDALONE_MICROTVM_RUNTIME_H_
#define TVM_RUNTIME_MICRO_STANDALONE_MICROTVM_RUNTIME_H_

#include <stddef.h>
#include <stdint.h>

#define TVM_MICRO_RUNTIME_API_API extern "C" __attribute__((visibility("default")))

/*!
 * \brief Create a runtime instance.
 * \param json Serialized model description of length `json_len`
 *             (presumably graph-executor JSON — confirm against the implementation).
 * \param json_len Length of `json` in bytes.
 * \param module Module handle, e.g. from MicroTVMRuntimeDSOModuleCreate.
 * \return Opaque runtime handle; release with MicroTVMRuntimeDestroy.
 */
TVM_MICRO_RUNTIME_API_API void* MicroTVMRuntimeCreate(const char* json, size_t json_len,
                                                      void* module);

/*! \brief Destroy a runtime handle created by MicroTVMRuntimeCreate. */
TVM_MICRO_RUNTIME_API_API void MicroTVMRuntimeDestroy(void* handle);

/*! \brief Bind `tensor` as input number `index` of the runtime. */
TVM_MICRO_RUNTIME_API_API void MicroTVMRuntimeSetInput(void* handle, int index, void* tensor);

/*! \brief Execute the model once with the currently-set inputs. */
TVM_MICRO_RUNTIME_API_API void MicroTVMRuntimeRun(void* handle);

/*! \brief Copy output number `index` into `tensor`. */
TVM_MICRO_RUNTIME_API_API void MicroTVMRuntimeGetOutput(void* handle, int index, void* tensor);

/*!
 * \brief Load a DSO module from an in-memory shared object image.
 * \param so Bytes of the shared object, `so_len` long.
 * \param so_len Length of `so` in bytes.
 * \return Opaque module handle; release with MicroTVMRuntimeDSOModuleDestroy.
 */
TVM_MICRO_RUNTIME_API_API void* MicroTVMRuntimeDSOModuleCreate(const char* so, size_t so_len);

/*! \brief Destroy a module handle created by MicroTVMRuntimeDSOModuleCreate. */
TVM_MICRO_RUNTIME_API_API void MicroTVMRuntimeDSOModuleDestroy(void* module);

#undef TVM_MICRO_RUNTIME_API_API

#endif  // TVM_RUNTIME_MICRO_STANDALONE_MICROTVM_RUNTIME_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/module.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/runtime/module.h
 * \brief Runtime container of the functions generated by TVM,
 *  This is used to support dynamic linking, loading and saving of
 *  functions from different conventions under a unified API.
 */
#ifndef TVM_RUNTIME_MODULE_H_
#define TVM_RUNTIME_MODULE_H_

#include <dmlc/io.h>
#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/container/string.h>
#include <tvm/runtime/memory.h>
#include <tvm/runtime/object.h>

#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>

namespace tvm {
namespace runtime {

class ModuleNode;
class PackedFunc;

/*!
 * \brief Module container of TVM.
 */
class Module : public ObjectRef {
 public:
  Module() {}
  // constructor from container.
  explicit Module(ObjectPtr<Object> n) : ObjectRef(n) {}
  /*!
   * \brief Get packed function from current module by name.
   *
   * \param name The name of the function.
   * \param query_imports Whether also query dependency modules.
   * \return The result function.
   *  This function will return PackedFunc(nullptr) if function do not exist.
   * \note Implemented in packed_func.cc
   */
  inline PackedFunc GetFunction(const std::string& name, bool query_imports = false);
  // The following functions require linking with the runtime.
  /*!
   * \brief Import another module into this module.
   * \param other The module to be imported.
   *
   * \note Cyclic dependency is not allowed among modules,
   *  An error will be thrown when cyclic dependency is detected.
   */
  inline void Import(Module other);
  /*! \return internal container */
  inline ModuleNode* operator->();
  /*! \return internal container */
  inline const ModuleNode* operator->() const;
  /*!
   * \brief Load a module from file.
   * \param file_name The name of the host function module.
   * \param format The format of the file.
   * \note This function won't load the import relationship.
   *  Re-create import relationship by calling Import.
   */
  TVM_DLL static Module LoadFromFile(const std::string& file_name, const std::string& format = "");
  // refer to the corresponding container.
  using ContainerType = ModuleNode;
  friend class ModuleNode;
};

/*!
 * \brief Base container of module.
 *
 * Please subclass ModuleNode to create a specific runtime module.
 *
 * \code
 *
 *  class MyModuleNode : public ModuleNode {
 *   public:
 *    // implement the interface
 *  };
 *
 *  // use make_object to create a specific
 *  // instance of MyModuleNode.
 *  Module CreateMyModule() {
 *    ObjectPtr<MyModuleNode> n =
 *       tvm::runtime::make_object<MyModuleNode>();
 *    return Module(n);
 *  }
 *
 * \endcode
 */
class TVM_DLL ModuleNode : public Object {
 public:
  /*! \brief virtual destructor */
  virtual ~ModuleNode() = default;
  /*!
   * \return The per module type key.
   * \note This key is used to for serializing custom modules.
   */
  virtual const char* type_key() const = 0;
  /*!
   * \brief Get a PackedFunc from module.
   *
   * The PackedFunc may not be fully initialized,
   * there might still be first time running overhead when
   * executing the function on certain devices.
   * For benchmarking, use prepare to eliminate the first-run overhead.
   *
   * \param name the name of the function.
   * \param sptr_to_self The ObjectPtr that points to this module node.
   *
   * \return PackedFunc(nullptr) when it is not available.
   *
   * \note The function will always remain valid.
   *   If the function need resource from the module(e.g. late linking),
   *   it should capture sptr_to_self.
   */
  virtual PackedFunc GetFunction(const std::string& name,
                                 const ObjectPtr<Object>& sptr_to_self) = 0;
  /*!
   * \brief Save the module to file.
   * \param file_name The file to be saved to.
   * \param format The format of the file.
   */
  virtual void SaveToFile(const std::string& file_name, const std::string& format);
  /*!
   * \brief Save the module to binary stream.
   * \param stream The binary stream to save to.
   * \note It is recommended to implement this for device modules,
   *   but not necessarily host modules.
   *   We can use this to do AOT loading of bundled device functions.
   */
  virtual void SaveToBinary(dmlc::Stream* stream);
  /*!
   * \brief Get the source code of module, when available.
   * \param format Format of the source code, can be empty by default.
   * \return Possible source code when available.
   */
  virtual std::string GetSource(const std::string& format = "");
  /*!
   * \brief Get the format of the module, when available.
   * \return Possible format when available.
   */
  virtual std::string GetFormat();
  /*!
   * \brief Get packed function from current module by name.
   *
   * \param name The name of the function.
   * \param query_imports Whether also query dependency modules.
   * \return The result function.
   *  This function will return PackedFunc(nullptr) if function do not exist.
   * \note Implemented in packed_func.cc
   */
  PackedFunc GetFunction(const std::string& name, bool query_imports = false);
  /*!
   * \brief Import another module into this module.
   * \param other The module to be imported.
   *
   * \note Cyclic dependency is not allowed among modules,
   *  An error will be thrown when cyclic dependency is detected.
   */
  void Import(Module other);
  /*!
   * \brief Get a function from current environment
   *  The environment includes all the imports as well as Global functions.
   *
   * \param name name of the function.
   * \return The corresponding function.
   */
  const PackedFunc* GetFuncFromEnv(const std::string& name);
  /*! \return The module it imports from */
  const std::vector<Module>& imports() const { return imports_; }
  /*!
   * \brief Returns true if this module is 'DSO exportable'.
   *
   * A DSO exportable module (eg a CSourceModuleNode of type_key 'c') can be incorporated into the
   * final runtime artifact (ie shared library) by compilation and/or linking using the external
   * compiler (llvm, nvcc, etc). DSO exportable modules must implement SaveToFile.
   *
   * By contrast, non-DSO exportable modules (eg CUDAModuleNode of type_key 'cuda') typically must
   * be incorporated into the final runtime artifact by being serialized as data into the
   * artifact, then deserialized at runtime. Non-DSO exportable modules must implement SaveToBinary,
   * and have a matching deserializer registered as 'runtime.module.loadbinary_<type_key>'.
   *
   * The default implementation returns false.
   */
  virtual bool IsDSOExportable() const;
  /*!
   * \brief Returns true if this module has a definition for a function of \p name. If
   * \p query_imports is true, also search in any imported modules.
   *
   * Note that even if this function returns true the corresponding \p GetFunction result may be
   * nullptr if the function is not yet callable without further compilation.
   *
   * The default implementation just checks if \p GetFunction is non-null.
   */
  virtual bool ImplementsFunction(const String& name, bool query_imports = false);
  // integration with the existing components.
  static constexpr const uint32_t _type_index = TypeIndex::kRuntimeModule;
  static constexpr const char* _type_key = "runtime.Module";
  // NOTE: ModuleNode can still be sub-classed
  //       TVM_DECLARE_FINAL_OBJECT_INFO(ModuleNode, Object);

 protected:
  friend class Module;
  friend class ModuleInternal;
  /*! \brief The modules this module depend on */
  std::vector<Module> imports_;

 private:
  /*! \brief Cache used by GetImport */
  std::unordered_map<std::string, std::shared_ptr<PackedFunc>> import_cache_;
  // NOTE(review): presumably guards import_cache_ — confirm against module.cc.
  std::mutex mutex_;
};

/*!
 * \brief Check if runtime module is enabled for target.
 * \param target The target module name.
 * \return Whether runtime is enabled.
 */
TVM_DLL bool RuntimeEnabled(const std::string& target);

/*! \brief namespace for constant symbols */
namespace symbol {
/*! \brief A PackedFunc that retrieves exported metadata. */
constexpr const char* tvm_get_c_metadata = "get_c_metadata";
/*! \brief Global variable to store module context. */
constexpr const char* tvm_module_ctx = "__tvm_module_ctx";
/*! \brief Global variable to store device module blob */
constexpr const char* tvm_dev_mblob = "__tvm_dev_mblob";
/*! \brief Number of bytes of device module blob. */
constexpr const char* tvm_dev_mblob_nbytes = "__tvm_dev_mblob_nbytes";
/*! \brief global function to set device */
constexpr const char* tvm_set_device = "__tvm_set_device";
/*! \brief Auxiliary counter to global barrier. */
constexpr const char* tvm_global_barrier_state = "__tvm_global_barrier_state";
/*! \brief Prepare the global barrier before kernels that uses global barrier. */
constexpr const char* tvm_prepare_global_barrier = "__tvm_prepare_global_barrier";
/*! \brief Placeholder for the module's entry function. */
constexpr const char* tvm_module_main = "__tvm_main__";
/*! \brief Prefix for parameter symbols emitted into the main program. */
constexpr const char* tvm_param_prefix = "__tvm_param__";
/*! \brief A PackedFunc that looks up linked parameters by storage_id. */
constexpr const char* tvm_lookup_linked_param = "_lookup_linked_param";
/*! \brief Model entrypoint generated as an interface to the AOT function outside of TIR */
constexpr const char* tvm_entrypoint_suffix = "run";
}  // namespace symbol

// implementations of inline functions.
inline void Module::Import(Module other) { return (*this)->Import(other); } inline ModuleNode* Module::operator->() { return static_cast<ModuleNode*>(get_mutable()); } inline const ModuleNode* Module::operator->() const { return static_cast<const ModuleNode*>(get()); } inline std::ostream& operator<<(std::ostream& out, const Module& module) { out << "Module(type_key= "; out << module->type_key(); out << ")"; return out; } } // namespace runtime } // namespace tvm #include <tvm/runtime/packed_func.h> // NOLINT(*) #endif // TVM_RUNTIME_MODULE_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/name_transforms.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/runtime/name_transforms.h
 * \brief Transformations which are applied on names to generate appropriately
 *  named compiler artifacts. These functions are used in both Runtime and Backend.
 */
#ifndef TVM_RUNTIME_NAME_TRANSFORMS_H_
#define TVM_RUNTIME_NAME_TRANSFORMS_H_

#include <string>

namespace tvm {
namespace runtime {

/*!
 * \brief Sanitize name for output into compiler artifacts
 * \param name Original name
 * \return Sanitized name
 */
std::string SanitizeName(const std::string& name);

}  // namespace runtime
}  // namespace tvm

#endif  // TVM_RUNTIME_NAME_TRANSFORMS_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/ndarray.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/runtime/ndarray.h * \brief A device-independent managed NDArray abstraction. */ #ifndef TVM_RUNTIME_NDARRAY_H_ #define TVM_RUNTIME_NDARRAY_H_ #include <tvm/runtime/c_runtime_api.h> #include <tvm/runtime/container/optional.h> #include <tvm/runtime/container/shape_tuple.h> #include <tvm/runtime/container/string.h> #include <tvm/runtime/data_type.h> #include <tvm/runtime/object.h> #include <tvm/runtime/serializer.h> #include <atomic> #include <functional> #include <utility> #include <vector> namespace tvm { // alias DLDevice using Device = DLDevice; // A 'null' device type, does not correspond to any DLDeviceType enum. // TODO(mbs): This is to help us as we transition away from representing the 'homogenous' case // as a singleton target map indexed by the invalid DLDeviceType '0'. constexpr DLDeviceType kNullDeviceType = static_cast<DLDeviceType>(0); // An 'invalid' device type, does not correspond to any DLDeviceType enum. constexpr DLDeviceType kInvalidDeviceType = static_cast<DLDeviceType>(-1); namespace runtime { /*! * \brief Managed NDArray. * The array is backed by reference counted blocks. */ class NDArray : public ObjectRef { public: /*! 
\brief ContainerBase used to back the TVMArrayHandle */ class ContainerBase; /*! \brief NDArray internal container type */ class Container; /*! \brief Container type for Object system. */ using ContainerType = Container; /*! \brief default constructor */ NDArray() {} /*! * \brief constructor. * \param data ObjectPtr to the data container. */ explicit NDArray(ObjectPtr<Object> data) : ObjectRef(data) {} /*! \brief reset the content of NDArray to be nullptr */ inline void reset(); /*! * \return the reference counter * \note this number is approximate in multi-threaded setting. */ inline int use_count() const; /*! \return Pointer to content of DLTensor */ inline const DLTensor* operator->() const; /*! \return Whether the tensor is contiguous */ inline bool IsContiguous() const; /*! * \brief Copy data content from another array. * \param other The source array to be copied from. * \note The copy may happen asynchronously if it involves a GPU context. * TVMSynchronize is necessary. */ inline void CopyFrom(const DLTensor* other); inline void CopyFrom(const NDArray& other); /*! * \brief Copy data content from a byte buffer. * \param data The source bytes to be copied from. * \param nbytes The size of the buffer in bytes * Must be equal to the size of the NDArray. * \note The copy always triggers a TVMSynchronize. */ TVM_DLL void CopyFromBytes(const void* data, size_t nbytes); /*! * \brief Copy data content into another array. * \param other The source array to be copied from. * \note The copy may happen asynchronously if it involves a GPU context. * TVMSynchronize is necessary. */ inline void CopyTo(DLTensor* other) const; inline void CopyTo(const NDArray& other) const; /*! * \brief Copy data content into another array. * \param data The source bytes to be copied from. * \param nbytes The size of the data buffer. * Must be equal to the size of the NDArray. * \note The copy always triggers a TVMSynchronize. */ TVM_DLL void CopyToBytes(void* data, size_t nbytes) const; /*! 
* \brief Copy the data to another device. * \param dev The target device. * \return The array under another device. */ inline NDArray CopyTo(const Device& dev) const; /*! * \brief Load NDArray from stream * \param stream The input data stream * \return Whether load is successful */ inline bool Load(dmlc::Stream* stream); /*! * \brief Save NDArray to stream * \param stream The output data stream */ inline void Save(dmlc::Stream* stream) const; /*! * \brief Create a NDArray that shares the data memory with the current one. * \param shape The shape of the new array. * \param dtype The data type of the new array. * \note The memory size of new array must be smaller than the current one. */ TVM_DLL NDArray CreateView(ShapeTuple shape, DLDataType dtype); /*! * \brief Create a reference view of NDArray that * represents as DLManagedTensor. * \return A DLManagedTensor */ TVM_DLL DLManagedTensor* ToDLPack() const; /*! * \brief Create an empty NDArray. * \param shape The shape of the new array. * \param dtype The data type of the new array. * \param dev The device of the array. * \param mem_scope The memory scope of the array. * \return The created Array */ TVM_DLL static NDArray Empty(ShapeTuple shape, DLDataType dtype, Device dev, Optional<String> mem_scope = NullOpt); /*! * \brief Create a NDArray backed by an external DLTensor without memory copying. * * If DLTensor is not contiguous or has bad aligned data, It fails. * This allows us to create a NDArray using the memory * allocated by an external source. Responsibility for memory * retaining lies with the external source. * \param dl_tensor The DLTensor for NDArray base. * \return The created NDArray view. */ TVM_DLL static NDArray FromExternalDLTensor(const DLTensor& dl_tensor); /*! * \brief Create new NDArray, data is copied from DLTensor. * * \param dl_tensor The DLTensor to copy from. * \param dev device location of the created NDArray. * \return The created NDArray view. 
*/ TVM_DLL static NDArray NewFromDLTensor(DLTensor* dl_tensor, const Device& dev); /*! * \brief Create a NDArray backed by a dlpack tensor. * * This allows us to create a NDArray using the memory * allocated by an external deep learning framework * that is DLPack compatible. * * The memory is retained until the NDArray went out of scope. * \param tensor The DLPack tensor to copy from. * \return The created NDArray view. */ TVM_DLL static NDArray FromDLPack(DLManagedTensor* tensor); /*! * \brief Function to copy data from one array to another. * \param from The source array. * \param to The target array. * \param stream The stream used in copy. */ TVM_DLL static void CopyFromTo(const DLTensor* from, DLTensor* to, TVMStreamHandle stream = nullptr); TVM_DLL ShapeTuple Shape() const; TVM_DLL runtime::DataType DataType() const; /*! * \brief Check conditions for construction NDArray over DLTensor without copying. * There are three conditions to check: * 1. Destination device is the same as DLTensor device * 2. Destination device id is the same as DLTensor device id * 3. Memory in DLTensor is aligned as expected for NDArray * \param tensor the DLTensor. * \param dev destination device. * \return true if all conditions are satisfied. */ TVM_DLL static bool AbilityOfZeroCopyForDLTensor(DLTensor* tensor, const Device& dev); // internal namespace struct Internal; private: TVM_DLL static bool IsAligned(const DLTensor& tensor); protected: friend class TVMPODValue_; friend class TVMRetValue; friend class TVMArgsSetter; /*! * \brief Get mutable internal container pointer. * \return a mutable container pointer. */ inline Container* get_mutable() const; // Helper functions for FFI handling. /*! * \brief Construct NDArray's Data field from array handle in FFI. * \param handle The array handle. * \return The corresponding ObjectPtr to the constructed container object. * * \note We keep a special calling convention for NDArray by passing * ContainerBase pointer in FFI. 
* As a result, the argument is compatible to DLTensor*. */ inline static ObjectPtr<Object> FFIDataFromHandle(TVMArrayHandle handle); /*! * \brief DecRef resource managed by an FFI array handle. * \param handle The array handle. */ inline static void FFIDecRef(TVMArrayHandle handle); /*! * \brief Get FFI Array handle from ndarray. * \param nd The object with ndarray type. * \return The result array handle. */ inline static TVMArrayHandle FFIGetHandle(const ObjectRef& nd); }; /*! * \brief Save a DLTensor to stream * \param strm The output stream * \param tensor The tensor to be saved. */ inline bool SaveDLTensor(dmlc::Stream* strm, const DLTensor* tensor); /*! * \brief The container base structure * contains all the fields except for the Object header. * * \note We explicitly declare this structure in order to pass * PackedFunc argument using ContainerBase*. */ class NDArray::ContainerBase { public: /*! * \brief The corresponding dl_tensor field. * \note it is important that the first field is DLTensor * So that this data structure is DLTensor compatible. * The head ptr of this struct can be viewed as DLTensor*. */ DLTensor dl_tensor; /*! * \brief additional context, reserved for recycling * \note We can attach additional content here * which the current container depend on * (e.g. reference to original memory when creating views). */ void* manager_ctx{nullptr}; protected: /*! * \brief The shape container, * can be used used for shape data. */ ShapeTuple shape_; }; /*! * \brief Object container class that backs NDArray. * \note do not use this function directly, use NDArray. */ class NDArray::Container : public Object, public NDArray::ContainerBase { public: /*! \brief default constructor */ Container() { // Initialize the type index. 
type_index_ = Container::RuntimeTypeIndex(); dl_tensor.data = nullptr; dl_tensor.ndim = 0; dl_tensor.shape = nullptr; dl_tensor.strides = nullptr; dl_tensor.byte_offset = 0; } Container(void* data, ShapeTuple shape, DLDataType dtype, Device dev) { // Initialize the type index. type_index_ = Container::RuntimeTypeIndex(); dl_tensor.data = data; shape_ = std::move(shape); dl_tensor.ndim = static_cast<int>(shape_.size()); dl_tensor.shape = const_cast<ShapeTuple::index_type*>(shape_.data()); dl_tensor.dtype = dtype; dl_tensor.strides = nullptr; dl_tensor.byte_offset = 0; dl_tensor.device = dev; } /*! * \brief Set the deleter field. * \param deleter The deleter. */ void SetDeleter(FDeleter deleter) { deleter_ = deleter; } // Expose DecRef and IncRef as public function // NOTE: they are only for developer purposes only. using Object::DecRef; using Object::IncRef; // Information for object protocol. static constexpr const uint32_t _type_index = TypeIndex::kRuntimeNDArray; static constexpr const uint32_t _type_child_slots = 0; static constexpr const uint32_t _type_child_slots_can_overflow = true; static constexpr const char* _type_key = "runtime.NDArray"; TVM_DECLARE_BASE_OBJECT_INFO(NDArray::Container, Object); protected: friend class RPCWrappedFunc; friend class NDArray; }; // implementations of inline functions /*! * \brief return the size of data the DLTensor hold, in term of number of bytes * * \param arr the input DLTensor * \return number of bytes of data in the DLTensor. */ inline size_t GetDataSize(const DLTensor& arr) { size_t size = 1; for (tvm_index_t i = 0; i < arr.ndim; ++i) { size *= static_cast<size_t>(arr.shape[i]); } size *= (arr.dtype.bits * arr.dtype.lanes + 7) / 8; return size; } /*! * \brief check if a DLTensor is contiguous. * \param arr The input DLTensor. * \return The check result. 
*/ static inline bool IsContiguous(const DLTensor& arr) { if (arr.strides == nullptr) return true; int64_t expected_stride = 1; for (int32_t i = arr.ndim; i != 0; --i) { int32_t k = i - 1; if (arr.shape[k] == 1) { // Skip stride check if shape[k] is 1, where the dimension is contiguous // regardless of the value of stride. // // For example, PyTorch will normalize stride to 1 if shape is 1 when exporting // to DLPack. // More context: https://github.com/pytorch/pytorch/pull/83158 continue; } if (arr.strides[k] != expected_stride) return false; expected_stride *= arr.shape[k]; } return true; } inline bool NDArray::IsContiguous() const { return ::tvm::runtime::IsContiguous(get_mutable()->dl_tensor); } inline void NDArray::CopyFrom(const DLTensor* other) { ICHECK(data_ != nullptr); CopyFromTo(other, &(get_mutable()->dl_tensor)); } inline void NDArray::CopyFrom(const NDArray& other) { ICHECK(data_ != nullptr); ICHECK(other.data_ != nullptr); CopyFromTo(&(other.get_mutable()->dl_tensor), &(get_mutable()->dl_tensor)); } inline void NDArray::CopyTo(DLTensor* other) const { ICHECK(data_ != nullptr); CopyFromTo(&(get_mutable()->dl_tensor), other); } inline void NDArray::CopyTo(const NDArray& other) const { ICHECK(data_ != nullptr); ICHECK(other.data_ != nullptr); CopyFromTo(&(get_mutable()->dl_tensor), &(other.get_mutable()->dl_tensor)); } inline NDArray NDArray::CopyTo(const Device& dev) const { ICHECK(data_ != nullptr); const DLTensor* dptr = operator->(); NDArray ret = Empty(ShapeTuple(dptr->shape, dptr->shape + dptr->ndim), dptr->dtype, dev); this->CopyTo(ret); return ret; } inline int NDArray::use_count() const { return data_.use_count(); } inline const DLTensor* NDArray::operator->() const { return &(get_mutable()->dl_tensor); } inline NDArray::Container* NDArray::get_mutable() const { return static_cast<NDArray::Container*>(data_.get()); } inline ObjectPtr<Object> NDArray::FFIDataFromHandle(TVMArrayHandle handle) { return GetObjectPtr<Object>( 
static_cast<NDArray::Container*>(reinterpret_cast<NDArray::ContainerBase*>(handle))); } inline TVMArrayHandle NDArray::FFIGetHandle(const ObjectRef& nd) { // NOTE: it is necessary to cast to container then to base // so that the FFI handle uses the ContainerBase address. auto ptr = reinterpret_cast<TVMArrayHandle>(static_cast<NDArray::ContainerBase*>( static_cast<NDArray::Container*>(const_cast<Object*>(nd.get())))); return ptr; } inline void NDArray::FFIDecRef(TVMArrayHandle handle) { static_cast<NDArray::Container*>(reinterpret_cast<NDArray::ContainerBase*>(handle))->DecRef(); } inline Object* TVMArrayHandleToObjectHandle(TVMArrayHandle handle) { return static_cast<NDArray::Container*>(reinterpret_cast<NDArray::ContainerBase*>(handle)); } /*! \brief Magic number for NDArray file */ constexpr uint64_t kTVMNDArrayMagic = 0xDD5E40F096B4A13F; inline bool SaveDLTensor(dmlc::Stream* strm, const DLTensor* tensor) { uint64_t header = kTVMNDArrayMagic, reserved = 0; strm->Write(header); strm->Write(reserved); // Always save data as CPU context // // Parameters that get serialized should be in CPU by default. // So even the array's context is GPU, it will be stored as CPU array. // This is used to prevent case when another user loads the parameters // back on machine that do not have GPU or related context. // // We can always do array.CopyTo(target_dev) to get a corresponding // array in the target context. 
Device cpu_dev; cpu_dev.device_type = kDLCPU; cpu_dev.device_id = 0; strm->Write(cpu_dev); strm->Write(tensor->ndim); strm->Write(tensor->dtype); int ndim = tensor->ndim; strm->WriteArray(tensor->shape, ndim); int type_bytes = (tensor->dtype.bits + 7) / 8; int64_t num_elems = 1; for (int i = 0; i < ndim; ++i) { num_elems *= tensor->shape[i]; } int64_t data_byte_size = type_bytes * num_elems; strm->Write(data_byte_size); if (DMLC_IO_NO_ENDIAN_SWAP && tensor->device.device_type == kDLCPU && tensor->strides == nullptr && tensor->byte_offset == 0) { // quick path strm->Write(tensor->data, data_byte_size); } else { std::vector<uint8_t> bytes(data_byte_size); ICHECK_EQ( TVMArrayCopyToBytes(const_cast<DLTensor*>(tensor), dmlc::BeginPtr(bytes), data_byte_size), 0) << TVMGetLastError(); if (!DMLC_IO_NO_ENDIAN_SWAP) { dmlc::ByteSwap(dmlc::BeginPtr(bytes), type_bytes, num_elems); } strm->Write(dmlc::BeginPtr(bytes), data_byte_size); } return true; } inline void NDArray::Save(dmlc::Stream* strm) const { SaveDLTensor(strm, operator->()); } inline bool NDArray::Load(dmlc::Stream* strm) { uint64_t header, reserved; ICHECK(strm->Read(&header)) << "Invalid DLTensor file format"; ICHECK(strm->Read(&reserved)) << "Invalid DLTensor file format"; ICHECK(header == kTVMNDArrayMagic) << "Invalid DLTensor file format"; Device dev; int ndim; DLDataType dtype; ICHECK(strm->Read(&dev)) << "Invalid DLTensor file format"; ICHECK(strm->Read(&ndim)) << "Invalid DLTensor file format"; ICHECK(strm->Read(&dtype)) << "Invalid DLTensor file format"; ICHECK_EQ(dev.device_type, kDLCPU) << "Invalid DLTensor device: can only save as CPU tensor"; std::vector<int64_t> shape(ndim); if (ndim != 0) { ICHECK(strm->ReadArray(&shape[0], ndim)) << "Invalid DLTensor file format"; } NDArray ret = NDArray::Empty(ShapeTuple(shape), dtype, dev); int64_t num_elems = 1; int elem_bytes = (ret->dtype.bits + 7) / 8; for (int i = 0; i < ret->ndim; ++i) { num_elems *= ret->shape[i]; } int64_t data_byte_size; 
ICHECK(strm->Read(&data_byte_size)) << "Invalid DLTensor file format"; ICHECK(data_byte_size == num_elems * elem_bytes) << "Invalid DLTensor file format"; auto read_ret = strm->Read(ret->data, data_byte_size); // Only check non-empty data if (ndim > 0 && shape[0] != 0) { ICHECK(read_ret) << "Invalid DLTensor file format"; } if (!DMLC_IO_NO_ENDIAN_SWAP) { dmlc::ByteSwap(ret->data, elem_bytes, num_elems); } *this = ret; return true; } } // namespace runtime } // namespace tvm namespace std { template <> struct hash<tvm::Device> { std::size_t operator()(const tvm::Device& dev) const { return ((dev.device_id << 8) | dev.device_type); } }; template <> struct equal_to<tvm::Device> { bool operator()(const tvm::Device& lhs, const tvm::Device& rhs) const { return (lhs.device_type == rhs.device_type && lhs.device_id == rhs.device_id); } }; } // namespace std #endif // TVM_RUNTIME_NDARRAY_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/object.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/runtime/object.h * \brief A managed object in the TVM runtime. */ #ifndef TVM_RUNTIME_OBJECT_H_ #define TVM_RUNTIME_OBJECT_H_ #include <tvm/runtime/c_runtime_api.h> #include <tvm/runtime/logging.h> #include <string> #include <type_traits> #include <utility> /*! * \brief Whether or not use atomic reference counter. * If the reference counter is not atomic, * an object cannot be owned by multiple threads. * We can, however, move an object across threads */ #ifndef TVM_OBJECT_ATOMIC_REF_COUNTER #define TVM_OBJECT_ATOMIC_REF_COUNTER 1 #endif #if TVM_OBJECT_ATOMIC_REF_COUNTER #include <atomic> #endif // TVM_OBJECT_ATOMIC_REF_COUNTER namespace tvm { namespace runtime { /*! * \brief Namespace for the list of type index. * \note Use struct so that we have to use TypeIndex::ENumName to refer to * the constant, but still able to use enum. */ struct TypeIndex { enum { /*! \brief Root object type. */ kRoot = 0, // Standard static index assignments, // Frontends can take benefit of these constants. /*! \brief runtime::Module. */ kRuntimeModule = 1, /*! \brief runtime::NDArray. */ kRuntimeNDArray = 2, /*! \brief runtime::String. */ kRuntimeString = 3, /*! \brief runtime::Array. 
*/ kRuntimeArray = 4, /*! \brief runtime::Map. */ kRuntimeMap = 5, /*! \brief runtime::ShapeTuple. */ kRuntimeShapeTuple = 6, /*! \brief runtime::PackedFunc. */ kRuntimePackedFunc = 7, // static assignments that may subject to change. kRuntimeClosure, kRuntimeADT, kStaticIndexEnd, /*! \brief Type index is allocated during runtime. */ kDynamic = kStaticIndexEnd }; }; // namespace TypeIndex /*! * \brief base class of all object containers. * * Sub-class of objects should declare the following static constexpr fields: * * - _type_index: * Static type index of the object, if assigned to TypeIndex::kDynamic * the type index will be assigned during runtime. * Runtime type index can be accessed by ObjectType::TypeIndex(); * - _type_key: * The unique string identifier of the type. * - _type_final: * Whether the type is terminal type(there is no subclass of the type in the object system). * This field is automatically set by macro TVM_DECLARE_FINAL_OBJECT_INFO * It is still OK to sub-class a terminal object type T and construct it using make_object. * But IsInstance check will only show that the object type is T(instead of the sub-class). * * The following two fields are necessary for base classes that can be sub-classed. * * - _type_child_slots: * Number of reserved type index slots for child classes. * Used for runtime optimization for type checking in IsInstance. * If an object's type_index is within range of [type_index, type_index + _type_child_slots] * Then the object can be quickly decided as sub-class of the current object class. * If not, a fallback mechanism is used to check the global type table. * Recommendation: set to estimate number of children needed. * - _type_child_slots_can_overflow: * Whether we can add additional child classes even if the number of child classes * exceeds the _type_child_slots. A fallback mechanism to check global type table will be * used. Recommendation: set to false for optimal runtime speed if we know exact number of children. 
* * Two macros are used to declare helper functions in the object: * - Use TVM_DECLARE_BASE_OBJECT_INFO for object classes that can be sub-classed. * - Use TVM_DECLARE_FINAL_OBJECT_INFO for object classes that cannot be sub-classed. * * New objects can be created using make_object function. * Which will automatically populate the type_index and deleter of the object. * * \sa make_object * \sa ObjectPtr * \sa ObjectRef * * \code * * // Create a base object * class BaseObj : public Object { * public: * // object fields * int field0; * * // object properties * static constexpr const uint32_t _type_index = TypeIndex::kDynamic; * static constexpr const char* _type_key = "test.BaseObj"; * TVM_DECLARE_BASE_OBJECT_INFO(BaseObj, Object); * }; * * class LeafObj : public BaseObj { * public: * // fields * int child_field0; * // object properties * static constexpr const uint32_t _type_index = TypeIndex::kDynamic; * static constexpr const char* _type_key = "test.LeafObj"; * TVM_DECLARE_BASE_OBJECT_INFO(LeafObj, Object); * }; * * // The following code should be put into a cc file. * TVM_REGISTER_OBJECT_TYPE(BaseObj); * TVM_REGISTER_OBJECT_TYPE(LeafObj); * * // Usage example. * void TestObjects() { * // create an object * ObjectRef leaf_ref(make_object<LeafObj>()); * // cast to a specific instance * const LeafObj* leaf_ptr = leaf_ref.as<LeafObj>(); * ICHECK(leaf_ptr != nullptr); * // can also cast to the base class. * ICHECK(leaf_ref.as<BaseObj>() != nullptr); * } * * \endcode */ class TVM_DLL Object { public: /*! * \brief Object deleter * \param self pointer to the Object. */ typedef void (*FDeleter)(Object* self); /*! \return The internal runtime type index of the object. */ uint32_t type_index() const { return type_index_; } /*! * \return the type key of the object. * \note this operation is expensive, can be used for error reporting. */ std::string GetTypeKey() const { return TypeIndex2Key(type_index_); } /*! * \return A hash value of the return of GetTypeKey. 
*/ size_t GetTypeKeyHash() const { return TypeIndex2KeyHash(type_index_); } /*! * Check if the object is an instance of TargetType. * \tparam TargetType The target type to be checked. * \return Whether the target type is true. */ template <typename TargetType> inline bool IsInstance() const; /*! * \return Whether the cell has only one reference * \note We use stl style naming to be consistent with known API in shared_ptr. */ inline bool unique() const; /*! * \brief Get the type key of the corresponding index from runtime. * \param tindex The type index. * \return the result. */ static std::string TypeIndex2Key(uint32_t tindex); /*! * \brief Get the type key hash of the corresponding index from runtime. * \param tindex The type index. * \return the related key-hash. */ static size_t TypeIndex2KeyHash(uint32_t tindex); /*! * \brief Get the type index of the corresponding key from runtime. * \param key The type key. * \return the result. */ static uint32_t TypeKey2Index(const std::string& key); #if TVM_OBJECT_ATOMIC_REF_COUNTER using RefCounterType = std::atomic<int32_t>; #else using RefCounterType = int32_t; #endif static constexpr const char* _type_key = "runtime.Object"; static uint32_t _GetOrAllocRuntimeTypeIndex() { return TypeIndex::kRoot; } static uint32_t RuntimeTypeIndex() { return TypeIndex::kRoot; } // Default object type properties for sub-classes static constexpr bool _type_final = false; static constexpr uint32_t _type_child_slots = 0; static constexpr bool _type_child_slots_can_overflow = true; // member information static constexpr bool _type_has_method_visit_attrs = true; static constexpr bool _type_has_method_sequal_reduce = false; static constexpr bool _type_has_method_shash_reduce = false; // NOTE: the following field is not type index of Object // but was intended to be used by sub-classes as default value. 
// The type index of Object is TypeIndex::kRoot static constexpr uint32_t _type_index = TypeIndex::kDynamic; // Default constructor and copy constructor Object() {} // Override the copy and assign constructors to do nothing. // This is to make sure only contents, but not deleter and ref_counter // are copied when a child class copies itself. // This will enable us to use make_object<ObjectClass>(*obj_ptr) // to copy an existing object. Object(const Object& other) { // NOLINT(*) } Object(Object&& other) { // NOLINT(*) } Object& operator=(const Object& other) { // NOLINT(*) return *this; } Object& operator=(Object&& other) { // NOLINT(*) return *this; } protected: // The fields of the base object cell. /*! \brief Type index(tag) that indicates the type of the object. */ uint32_t type_index_{0}; /*! \brief The internal reference counter */ RefCounterType ref_counter_{0}; /*! * \brief deleter of this object to enable customized allocation. * If the deleter is nullptr, no deletion will be performed. * The creator of the object must always set the deleter field properly. */ FDeleter deleter_ = nullptr; // Invariant checks. static_assert(sizeof(int32_t) == sizeof(RefCounterType) && alignof(int32_t) == sizeof(RefCounterType), "RefCounter ABI check."); /*! * \brief Get the type index using type key. * * When the function is first time called for a type, * it will register the type to the type table in the runtime. * If the static_tindex is TypeIndex::kDynamic, the function will * allocate a runtime type index. * Otherwise, we will populate the type table and return the static index. * * \param key the type key. * \param static_tindex The current _type_index field. * can be TypeIndex::kDynamic. * \param parent_tindex The index of the parent. * \param type_child_slots Number of slots reserved for its children. * \param type_child_slots_can_overflow Whether to allow child to overflow the slots. * \return The allocated type index. 
*/ static uint32_t GetOrAllocRuntimeTypeIndex(const std::string& key, uint32_t static_tindex, uint32_t parent_tindex, uint32_t type_child_slots, bool type_child_slots_can_overflow); // reference counter related operations /*! \brief developer function, increases reference counter. */ inline void IncRef(); /*! * \brief developer function, decrease reference counter. * \note The deleter will be called when ref_counter_ becomes zero. */ inline void DecRef(); private: /*! * \return The usage count of the cell. * \note We use stl style naming to be consistent with known API in shared_ptr. */ inline int use_count() const; /*! * \brief Check of this object is derived from the parent. * \param parent_tindex The parent type index. * \return The derivation results. */ bool DerivedFrom(uint32_t parent_tindex) const; // friend classes template <typename> friend class ObjAllocatorBase; template <typename> friend class ObjectPtr; friend class TVMRetValue; friend class ObjectInternal; }; /*! * \brief Get a reference type from a raw object ptr type * * It is always important to get a reference type * if we want to return a value as reference or keep * the object alive beyond the scope of the function. * * \param ptr The object pointer * \tparam RefType The reference type * \tparam ObjectType The object type * \return The corresponding RefType */ template <typename RelayRefType, typename ObjectType> inline RelayRefType GetRef(const ObjectType* ptr); /*! * \brief Downcast a base reference type to a more specific type. * * \param ref The input reference * \return The corresponding SubRef. * \tparam SubRef The target specific reference type. * \tparam BaseRef the current reference type. */ template <typename SubRef, typename BaseRef> inline SubRef Downcast(BaseRef ref); /*! * \brief A custom smart pointer for Object. * \tparam T the content data type. * \sa make_object */ template <typename T> class ObjectPtr { public: /*! \brief default constructor */ ObjectPtr() {} /*! 
\brief default constructor */ ObjectPtr(std::nullptr_t) {} // NOLINT(*) /*! * \brief copy constructor * \param other The value to be moved */ ObjectPtr(const ObjectPtr<T>& other) // NOLINT(*) : ObjectPtr(other.data_) {} /*! * \brief copy constructor * \param other The value to be moved */ template <typename U> ObjectPtr(const ObjectPtr<U>& other) // NOLINT(*) : ObjectPtr(other.data_) { static_assert(std::is_base_of<T, U>::value, "can only assign of child class ObjectPtr to parent"); } /*! * \brief move constructor * \param other The value to be moved */ ObjectPtr(ObjectPtr<T>&& other) // NOLINT(*) : data_(other.data_) { other.data_ = nullptr; } /*! * \brief move constructor * \param other The value to be moved */ template <typename Y> ObjectPtr(ObjectPtr<Y>&& other) // NOLINT(*) : data_(other.data_) { static_assert(std::is_base_of<T, Y>::value, "can only assign of child class ObjectPtr to parent"); other.data_ = nullptr; } /*! \brief destructor */ ~ObjectPtr() { this->reset(); } /*! * \brief Swap this array with another Object * \param other The other Object */ void swap(ObjectPtr<T>& other) { // NOLINT(*) std::swap(data_, other.data_); } /*! * \return Get the content of the pointer */ T* get() const { return static_cast<T*>(data_); } /*! * \return The pointer */ T* operator->() const { return get(); } /*! * \return The reference */ T& operator*() const { // NOLINT(*) return *get(); } /*! * \brief copy assignment * \param other The value to be assigned. * \return reference to self. */ ObjectPtr<T>& operator=(const ObjectPtr<T>& other) { // NOLINT(*) // takes in plane operator to enable copy elison. // copy-and-swap idiom ObjectPtr(other).swap(*this); // NOLINT(*) return *this; } /*! * \brief move assignment * \param other The value to be assigned. * \return reference to self. */ ObjectPtr<T>& operator=(ObjectPtr<T>&& other) { // NOLINT(*) // copy-and-swap idiom ObjectPtr(std::move(other)).swap(*this); // NOLINT(*) return *this; } /*! 
* \brief nullptr check * \return result of comparison of internal pointer with nullptr. */ explicit operator bool() const { return get() != nullptr; } /*! \brief reset the content of ptr to be nullptr */ void reset() { if (data_ != nullptr) { data_->DecRef(); data_ = nullptr; } } /*! \return The use count of the ptr, for debug purposes */ int use_count() const { return data_ != nullptr ? data_->use_count() : 0; } /*! \return whether the reference is unique */ bool unique() const { return data_ != nullptr && data_->use_count() == 1; } /*! \return Whether two ObjectPtr do not equal each other */ bool operator==(const ObjectPtr<T>& other) const { return data_ == other.data_; } /*! \return Whether two ObjectPtr equals each other */ bool operator!=(const ObjectPtr<T>& other) const { return data_ != other.data_; } /*! \return Whether the pointer is nullptr */ bool operator==(std::nullptr_t null) const { return data_ == nullptr; } /*! \return Whether the pointer is not nullptr */ bool operator!=(std::nullptr_t null) const { return data_ != nullptr; } private: /*! \brief internal pointer field */ Object* data_{nullptr}; /*! * \brief constructor from Object * \param data The data pointer */ explicit ObjectPtr(Object* data) : data_(data) { if (data != nullptr) { data_->IncRef(); } } /*! * \brief Move an ObjectPtr from an RValueRef argument. * \param ref The rvalue reference. * \return the moved result. 
*/ static ObjectPtr<T> MoveFromRValueRefArg(Object** ref) { ObjectPtr<T> ptr; ptr.data_ = *ref; *ref = nullptr; return ptr; } // friend classes friend class Object; friend class ObjectRef; friend struct ObjectPtrHash; template <typename> friend class ObjectPtr; template <typename> friend class ObjAllocatorBase; friend class TVMPODValue_; friend class TVMArgsSetter; friend class TVMRetValue; friend class TVMArgValue; friend class TVMMovableArgValue_; template <typename RelayRefType, typename ObjType> friend RelayRefType GetRef(const ObjType* ptr); template <typename BaseType, typename ObjType> friend ObjectPtr<BaseType> GetObjectPtr(ObjType* ptr); }; /*! \brief Base class of all object reference */ class ObjectRef { public: /*! \brief default constructor */ ObjectRef() = default; /*! \brief Constructor from existing object ptr */ explicit ObjectRef(ObjectPtr<Object> data) : data_(data) {} /*! * \brief Comparator * \param other Another object ref. * \return the compare result. */ bool same_as(const ObjectRef& other) const { return data_ == other.data_; } /*! * \brief Comparator * \param other Another object ref. * \return the compare result. */ bool operator==(const ObjectRef& other) const { return data_ == other.data_; } /*! * \brief Comparator * \param other Another object ref. * \return the compare result. */ bool operator!=(const ObjectRef& other) const { return data_ != other.data_; } /*! * \brief Comparator * \param other Another object ref by address. * \return the compare result. */ bool operator<(const ObjectRef& other) const { return data_.get() < other.data_.get(); } /*! * \return whether the object is defined(not null). */ bool defined() const { return data_ != nullptr; } /*! \return the internal object pointer */ const Object* get() const { return data_.get(); } /*! \return the internal object pointer */ const Object* operator->() const { return get(); } /*! \return whether the reference is unique */ bool unique() const { return data_.unique(); } /*! 
\return The use count of the ptr, for debug purposes */ int use_count() const { return data_.use_count(); } /*! * \brief Try to downcast the internal Object to a * raw pointer of a corresponding type. * * The function will return a nullptr if the cast failed. * * if (const Add *add = node_ref.As<Add>()) { * // This is an add node * } * \tparam ObjectType the target type, must be a subtype of Object/ */ template <typename ObjectType> inline const ObjectType* as() const; /*! \brief type indicate the container type. */ using ContainerType = Object; // Default type properties for the reference class. static constexpr bool _type_is_nullable = true; protected: /*! \brief Internal pointer that backs the reference. */ ObjectPtr<Object> data_; /*! \return return a mutable internal ptr, can be used by sub-classes. */ Object* get_mutable() const { return data_.get(); } /*! * \brief Internal helper function downcast a ref without check. * \note Only used for internal dev purposes. * \tparam T The target reference type. * \return The casted result. */ template <typename T> static T DowncastNoCheck(ObjectRef ref) { return T(std::move(ref.data_)); } /*! * \brief Clear the object ref data field without DecRef * after we successfully moved the field. * \param ref The reference data. */ static void FFIClearAfterMove(ObjectRef* ref) { ref->data_.data_ = nullptr; } /*! * \brief Internal helper function get data_ as ObjectPtr of ObjectType. * \note only used for internal dev purpose. * \tparam ObjectType The corresponding object type. * \return the corresponding type. */ template <typename ObjectType> static ObjectPtr<ObjectType> GetDataPtr(const ObjectRef& ref) { return ObjectPtr<ObjectType>(ref.data_.data_); } // friend classes. friend struct ObjectPtrHash; friend class TVMRetValue; friend class TVMArgsSetter; friend class ObjectInternal; template <typename SubRef, typename BaseRef> friend SubRef Downcast(BaseRef ref); }; /*! * \brief Get an object ptr type from a raw object ptr. 
* * \param ptr The object pointer * \tparam BaseType The reference type * \tparam ObjectType The object type * \return The corresponding RefType */ template <typename BaseType, typename ObjectType> inline ObjectPtr<BaseType> GetObjectPtr(ObjectType* ptr); /*! \brief ObjectRef hash functor */ struct ObjectPtrHash { size_t operator()(const ObjectRef& a) const { return operator()(a.data_); } template <typename T> size_t operator()(const ObjectPtr<T>& a) const { return std::hash<Object*>()(a.get()); } }; /*! \brief ObjectRef equal functor */ struct ObjectPtrEqual { bool operator()(const ObjectRef& a, const ObjectRef& b) const { return a.same_as(b); } template <typename T> size_t operator()(const ObjectPtr<T>& a, const ObjectPtr<T>& b) const { return a == b; } }; /*! * \brief helper macro to declare a base object type that can be inherited. * \param TypeName The name of the current type. * \param ParentType The name of the ParentType */ #define TVM_DECLARE_BASE_OBJECT_INFO(TypeName, ParentType) \ static_assert(!ParentType::_type_final, "ParentObj marked as final"); \ static uint32_t RuntimeTypeIndex() { \ static_assert(TypeName::_type_child_slots == 0 || ParentType::_type_child_slots == 0 || \ TypeName::_type_child_slots < ParentType::_type_child_slots, \ "Need to set _type_child_slots when parent specifies it."); \ if (TypeName::_type_index != ::tvm::runtime::TypeIndex::kDynamic) { \ return TypeName::_type_index; \ } \ return _GetOrAllocRuntimeTypeIndex(); \ } \ static uint32_t _GetOrAllocRuntimeTypeIndex() { \ static uint32_t tindex = Object::GetOrAllocRuntimeTypeIndex( \ TypeName::_type_key, TypeName::_type_index, ParentType::_GetOrAllocRuntimeTypeIndex(), \ TypeName::_type_child_slots, TypeName::_type_child_slots_can_overflow); \ return tindex; \ } /*! * \brief helper macro to declare type information in a final class. * \param TypeName The name of the current type. 
* \param ParentType The name of the ParentType */ #define TVM_DECLARE_FINAL_OBJECT_INFO(TypeName, ParentType) \ static const constexpr bool _type_final = true; \ static const constexpr int _type_child_slots = 0; \ TVM_DECLARE_BASE_OBJECT_INFO(TypeName, ParentType) /*! \brief helper macro to suppress unused warning */ #if defined(__GNUC__) #define TVM_ATTRIBUTE_UNUSED __attribute__((unused)) #else #define TVM_ATTRIBUTE_UNUSED #endif #define TVM_STR_CONCAT_(__x, __y) __x##__y #define TVM_STR_CONCAT(__x, __y) TVM_STR_CONCAT_(__x, __y) #define TVM_OBJECT_REG_VAR_DEF static TVM_ATTRIBUTE_UNUSED uint32_t __make_Object_tid /*! * \brief Helper macro to register the object type to runtime. * Makes sure that the runtime type table is correctly populated. * * Use this macro in the cc file for each terminal class. */ #define TVM_REGISTER_OBJECT_TYPE(TypeName) \ TVM_STR_CONCAT(TVM_OBJECT_REG_VAR_DEF, __COUNTER__) = TypeName::_GetOrAllocRuntimeTypeIndex() /* * \brief Define the default copy/move constructor and assign operator * \param TypeName The class typename. */ #define TVM_DEFINE_DEFAULT_COPY_MOVE_AND_ASSIGN(TypeName) \ TypeName(const TypeName& other) = default; \ TypeName(TypeName&& other) = default; \ TypeName& operator=(const TypeName& other) = default; \ TypeName& operator=(TypeName&& other) = default; /* * \brief Define object reference methods. * \param TypeName The object type name * \param ParentType The parent type of the objectref * \param ObjectName The type name of the object. 
*/ #define TVM_DEFINE_OBJECT_REF_METHODS(TypeName, ParentType, ObjectName) \ TypeName() = default; \ explicit TypeName(::tvm::runtime::ObjectPtr<::tvm::runtime::Object> n) : ParentType(n) {} \ TVM_DEFINE_DEFAULT_COPY_MOVE_AND_ASSIGN(TypeName); \ const ObjectName* operator->() const { return static_cast<const ObjectName*>(data_.get()); } \ const ObjectName* get() const { return operator->(); } \ using ContainerType = ObjectName; /* * \brief Define object reference methods that is not nullable. * * \param TypeName The object type name * \param ParentType The parent type of the objectref * \param ObjectName The type name of the object. */ #define TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(TypeName, ParentType, ObjectName) \ explicit TypeName(::tvm::runtime::ObjectPtr<::tvm::runtime::Object> n) : ParentType(n) {} \ TVM_DEFINE_DEFAULT_COPY_MOVE_AND_ASSIGN(TypeName); \ const ObjectName* operator->() const { return static_cast<const ObjectName*>(data_.get()); } \ const ObjectName* get() const { return operator->(); } \ static constexpr bool _type_is_nullable = false; \ using ContainerType = ObjectName; /* * \brief Define object reference methods of whose content is mutable. * \param TypeName The object type name * \param ParentType The parent type of the objectref * \param ObjectName The type name of the object. * \note We recommend making objects immutable when possible. * This macro is only reserved for objects that stores runtime states. */ #define TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(TypeName, ParentType, ObjectName) \ TypeName() = default; \ TVM_DEFINE_DEFAULT_COPY_MOVE_AND_ASSIGN(TypeName); \ explicit TypeName(::tvm::runtime::ObjectPtr<::tvm::runtime::Object> n) : ParentType(n) {} \ ObjectName* operator->() const { return static_cast<ObjectName*>(data_.get()); } \ using ContainerType = ObjectName; /* * \brief Define object reference methods that is both not nullable and mutable. 
* * \param TypeName The object type name * \param ParentType The parent type of the objectref * \param ObjectName The type name of the object. */ #define TVM_DEFINE_MUTABLE_NOTNULLABLE_OBJECT_REF_METHODS(TypeName, ParentType, ObjectName) \ explicit TypeName(::tvm::runtime::ObjectPtr<::tvm::runtime::Object> n) : ParentType(n) {} \ TVM_DEFINE_DEFAULT_COPY_MOVE_AND_ASSIGN(TypeName); \ ObjectName* operator->() const { return static_cast<ObjectName*>(data_.get()); } \ ObjectName* get() const { return operator->(); } \ static constexpr bool _type_is_nullable = false; \ using ContainerType = ObjectName; /*! * \brief Define CopyOnWrite function in an ObjectRef. * \param ObjectName The Type of the Node. * * CopyOnWrite will generate a unique copy of the internal node. * The node will be copied if it is referenced by multiple places. * The function returns the raw pointer to the node to allow modification * of the content. * * \code * * MyCOWObjectRef ref, ref2; * ref2 = ref; * ref.CopyOnWrite()->value = new_value; * assert(ref2->value == old_value); * assert(ref->value == new_value); * * \endcode */ #define TVM_DEFINE_OBJECT_REF_COW_METHOD(ObjectName) \ ObjectName* CopyOnWrite() { \ ICHECK(data_ != nullptr); \ if (!data_.unique()) { \ auto n = make_object<ObjectName>(*(operator->())); \ ObjectPtr<Object>(std::move(n)).swap(data_); \ } \ return static_cast<ObjectName*>(data_.get()); \ } // Implementations details below // Object reference counting. 
#if TVM_OBJECT_ATOMIC_REF_COUNTER inline void Object::IncRef() { ref_counter_.fetch_add(1, std::memory_order_relaxed); } inline void Object::DecRef() { if (ref_counter_.fetch_sub(1, std::memory_order_release) == 1) { std::atomic_thread_fence(std::memory_order_acquire); if (this->deleter_ != nullptr) { (*this->deleter_)(this); } } } inline int Object::use_count() const { return ref_counter_.load(std::memory_order_relaxed); } #else inline void Object::IncRef() { ++ref_counter_; } inline void Object::DecRef() { if (--ref_counter_ == 0) { if (this->deleter_ != nullptr) { (*this->deleter_)(this); } } } inline int Object::use_count() const { return ref_counter_; } #endif // TVM_OBJECT_ATOMIC_REF_COUNTER template <typename TargetType> inline bool Object::IsInstance() const { const Object* self = this; // NOTE: the following code can be optimized by // compiler dead-code elimination for already known constants. if (self != nullptr) { // Everything is a subclass of object. if (std::is_same<TargetType, Object>::value) return true; if (TargetType::_type_final) { // if the target type is a final type // then we only need to check the equivalence. return self->type_index_ == TargetType::RuntimeTypeIndex(); } else { // if target type is a non-leaf type // Check if type index falls into the range of reserved slots. uint32_t begin = TargetType::RuntimeTypeIndex(); // The condition will be optimized by constant-folding. if (TargetType::_type_child_slots != 0) { uint32_t end = begin + TargetType::_type_child_slots; if (self->type_index_ >= begin && self->type_index_ < end) return true; } else { if (self->type_index_ == begin) return true; } if (!TargetType::_type_child_slots_can_overflow) return false; // Invariance: parent index is always smaller than the child. if (self->type_index_ < TargetType::RuntimeTypeIndex()) return false; // The rare slower-path, check type hierarchy. 
return self->DerivedFrom(TargetType::RuntimeTypeIndex()); } } else { return false; } } inline bool Object::unique() const { return use_count() == 1; } template <typename ObjectType> inline const ObjectType* ObjectRef::as() const { if (data_ != nullptr && data_->IsInstance<ObjectType>()) { return static_cast<ObjectType*>(data_.get()); } else { return nullptr; } } template <typename RefType, typename ObjType> inline RefType GetRef(const ObjType* ptr) { static_assert(std::is_base_of<typename RefType::ContainerType, ObjType>::value, "Can only cast to the ref of same container type"); if (!RefType::_type_is_nullable) { ICHECK(ptr != nullptr); } return RefType(ObjectPtr<Object>(const_cast<Object*>(static_cast<const Object*>(ptr)))); } template <typename BaseType, typename ObjType> inline ObjectPtr<BaseType> GetObjectPtr(ObjType* ptr) { static_assert(std::is_base_of<BaseType, ObjType>::value, "Can only cast to the ref of same container type"); return ObjectPtr<BaseType>(static_cast<Object*>(ptr)); } template <typename SubRef, typename BaseRef> inline SubRef Downcast(BaseRef ref) { if (ref.defined()) { ICHECK(ref->template IsInstance<typename SubRef::ContainerType>()) << "Downcast from " << ref->GetTypeKey() << " to " << SubRef::ContainerType::_type_key << " failed."; } else { ICHECK(SubRef::_type_is_nullable) << "Downcast from nullptr to not nullable reference of " << SubRef::ContainerType::_type_key; } return SubRef(std::move(ref.data_)); } } // namespace runtime } // namespace tvm #endif // TVM_RUNTIME_OBJECT_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/packed_func.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/runtime/packed_func.h * \brief Type-erased function used across TVM API. */ #ifndef TVM_RUNTIME_PACKED_FUNC_H_ #define TVM_RUNTIME_PACKED_FUNC_H_ #include <tvm/runtime/c_runtime_api.h> #include <tvm/runtime/container/array.h> #include <tvm/runtime/container/map.h> #include <tvm/runtime/data_type.h> #include <tvm/runtime/logging.h> #include <tvm/runtime/module.h> #include <tvm/runtime/ndarray.h> #include <tvm/runtime/object.h> #include <functional> #include <limits> #include <memory> #include <string> #include <tuple> #include <type_traits> #include <utility> #include <vector> // Whether use TVM runtime in header only mode. #ifndef TVM_RUNTIME_HEADER_ONLY #define TVM_RUNTIME_HEADER_ONLY 0 #endif namespace tvm { namespace runtime { // forward declarations class TVMArgs; class TVMArgValue; class TVMMovableArgValueWithContext_; class TVMRetValue; class TVMArgsSetter; template <typename FType> class TypedPackedFunc; template <typename TSignature> struct SignaturePrinter; /*! * \brief Object container class that backs PackedFunc. * \note Do not use this function directly, use PackedFunc. */ class PackedFuncObj : public Object { public: /*! * \brief Call the function in packed format. 
* \param args The arguments
   * \param rv The return value.
   */
  TVM_ALWAYS_INLINE void CallPacked(TVMArgs args, TVMRetValue* rv) const;

  static constexpr const uint32_t _type_index = TypeIndex::kRuntimePackedFunc;
  static constexpr const char* _type_key = "runtime.PackedFunc";
  TVM_DECLARE_FINAL_OBJECT_INFO(PackedFuncObj, Object);

 protected:
  /*!
   * \brief Internal struct for extracting the callable method from callable type.
   */
  template <class TPackedFuncSubObj>
  struct Extractor {
    /*!
     * \brief Extracting the callable method from callable type.
     * \param obj The base packed function object class.
     * \param args The arguments
     * \param rv The return value.
     */
    static void Call(const PackedFuncObj* obj, TVMArgs args, TVMRetValue* rv);
  };

  /*! \brief The internal callable function type. */
  using FCallPacked = void(const PackedFuncObj*, TVMArgs, TVMRetValue*);

  /*!
   * \brief Constructing a packed function object from a function pointer.
   * \param f_call_pack The function pointer used to call the packed function.
   */
  explicit PackedFuncObj(FCallPacked* f_call_pack) : f_call_packed_(f_call_pack) {}

  /*! \brief Delete the default constructor explicitly. */
  PackedFuncObj() = delete;

  /*! \brief Internal callable function pointer used to call the packed function. */
  FCallPacked* f_call_packed_;
};

/*! \brief Derived object class for constructing PackedFuncObj. */
template <class TCallable>
class PackedFuncSubObj : public PackedFuncObj {
  // Storage type: the callable with references and cv-qualifiers stripped.
  using TStorage = typename std::remove_cv<typename std::remove_reference<TCallable>::type>::type;

 public:
  /*! \brief The type of derived object class */
  using TSelf = PackedFuncSubObj<TCallable>;
  /*!
   * \brief Derived object class for constructing PackedFuncObj.
   * \param callable The type-erased callable object.
   */
  explicit PackedFuncSubObj(TCallable callable)
      : PackedFuncObj(Extractor<TSelf>::Call), callable_(callable) {}
  /*! \brief Type-erased field for storing the callable object. */
  mutable TStorage callable_;
};

/*!
* \brief Packed function is a type-erased function.
 * The arguments are passed by packed format.
 *
 * This is a useful unified interface to call generated functions,
 * It is the unified function type of TVM.
 * It corresponds to TVMFunctionHandle in C runtime API.
 */
class PackedFunc : public ObjectRef {
 public:
  /*! \brief Constructor from null */
  PackedFunc(std::nullptr_t null) : ObjectRef(nullptr) {}  // NOLINT(*)
  /*!
   * \brief Constructing a packed function from a callable type
   *        whose signature is consistent with `PackedFunc`
   * \param data the internal container of packed function.
   */
  template <typename TCallable,
            typename = std::enable_if_t<
                std::is_convertible<TCallable, std::function<void(TVMArgs, TVMRetValue*)>>::value &&
                !std::is_base_of<TCallable, PackedFunc>::value>>
  explicit PackedFunc(TCallable data) {
    using ObjType = PackedFuncSubObj<TCallable>;
    data_ = make_object<ObjType>(std::forward<TCallable>(data));
  }
  /*!
   * \brief Call packed function by directly passing in unpacked format.
   * \param args Arguments to be passed.
   * \tparam Args arguments to be passed.
   *
   * \code
   * // Example code on how to call packed function
   * void CallPacked(PackedFunc f) {
   *   // call like normal functions by pass in arguments
   *   // return value is automatically converted back
   *   int rvalue = f(1, 2.0);
   * }
   * \endcode
   */
  template <typename... Args>
  inline TVMRetValue operator()(Args&&... args) const;
  /*!
   * \brief Call the function in packed format.
   * \param args The arguments
   * \param rv The return value.
   */
  TVM_ALWAYS_INLINE void CallPacked(TVMArgs args, TVMRetValue* rv) const;
  /*! \return Whether the packed function is nullptr */
  bool operator==(std::nullptr_t null) const { return data_ == nullptr; }
  /*! \return Whether the packed function is not nullptr */
  bool operator!=(std::nullptr_t null) const { return data_ != nullptr; }

  TVM_DEFINE_OBJECT_REF_METHODS(PackedFunc, ObjectRef, PackedFuncObj);
};

/*!
\brief Using static function to output TypedPackedFunc signature */ using FSig = std::string(); /*! * \brief Please refer to \ref TypedPackedFuncAnchor "TypedPackedFunc<R(Args..)>" */ template <typename FType> class TypedPackedFunc; /*! * \anchor TypedPackedFuncAnchor * \brief A PackedFunc wrapper to provide typed function signature. * It is backed by a PackedFunc internally. * * TypedPackedFunc enables compile time type checking. * TypedPackedFunc works with the runtime system: * - It can be passed as an argument of PackedFunc. * - It can be assigned to TVMRetValue. * - It can be directly converted to a type-erased PackedFunc. * * Developers should prefer TypedPackedFunc over PackedFunc in C++ code * as it enables compile time checking. * We can construct a TypedPackedFunc from a lambda function * with the same signature. * * \code * // user defined lambda function. * auto addone = [](int x)->int { * return x + 1; * }; * // We can directly convert * // lambda function to TypedPackedFunc * TypedPackedFunc<int(int)> ftyped(addone); * // invoke the function. * int y = ftyped(1); * // Can be directly converted to PackedFunc * PackedFunc packed = ftype; * \endcode * \tparam R The return value of the function. * \tparam Args The argument signature of the function. */ template <typename R, typename... Args> class TypedPackedFunc<R(Args...)> { public: /*! \brief short hand for this function type */ using TSelf = TypedPackedFunc<R(Args...)>; /*! \brief default constructor */ TypedPackedFunc() {} /*! \brief constructor from null */ TypedPackedFunc(std::nullptr_t null) {} // NOLINT(*) /*! * \brief construct by wrap a PackedFunc * * Example usage: * \code * PackedFunc packed([](TVMArgs args, TVMRetValue *rv) { * int x = args[0]; * *rv = x + 1; * }); * // construct from packed function * TypedPackedFunc<int(int)> ftyped(packed); * // call the typed version. 
* ICHECK_EQ(ftyped(1), 2); * \endcode * * \param packed The packed function */ inline TypedPackedFunc(PackedFunc packed); // NOLINT(*) /*! * \brief constructor from TVMRetValue * \param value The TVMRetValue */ inline TypedPackedFunc(const TVMRetValue& value); // NOLINT(*) /*! * \brief constructor from TVMArgValue * \param value The TVMArgValue */ inline TypedPackedFunc(const TVMArgValue& value); // NOLINT(*) /*! * \brief constructor from TVMMovableArgValue_ * \param value The TVMMovableArgValue_ */ inline TypedPackedFunc(TVMMovableArgValueWithContext_&& value); // NOLINT(*) /*! * \brief construct from a lambda function with the same signature. * * Example usage: * \code * auto typed_lambda = [](int x)->int { return x + 1; } * // construct from packed function * TypedPackedFunc<int(int)> ftyped(typed_lambda, "add_one"); * // call the typed version. * ICHECK_EQ(ftyped(1), 2); * \endcode * * \param typed_lambda typed lambda function. * \param name the name of the lambda function. * \tparam FLambda the type of the lambda function. */ template <typename FLambda, typename = typename std::enable_if<std::is_convertible< FLambda, std::function<R(Args...)>>::value>::type> TypedPackedFunc(const FLambda& typed_lambda, std::string name) { // NOLINT(*) this->AssignTypedLambda(typed_lambda, name); } /*! * \brief construct from a lambda function with the same signature. * * This version does not take a name. It is highly recommend you use the * version that takes a name for the lambda. * * Example usage: * \code * auto typed_lambda = [](int x)->int { return x + 1; } * // construct from packed function * TypedPackedFunc<int(int)> ftyped(typed_lambda); * // call the typed version. * ICHECK_EQ(ftyped(1), 2); * \endcode * * \param typed_lambda typed lambda function. * \tparam FLambda the type of the lambda function. 
*/ template <typename FLambda, typename = typename std::enable_if<std::is_convertible< FLambda, std::function<R(Args...)>>::value>::type> TypedPackedFunc(const FLambda& typed_lambda) { // NOLINT(*) this->AssignTypedLambda(typed_lambda); } /*! * \brief copy assignment operator from typed lambda * * Example usage: * \code * // construct from packed function * TypedPackedFunc<int(int)> ftyped; * ftyped = [](int x) { return x + 1; } * // call the typed version. * ICHECK_EQ(ftyped(1), 2); * \endcode * * \param typed_lambda typed lambda function. * \tparam FLambda the type of the lambda function. * \returns reference to self. */ template <typename FLambda, typename = typename std::enable_if< std::is_convertible<FLambda, std::function<R(Args...)>>::value>::type> TSelf& operator=(FLambda typed_lambda) { // NOLINT(*) this->AssignTypedLambda(typed_lambda); return *this; } /*! * \brief copy assignment operator from PackedFunc. * \param packed The packed function. * \returns reference to self. */ TSelf& operator=(PackedFunc packed) { packed_ = packed; return *this; } /*! * \brief Invoke the operator. * \param args The arguments * \returns The return value. */ TVM_ALWAYS_INLINE R operator()(Args... args) const; /*! * \brief convert to PackedFunc * \return the internal PackedFunc */ operator PackedFunc() const { return packed(); } /*! * \return reference the internal PackedFunc */ const PackedFunc& packed() const { return packed_; } /*! \return Whether the packed function is nullptr */ bool operator==(std::nullptr_t null) const { return packed_ == nullptr; } /*! \return Whether the packed function is not nullptr */ bool operator!=(std::nullptr_t null) const { return packed_ != nullptr; } private: friend class TVMRetValue; /*! \brief The internal packed function */ PackedFunc packed_; /*! * \brief Assign the packed field using a typed lambda function. * * \param flambda The lambda function. * \param name The name associated with this lambda. 
* \tparam FLambda The lambda function type. * \note We capture the lambda when possible for maximum efficiency. */ template <typename FLambda> inline void AssignTypedLambda(FLambda flambda, std::string name); /*! * \brief Assign the packed field using a typed lambda function. This variant is for functions * without names. * * \param flambda The lambda function. * \tparam FLambda The lambda function type. * \note We capture the lambda when possible for maximum efficiency. */ template <typename FLambda> inline void AssignTypedLambda(FLambda flambda); }; /*! \brief Arguments into TVM functions. */ class TVMArgs { public: const TVMValue* values; const int* type_codes; int num_args; /*! * \brief constructor * \param values The argument values * \param type_codes The argument type codes * \param num_args number of arguments. */ TVMArgs(const TVMValue* values, const int* type_codes, int num_args) : values(values), type_codes(type_codes), num_args(num_args) {} /*! \return size of the arguments */ inline int size() const; /*! * \brief Get i-th argument * \param i the index. * \return the ith argument. */ inline TVMArgValue operator[](int i) const; }; /*! * \brief Convert argument type code to string. * \param type_code The input type code. * \return The corresponding string repr. */ inline const char* ArgTypeCode2Str(int type_code); // macro to check type code. #define TVM_CHECK_TYPE_CODE(CODE, T) \ ICHECK_EQ(CODE, T) << "expected " << ArgTypeCode2Str(T) << " but got " << ArgTypeCode2Str(CODE) /*! * \brief Type traits for runtime type check during FFI conversion. * \tparam T the type to be checked. */ template <typename T> struct ObjectTypeChecker { /*! * \brief Check if an object matches the template type and return the * mismatched type if it exists. * \param ptr The object to check the type of. * \return An Optional containing the actual type of the pointer if it does not match the * template type. If the Optional does not contain a value, then the types match. 
*/ static Optional<String> CheckAndGetMismatch(const Object* ptr) { using ContainerType = typename T::ContainerType; if (ptr == nullptr) { if (T::_type_is_nullable) { return NullOpt; } else { return String("nullptr"); } } if (ptr->IsInstance<ContainerType>()) { return NullOpt; } else { return String(ptr->GetTypeKey()); } } /*! * \brief Check if an object matches the template type. * \param ptr The object to check the type of. * \return Whether or not the template type matches the objects type. */ static bool Check(const Object* ptr) { using ContainerType = typename T::ContainerType; if (ptr == nullptr) return T::_type_is_nullable; return ptr->IsInstance<ContainerType>(); } static std::string TypeName() { using ContainerType = typename T::ContainerType; return ContainerType::_type_key; } }; // Additional overloads for PackedFunc checking. template <typename T> struct ObjectTypeChecker<Array<T>> { static Optional<String> CheckAndGetMismatch(const Object* ptr) { if (ptr == nullptr) { return NullOpt; } if (!ptr->IsInstance<ArrayNode>()) { return String(ptr->GetTypeKey()); } const ArrayNode* n = static_cast<const ArrayNode*>(ptr); for (size_t i = 0; i < n->size(); i++) { const ObjectRef& p = (*n)[i]; Optional<String> check_subtype = ObjectTypeChecker<T>::CheckAndGetMismatch(p.get()); if (check_subtype.defined()) { return String("Array[index " + std::to_string(i) + ": " + check_subtype.value() + "]"); } } return NullOpt; } static bool Check(const Object* ptr) { if (ptr == nullptr) return true; if (!ptr->IsInstance<ArrayNode>()) return false; const ArrayNode* n = static_cast<const ArrayNode*>(ptr); for (const ObjectRef& p : *n) { if (!ObjectTypeChecker<T>::Check(p.get())) { return false; } } return true; } static std::string TypeName() { return "Array[" + ObjectTypeChecker<T>::TypeName() + "]"; } }; template <typename K, typename V> struct ObjectTypeChecker<Map<K, V>> { static Optional<String> CheckAndGetMismatch(const Object* ptr) { if (ptr == nullptr) return NullOpt; if 
(!ptr->IsInstance<MapNode>()) return String(ptr->GetTypeKey()); const MapNode* n = static_cast<const MapNode*>(ptr); for (const auto& kv : *n) { Optional<String> key_type = ObjectTypeChecker<K>::CheckAndGetMismatch(kv.first.get()); Optional<String> value_type = ObjectTypeChecker<K>::CheckAndGetMismatch(kv.first.get()); if (key_type.defined() || value_type.defined()) { std::string key_name = key_type.defined() ? std::string(key_type.value()) : ObjectTypeChecker<K>::TypeName(); std::string value_name = value_type.defined() ? std::string(value_type.value()) : ObjectTypeChecker<V>::TypeName(); return String("Map[" + key_name + ", " + value_name + "]"); } } return NullOpt; } static bool Check(const Object* ptr) { if (ptr == nullptr) return true; if (!ptr->IsInstance<MapNode>()) return false; const MapNode* n = static_cast<const MapNode*>(ptr); for (const auto& kv : *n) { if (!ObjectTypeChecker<K>::Check(kv.first.get())) return false; if (!ObjectTypeChecker<V>::Check(kv.second.get())) return false; } return true; } static std::string TypeName() { return "Map[" + ObjectTypeChecker<K>::TypeName() + ", " + ObjectTypeChecker<V>::TypeName() + ']'; } }; /*! * \brief Internal base class to * handle conversion to POD values. */ class TVMPODValue_ { public: operator double() const { // Allow automatic conversion from int to float // This avoids errors when user pass in int from // the frontend while the API expects a float. 
if (type_code_ == kDLInt) { return static_cast<double>(value_.v_int64); } TVM_CHECK_TYPE_CODE(type_code_, kDLFloat); return value_.v_float64; } operator int64_t() const { TVM_CHECK_TYPE_CODE(type_code_, kDLInt); return value_.v_int64; } operator uint64_t() const { TVM_CHECK_TYPE_CODE(type_code_, kDLInt); return value_.v_int64; } operator int() const { TVM_CHECK_TYPE_CODE(type_code_, kDLInt); ICHECK_LE(value_.v_int64, std::numeric_limits<int>::max()); ICHECK_GE(value_.v_int64, std::numeric_limits<int>::min()); return static_cast<int>(value_.v_int64); } operator bool() const { TVM_CHECK_TYPE_CODE(type_code_, kDLInt); return value_.v_int64 != 0; } operator void*() const { if (type_code_ == kTVMNullptr) return nullptr; if (type_code_ == kTVMDLTensorHandle) return value_.v_handle; TVM_CHECK_TYPE_CODE(type_code_, kTVMOpaqueHandle); return value_.v_handle; } operator DLTensor*() const { if (type_code_ == kTVMDLTensorHandle || type_code_ == kTVMNDArrayHandle) { return static_cast<DLTensor*>(value_.v_handle); } else { if (type_code_ == kTVMNullptr) return nullptr; LOG(FATAL) << "Expected " << "DLTensor* or NDArray but got " << ArgTypeCode2Str(type_code_); return nullptr; } } operator NDArray() const { if (type_code_ == kTVMNullptr) return NDArray(ObjectPtr<Object>(nullptr)); TVM_CHECK_TYPE_CODE(type_code_, kTVMNDArrayHandle); return NDArray(NDArray::FFIDataFromHandle(static_cast<TVMArrayHandle>(value_.v_handle))); } operator Module() const { if (type_code_ == kTVMNullptr) { return Module(ObjectPtr<Object>(nullptr)); } TVM_CHECK_TYPE_CODE(type_code_, kTVMModuleHandle); return Module(ObjectPtr<Object>(static_cast<Object*>(value_.v_handle))); } operator PackedFunc() const { if (type_code_ == kTVMNullptr) { return PackedFunc(ObjectPtr<Object>(nullptr)); } TVM_CHECK_TYPE_CODE(type_code_, kTVMPackedFuncHandle); return PackedFunc(ObjectPtr<Object>(static_cast<Object*>(value_.v_handle))); } operator Device() const { TVM_CHECK_TYPE_CODE(type_code_, kDLDevice); return 
value_.v_device; } int type_code() const { return type_code_; } /*! * \brief return handle as specific pointer type. * \tparam T the data type. * \return The pointer type. */ template <typename T> T* ptr() const { return static_cast<T*>(value_.v_handle); } // ObjectRef handling template <typename TObjectRef, typename = typename std::enable_if<std::is_base_of<ObjectRef, TObjectRef>::value>::type> inline bool IsObjectRef() const; template <typename TObjectRef> inline TObjectRef AsObjectRef() const; protected: friend class TVMArgsSetter; friend class TVMRetValue; friend class TVMMovableArgValue_; TVMPODValue_() : type_code_(kTVMNullptr) {} TVMPODValue_(TVMValue value, int type_code) : value_(value), type_code_(type_code) {} /*! \brief The value */ TVMValue value_; /*! \brief the type code */ int type_code_; }; /*! * \brief A single argument value to PackedFunc. * Containing both type_code and TVMValue * * Provides utilities to do type cast into other types. */ class TVMArgValue : public TVMPODValue_ { public: /*! \brief default constructor */ TVMArgValue() {} /*! * \brief constructor * \param value of the function * \param type_code The type code. */ TVMArgValue(TVMValue value, int type_code) : TVMPODValue_(value, type_code) {} // reuse converter from parent using TVMPODValue_::operator double; using TVMPODValue_::operator int64_t; using TVMPODValue_::operator uint64_t; using TVMPODValue_::operator int; using TVMPODValue_::operator bool; using TVMPODValue_::operator void*; using TVMPODValue_::operator DLTensor*; using TVMPODValue_::operator NDArray; using TVMPODValue_::operator Device; using TVMPODValue_::operator Module; using TVMPODValue_::operator PackedFunc; using TVMPODValue_::AsObjectRef; using TVMPODValue_::IsObjectRef; // conversion operator. 
operator std::string() const {
    if (type_code_ == kTVMDataType) {
      return DLDataType2String(operator DLDataType());
    } else if (type_code_ == kTVMBytes) {
      TVMByteArray* arr = static_cast<TVMByteArray*>(value_.v_handle);
      return std::string(arr->data, arr->size);
    } else if (type_code_ == kTVMStr) {
      return std::string(value_.v_str);
    } else {
      // NOTE(review): TypeIndex2Key is given type_code_ here, which is an FFI
      // type code rather than an object type index — confirm the intended key.
      ICHECK(IsObjectRef<tvm::runtime::String>())
          << "Could not convert TVM object of type " << runtime::Object::TypeIndex2Key(type_code_)
          << " to a string.";
      return AsObjectRef<tvm::runtime::String>().operator std::string();
    }
  }
  template <typename FType>
  operator TypedPackedFunc<FType>() const {
    return TypedPackedFunc<FType>(operator PackedFunc());
  }
  const TVMValue& value() const { return value_; }

  template <typename T, typename = typename std::enable_if<std::is_class<T>::value>::type>
  inline operator T() const;
  inline operator DLDataType() const;
  inline operator DataType() const;
};

/*!
 * \brief Internal auxiliary struct for TypedPackedFunc to indicate a movable argument.
 *
 * We can only construct a movable argument once from a single argument position.
 * If the argument is passed as RValue reference, the result will be moved.
 * We should only construct a MovableArg from an argument once,
 * as the result can only be moved once.
 *
 * \note For internal development purpose only.
 */
class TVMMovableArgValue_ : public TVMPODValue_ {
 public:
  TVMMovableArgValue_(TVMValue value, int type_code) : TVMPODValue_(value, type_code) {}
  // reuse converter from parent
  using TVMPODValue_::operator double;
  using TVMPODValue_::operator int64_t;
  using TVMPODValue_::operator uint64_t;
  using TVMPODValue_::operator int;
  using TVMPODValue_::operator bool;
  using TVMPODValue_::operator void*;
  using TVMPODValue_::operator DLTensor*;
  using TVMPODValue_::operator NDArray;
  using TVMPODValue_::operator Device;
  using TVMPODValue_::operator Module;
  using TVMPODValue_::operator PackedFunc;
  // reuse conversion rule from ArgValue.
operator std::string() const { return AsArgValue().operator std::string(); } template <typename FType> operator TypedPackedFunc<FType>() const { return TypedPackedFunc<FType>(operator PackedFunc()); } operator DLDataType() const { return AsArgValue().operator DLDataType(); } operator DataType() const { return AsArgValue().operator DataType(); } operator TVMArgValue() const { return AsArgValue(); } /*! * \brief Helper converter function. * Try to move out an argument if possible, * fall back to normal argument conversion rule otherwise. */ template <typename T, typename = typename std::enable_if<std::is_base_of<ObjectRef, T>::value>::type> inline operator T() const; private: /*! \return The arg value repr of the value. */ TVMArgValue AsArgValue() const { return TVMArgValue(value_, type_code_); } }; /*! * \brief Internal auxiliary struct for TypedPackedFunc to indicate a movable argument with * additional context information (function name and argument index) for better error reporting. * * \sa MovableArgValue_ * \note For internal development purpose only. */ class TVMMovableArgValueWithContext_ { public: /*! * \brief move constructor from another return value. * \param value The other return value. * \param type_code The code associated with the type of the value. * \param arg_index In a function call, this argument is at index arg_index (0-indexed). * \param optional_name Name of the function being called. Can be nullptr if the function is not. * \param f_sig Pointer to static function outputting signature of the function being called. * named. */ TVMMovableArgValueWithContext_(TVMValue value, int type_code, int arg_index, const std::string* optional_name, FSig* f_sig) : value_(value, type_code), arg_index_(arg_index), optional_name_(optional_name), f_sig_(f_sig) {} template <typename T> operator T() const { try { return value_; // implicit conversion happens here } catch (dmlc::Error& e) { LOG(FATAL) << "In function " << (optional_name_ == nullptr ? 
"<anonymous>" : *optional_name_) << (f_sig_ == nullptr ? "" : (*f_sig_)()) << ": error while converting argument " << arg_index_ << ": " << e.what(); throw; // never reached, LOG(FATAL) throws, but this silences a warning. } } private: TVMMovableArgValue_ value_; int arg_index_; const std::string* optional_name_; FSig* f_sig_; }; /*! * \brief Return Value container, * Unlike TVMArgValue, which only holds reference and do not delete * the underlying container during destruction. * * TVMRetValue holds value and will manage the underlying containers * when it stores a complicated data type. */ class TVMRetValue : public TVMPODValue_ { public: /*! \brief default constructor */ TVMRetValue() {} /*! * \brief move constructor from another return value. * \param other The other return value. */ TVMRetValue(TVMRetValue&& other) : TVMPODValue_(other.value_, other.type_code_) { other.value_.v_handle = nullptr; other.type_code_ = kTVMNullptr; } /*! \brief destructor */ ~TVMRetValue() { this->Clear(); } // reuse converter from parent using TVMPODValue_::operator double; using TVMPODValue_::operator int64_t; using TVMPODValue_::operator uint64_t; using TVMPODValue_::operator int; using TVMPODValue_::operator bool; using TVMPODValue_::operator void*; using TVMPODValue_::operator DLTensor*; using TVMPODValue_::operator Device; using TVMPODValue_::operator NDArray; using TVMPODValue_::operator Module; using TVMPODValue_::operator PackedFunc; using TVMPODValue_::AsObjectRef; using TVMPODValue_::IsObjectRef; TVMRetValue(const TVMRetValue& other) : TVMPODValue_() { this->Assign(other); } // conversion operators operator std::string() const { if (type_code_ == kTVMDataType) { return DLDataType2String(operator DLDataType()); } else if (type_code_ == kTVMBytes) { return *ptr<std::string>(); } TVM_CHECK_TYPE_CODE(type_code_, kTVMStr); return *ptr<std::string>(); } operator DLDataType() const { if (type_code_ == kTVMStr) { return String2DLDataType(operator std::string()); } 
TVM_CHECK_TYPE_CODE(type_code_, kTVMDataType); return value_.v_type; } operator DataType() const { return DataType(operator DLDataType()); } template <typename FType> operator TypedPackedFunc<FType>() const { return TypedPackedFunc<FType>(operator PackedFunc()); } // Assign operators TVMRetValue& operator=(TVMRetValue&& other) { this->Clear(); value_ = other.value_; type_code_ = other.type_code_; other.type_code_ = kTVMNullptr; return *this; } TVMRetValue& operator=(double value) { this->SwitchToPOD(kDLFloat); value_.v_float64 = value; return *this; } TVMRetValue& operator=(std::nullptr_t value) { this->SwitchToPOD(kTVMNullptr); value_.v_handle = value; return *this; } TVMRetValue& operator=(void* value) { this->SwitchToPOD(kTVMOpaqueHandle); value_.v_handle = value; return *this; } TVMRetValue& operator=(int64_t value) { this->SwitchToPOD(kDLInt); value_.v_int64 = value; return *this; } TVMRetValue& operator=(int value) { this->SwitchToPOD(kDLInt); value_.v_int64 = value; return *this; } TVMRetValue& operator=(DLDevice value) { this->SwitchToPOD(kDLDevice); value_.v_device = value; return *this; } TVMRetValue& operator=(DLDataType t) { this->SwitchToPOD(kTVMDataType); value_.v_type = t; return *this; } TVMRetValue& operator=(const DataType& other) { return operator=(other.operator DLDataType()); } TVMRetValue& operator=(bool value) { this->SwitchToPOD(kDLInt); value_.v_int64 = value; return *this; } TVMRetValue& operator=(std::string value) { this->SwitchToClass(kTVMStr, value); return *this; } TVMRetValue& operator=(TVMByteArray value) { this->SwitchToClass(kTVMBytes, std::string(value.data, value.size)); return *this; } TVMRetValue& operator=(NDArray other) { if (other.data_ != nullptr) { this->Clear(); type_code_ = kTVMNDArrayHandle; value_.v_handle = NDArray::FFIGetHandle(other); ObjectRef::FFIClearAfterMove(&other); } else { SwitchToPOD(kTVMNullptr); value_.v_handle = nullptr; } return *this; } TVMRetValue& operator=(Module m) { 
SwitchToObject(kTVMModuleHandle, std::move(m.data_));
    return *this;
  }
  TVMRetValue& operator=(PackedFunc f) {
    this->SwitchToObject(kTVMPackedFuncHandle, std::move(f.data_));
    return *this;
  }
  template <typename FType>
  TVMRetValue& operator=(const TypedPackedFunc<FType>& f) {
    return operator=(f.packed());
  }
  TVMRetValue& operator=(const TVMRetValue& other) {  // NOLINT(*)
    this->Assign(other);
    return *this;
  }
  TVMRetValue& operator=(const TVMArgValue& other) {
    this->Assign(other);
    return *this;
  }
  TVMRetValue& operator=(TVMMovableArgValue_&& other) {
    this->Assign(other);
    return *this;
  }
  /*!
   * \brief Move the value back to front-end via C API.
   * This marks the current container as null.
   * The managed resources are moved to the front-end.
   * The front end should take charge in managing them.
   *
   * \param ret_value The return value.
   * \param ret_type_code The return type code.
   */
  void MoveToCHost(TVMValue* ret_value, int* ret_type_code) {
    // Strings/bytes own heap storage and cannot be handed out as a plain
    // TVMValue; they need special handling.
    ICHECK(type_code_ != kTVMStr && type_code_ != kTVMBytes);
    *ret_value = value_;
    *ret_type_code = type_code_;
    type_code_ = kTVMNullptr;
  }
  /*!
   * \brief Construct a new TVMRetValue by
   * moving from return value stored via C API.
   * \param value the value.
   * \param type_code The type code.
   * \return The created TVMRetValue.
   */
  static TVMRetValue MoveFromCHost(TVMValue value, int type_code) {
    // Can move POD and everything under the object system.
    ICHECK(type_code <= kTVMPackedFuncHandle || type_code == kTVMNDArrayHandle);
    TVMRetValue ret;
    ret.value_ = value;
    ret.type_code_ = type_code;
    return ret;
  }
  /*!
\return The value field, if the data is POD */ const TVMValue& value() const { ICHECK(type_code_ != kTVMObjectHandle && type_code_ != kTVMPackedFuncHandle && type_code_ != kTVMModuleHandle && type_code_ != kTVMStr) << "TVMRetValue.value can only be used for POD data"; return value_; } // ObjectRef handling template <typename TObjectRef, typename = typename std::enable_if<std::is_base_of<ObjectRef, TObjectRef>::value>::type> inline TVMRetValue& operator=(TObjectRef other); template <typename T, typename = typename std::enable_if<std::is_class<T>::value>::type> inline operator T() const; private: template <typename T> void Assign(const T& other) { switch (other.type_code()) { case kTVMStr: { SwitchToClass<std::string>(kTVMStr, other); break; } case kTVMBytes: { SwitchToClass<std::string>(kTVMBytes, other); break; } case kTVMPackedFuncHandle: { *this = other.operator PackedFunc(); break; } case kTVMModuleHandle: { *this = other.operator Module(); break; } case kTVMNDArrayHandle: { *this = other.operator NDArray(); break; } case kTVMObjectHandle: { // Avoid operator ObjectRef as we already know it is not NDArray/Module SwitchToObject(kTVMObjectHandle, GetObjectPtr<Object>(static_cast<Object*>(other.value_.v_handle))); break; } case kTVMObjectRValueRefArg: { operator=(other.operator ObjectRef()); break; } default: { SwitchToPOD(other.type_code()); value_ = other.value_; break; } } } // get the internal container. 
void SwitchToPOD(int type_code) {
    // Reset to hold plain-old-data of the given type code, releasing any
    // previously owned resource.
    if (type_code_ != type_code) {
      this->Clear();
      type_code_ = type_code;
    }
  }
  // Reset to hold a heap-allocated class payload (e.g. std::string); reuses
  // the existing allocation when the stored type code already matches.
  template <typename T>
  void SwitchToClass(int type_code, T v) {
    if (type_code_ != type_code) {
      this->Clear();
      type_code_ = type_code;
      value_.v_handle = new T(v);
    } else {
      *static_cast<T*>(value_.v_handle) = v;
    }
  }
  // Take ownership of an object reference; a null pointer degrades to kTVMNullptr.
  void SwitchToObject(int type_code, ObjectPtr<Object> other) {
    if (other.data_ != nullptr) {
      this->Clear();
      type_code_ = type_code;
      // move the handle out
      value_.v_handle = other.data_;
      other.data_ = nullptr;
    } else {
      SwitchToPOD(kTVMNullptr);
      value_.v_handle = nullptr;
    }
  }
  // Release whatever resource is currently held (string buffer, object
  // reference, or NDArray FFI handle) and mark the container as null.
  void Clear() {
    if (type_code_ == kTVMNullptr) return;
    switch (type_code_) {
      case kTVMStr:
      case kTVMBytes:
        delete ptr<std::string>();
        break;
      case kTVMPackedFuncHandle:
        static_cast<Object*>(value_.v_handle)->DecRef();
        break;
      case kTVMNDArrayHandle: {
        NDArray::FFIDecRef(static_cast<TVMArrayHandle>(value_.v_handle));
        break;
      }
      case kTVMModuleHandle: {
        static_cast<Object*>(value_.v_handle)->DecRef();
        break;
      }
      case kTVMObjectHandle: {
        static_cast<Object*>(value_.v_handle)->DecRef();
        break;
      }
    }
    type_code_ = kTVMNullptr;
  }
};

/*!
 * \brief Type trait to specify special value conversion rules from
 *        TVMArgValue and TVMRetValue.
 *
 *  The trait can be specialized to add type specific conversion logic
 *  from the TVMArgvalue and TVMRetValue.
 *
 * \tparam TObjectRef the specific ObjectRefType.
 */
template <typename TObjectRef>
struct PackedFuncValueConverter {
  /*!
   * \brief Convert a TObjectRef from an argument value.
   * \param val The argument value.
   * \return the converted result.
   */
  static TObjectRef From(const TVMArgValue& val) { return val.AsObjectRef<TObjectRef>(); }
  /*!
   * \brief Convert a TObjectRef from a return value.
   * \param val The argument value.
   * \return the converted result.
   */
  static TObjectRef From(const TVMRetValue& val) { return val.AsObjectRef<TObjectRef>(); }
};

/*!
 * \brief Export a function with the PackedFunc signature
 *        as a PackedFunc that can be loaded by LibraryModule.
 *
 * \param ExportName The symbol name to be exported.
 * \param Function The function with PackedFunc signature.
 * \sa PackedFunc
 *
 * \code
 *
 * void AddOne_(TVMArgs args, TVMRetValue* rv) {
 *   int value = args[0];
 *   *rv = value + 1;
 * }
 * // Expose the function as "AddOne"
 * TVM_DLL_EXPORT_PACKED_FUNC(AddOne, AddOne_);
 *
 * \endcode
 */
#define TVM_DLL_EXPORT_PACKED_FUNC(ExportName, Function)                                    \
  extern "C" {                                                                              \
  TVM_DLL int ExportName(TVMValue* args, int* type_code, int num_args, TVMValue* out_value, \
                         int* out_type_code, void* resource_handle);                        \
  int ExportName(TVMValue* args, int* type_code, int num_args, TVMValue* out_value,         \
                 int* out_type_code, void* resource_handle) {                               \
    try {                                                                                   \
      ::tvm::runtime::TVMRetValue rv;                                                       \
      Function(::tvm::runtime::TVMArgs(args, type_code, num_args), &rv);                    \
      rv.MoveToCHost(out_value, out_type_code);                                             \
      return 0;                                                                             \
    } catch (const ::std::exception& _except_) {                                            \
      TVMAPISetLastError(_except_.what());                                                  \
      return -1;                                                                            \
    }                                                                                       \
  }                                                                                         \
  }

/*!
 * \brief Export typed function as a PackedFunc
 *        that can be loaded by LibraryModule.
 *
 * \param ExportName The symbol name to be exported.
 * \param Function The typed function.
 * \note ExportName and Function must be different,
 *       see code examples below.
 *
 * \sa TypedPackedFunc
 *
 * \code
 *
 * int AddOne_(int x) {
 *   return x + 1;
 * }
 *
 * // Expose the function as "AddOne"
 * TVM_DLL_EXPORT_TYPED_FUNC(AddOne, AddOne_);
 *
 * // Expose the function as "SubOne"
 * TVM_DLL_EXPORT_TYPED_FUNC(SubOne, [](int x) {
 *   return x - 1;
 * });
 *
 * // The following code will cause compilation error.
* // Because the same Function and ExportName
 * // TVM_DLL_EXPORT_TYPED_FUNC(AddOne_, AddOne_);
 *
 * // The following code is OK, assuming the macro
 * // is in a different namespace from xyz
 * // TVM_DLL_EXPORT_TYPED_FUNC(AddOne_, xyz::AddOne_);
 *
 * \endcode
 */
#define TVM_DLL_EXPORT_TYPED_FUNC(ExportName, Function)                                     \
  extern "C" {                                                                              \
  TVM_DLL int ExportName(TVMValue* args, int* type_code, int num_args, TVMValue* out_value, \
                         int* out_type_code, void* resource_handle) {                       \
    try {                                                                                   \
      auto f = Function;                                                                    \
      using FType = ::tvm::runtime::detail::function_signature<decltype(f)>::FType;         \
      ::tvm::runtime::TVMRetValue rv;                                                       \
      ::tvm::runtime::detail::unpack_call_by_signature<FType>::run(                         \
          f, ::tvm::runtime::TVMArgs(args, type_code, num_args), &rv);                      \
      rv.MoveToCHost(out_value, out_type_code);                                             \
      return 0;                                                                             \
    } catch (const ::std::exception& _except_) {                                            \
      TVMAPISetLastError(_except_.what());                                                  \
      return -1;                                                                            \
    }                                                                                       \
  }                                                                                         \
  }

// Bounds-checked access to the i-th packed argument.
inline TVMArgValue TVMArgs::operator[](int i) const {
  ICHECK_LT(i, num_args) << "not enough argument passed, " << num_args << " passed"
                         << " but request arg[" << i << "].";
  return TVMArgValue(values[i], type_codes[i]);
}

inline int TVMArgs::size() const { return num_args; }

// Downcast to the concrete PackedFunc subclass and invoke its stored callable.
template <class TPackedFuncSubObj>
void PackedFuncObj::Extractor<TPackedFuncSubObj>::Call(const PackedFuncObj* obj, TVMArgs args,
                                                       TVMRetValue* rv) {
  (static_cast<const TPackedFuncSubObj*>(obj))->callable_(args, rv);
}

TVM_ALWAYS_INLINE void PackedFuncObj::CallPacked(TVMArgs args, TVMRetValue* rv) const {
  (*f_call_packed_)(this, args, rv);
}

TVM_ALWAYS_INLINE void PackedFunc::CallPacked(TVMArgs args, TVMRetValue* rv) const {
  (static_cast<PackedFuncObj*>(data_.get()))->CallPacked(args, rv);
}

// internal namespace
// Human-readable name for an FFI argument type code; used in error messages.
inline const char* ArgTypeCode2Str(int type_code) {
  switch (type_code) {
    case kDLInt:
      return "int";
    case kDLUInt:
      return "uint";
    case kDLFloat:
      return "float";
    case kTVMStr:
      return "str";
    case kTVMBytes:
      return "bytes";
    case kTVMOpaqueHandle:
      return "handle";
    case kTVMNullptr:
      return "NULL";
    case kTVMDLTensorHandle:
      return "ArrayHandle";
    case kTVMDataType:
      return "DLDataType";
    case kDLDevice:
      return "DLDevice";
    case kTVMPackedFuncHandle:
      return "FunctionHandle";
    case kTVMModuleHandle:
      return "ModuleHandle";
    case kTVMNDArrayHandle:
      return "NDArrayContainer";
    case kTVMObjectHandle:
      return "Object";
    case kTVMObjectRValueRefArg:
      return "ObjectRValueRefArg";
    default:
      LOG(FATAL) << "unknown type_code=" << static_cast<int>(type_code);
      return "";
  }
}

namespace detail {

// Compile-time recursion that applies f to each argument together with its
// position index (used to pack arguments via TVMArgsSetter).
template <bool stop, std::size_t I, typename F>
struct for_each_dispatcher {
  template <typename T, typename... Args>
  static void run(const F& f, T&& value, Args&&... args) {  // NOLINT(*)
    f(I, std::forward<T>(value));
    for_each_dispatcher<sizeof...(Args) == 0, (I + 1), F>::run(f, std::forward<Args>(args)...);
  }
};

// Base case: no arguments remain.
template <std::size_t I, typename F>
struct for_each_dispatcher<true, I, F> {
  static void run(const F& f) {}  // NOLINT(*)
};

template <typename F, typename... Args>
inline void for_each(const F& f, Args&&... args) {  // NOLINT(*)
  for_each_dispatcher<sizeof...(Args) == 0, 0, F>::run(f, std::forward<Args>(args)...);
}

namespace parameter_pack {

// A parameter pack whose elements carry their position index;
// Invoke::F instantiates Functor<i, T> once per element.
template <typename... EnumArgs>
struct EnumeratedParamPack {
  struct Invoke {
    template <template <size_t i, typename TArgument> class Functor, typename... ExtraParams>
    static void F(ExtraParams&&... extra_params) {
      // Array-expansion trick to sequence the calls in pack order.
      using TExpander = int[];
      (void)TExpander{
          0,
          (Functor<EnumArgs::i, typename EnumArgs::T>::F(extra_params...), 0)...,
      };
    }
  };
};

// Zips Args... with their indices into an EnumeratedParamPack.
template <typename... Args>
struct EnumerateImpl {
 private:
  template <size_t _i, typename _T>
  struct Item {
    static const constexpr size_t i = _i;
    using T = _T;
  };

  template <typename...>
  struct Zipper;

  template <std::size_t... id>
  struct Zipper<std::integer_sequence<std::size_t, id...>> {
    using T = EnumeratedParamPack<Item<id, Args>...>;
  };

 public:
  using T = typename Zipper<std::index_sequence_for<Args...>>::T;
};

template <typename... Args>
using Enumerate = typename EnumerateImpl<Args...>::T;

template <typename...
Args>
struct ParamPack {
  // Instantiate Functor<i, Arg_i> for every parameter, forwarding extra_params.
  template <template <size_t i, typename TArgument> class Functor, typename... ExtraParams>
  static void InvokeWithoutArg(ExtraParams&&... extra_params) {
    Enumerate<Args...>::Invoke::template F<Functor, ExtraParams...>(
        std::forward<ExtraParams>(extra_params)...);
  }
};

}  // namespace parameter_pack

/*!
 * \brief Template class to get function signature of a function or functor.
 * \tparam T The function/functor type.
 */
template <typename T>
struct func_signature_helper {
  using FType = void;
};

// Pointer-to-member-function (non-const call operator).
template <typename T, typename R, typename... Args>
struct func_signature_helper<R (T::*)(Args...)> {
  using FType = R(Args...);
  using ParamType = parameter_pack::ParamPack<Args...>;
  using RetType = R;
  static_assert(!std::is_reference<R>::value, "TypedPackedFunc return reference");
};

// Pointer-to-member-function (const call operator, e.g. non-mutable lambdas).
template <typename T, typename R, typename... Args>
struct func_signature_helper<R (T::*)(Args...) const> {
  using FType = R(Args...);
  using ParamType = parameter_pack::ParamPack<Args...>;
  using RetType = R;
  static_assert(!std::is_reference<R>::value, "TypedPackedFunc return reference");
};

/*!
 * \brief Template class to get function signature of a function or functor.
 * \tparam T The function/functor type.
 */
template <typename T>
struct function_signature {
  // Functor case: derive the signature from T::operator().
  using FType = typename func_signature_helper<decltype(&T::operator())>::FType;
  using ParamType = typename func_signature_helper<decltype(&T::operator())>::ParamType;
  using RetType = typename func_signature_helper<decltype(&T::operator())>::RetType;
};

// handle case of function.
template <typename R, typename... Args>
struct function_signature<R(Args...)> {
  using FType = R(Args...);
  using ParamType = parameter_pack::ParamPack<Args...>;
  using RetType = R;
  static_assert(!std::is_reference<R>::value, "TypedPackedFunc return reference");
};

// handle case of function ptr.
template <typename R, typename...
Args>
struct function_signature<R (*)(Args...)> {
  using FType = R(Args...);
  using ParamType = detail::parameter_pack::ParamPack<Args...>;
  using RetType = R;
  static_assert(!std::is_reference<R>::value, "TypedPackedFunc return reference");
};

template <typename TSignature>
struct SignaturePrinter;

namespace type2str {

// Maps a C++ type to a human-readable name for signature printing.
template <typename T>
struct TypeSimplifier;

// Primary template: ObjectRef types report their registered type key.
template <typename T>
struct Type2Str {
  template <typename = std::enable_if_t<std::is_base_of<ObjectRef, T>::value>>
  static std::string v() {
    return T::ContainerType::_type_key;
  }
};
template <>
struct Type2Str<int> {
  static std::string v() { return "int"; }
};
template <>
struct Type2Str<double> {
  static std::string v() { return "double"; }
};
template <>
struct Type2Str<int64_t> {
  static std::string v() { return "int64_t"; }
};
template <>
struct Type2Str<uint64_t> {
  static std::string v() { return "uint64_t"; }
};
template <>
struct Type2Str<bool> {
  static std::string v() { return "bool"; }
};
template <>
struct Type2Str<void> {
  static std::string v() { return "void"; }
};
template <>
struct Type2Str<std::basic_string<char>> {
  static std::string v() { return "basic_string<char>"; }
};
template <typename K, typename V>
struct Type2Str<Map<K, V>> {
  static std::string v() {
    return "Map<" + TypeSimplifier<K>::v() + ", " + TypeSimplifier<V>::v() + ">";
  }
};
template <>
struct Type2Str<DLDevice> {
  static std::string v() { return "DLDevice"; }
};
template <>
struct Type2Str<DLTensor> {
  static std::string v() { return "DLTensor"; }
};
template <>
struct Type2Str<DataType> {
  static std::string v() { return "DataType"; }
};
template <>
struct Type2Str<DLDataType> {
  static std::string v() { return "DLDataType"; }
};
template <>
struct Type2Str<TVMRetValue> {
  static std::string v() { return "TVMRetValue"; }
};
template <>
struct Type2Str<TVMArgValue> {
  static std::string v() { return "TVMArgValue"; }
};
// Typed functions print their full nested signature.
template <typename FType>
struct Type2Str<TypedPackedFunc<FType>> {
  static std::string v() { return
SignaturePrinter<function_signature<FType>>::F(); }
};
template <typename T>
struct Type2Str<Array<T>> {
  static std::string v() { return "Array<" + TypeSimplifier<T>::v() + ">"; }
};

/*!
 * \brief Template class to remove const, pointer and reference of original type.
 * \tparam T The original type.
 */
template <typename T>
struct TypeSimplifier {
  static std::string v() {
    using U = typename std::remove_cv<
        typename std::remove_reference<typename std::remove_pointer<T>::type>::type>::type;
    // Re-attach the stripped qualifiers around the base type's name.
    return (std::is_const<T>::value ? "const " : "") + Type2Str<U>::v() +
           (std::is_pointer<T>::value ? "*" : "") + (std::is_reference<T>::value ? "&" : "");
  }
};

}  // namespace type2str

/*!
 * \brief Template class to generate static function outputting signature of a function or functor.
 * \tparam TSignature The function/functor signature type generated by `function_signature`.
 */
template <typename TSignature>
struct SignaturePrinter {
  using ParamType = typename TSignature::ParamType;
  using RetType = typename TSignature::RetType;

  // Prints one parameter as "<index>: <type>", comma-separated.
  template <size_t i, typename TArgument>
  struct PrintParamType {
    static void F(std::ostream& os) {
      os << (i == 0 ? "" : ", ") << i << ": " << type2str::TypeSimplifier<TArgument>::v();
    }
  };

  // Render the full signature, e.g. "(0: int, 1: double) -> void".
  static std::string F() {
    std::ostringstream oss;
    oss << "(";
    ParamType::template InvokeWithoutArg<PrintParamType>(oss);
    oss << ") -> " << type2str::TypeSimplifier<RetType>::v();
    return oss.str();
  }
};
}  // namespace detail

/* \brief argument setter to PackedFunc */
class TVMArgsSetter {
 public:
  TVMArgsSetter(TVMValue* values, int* type_codes) : values_(values), type_codes_(type_codes) {}
  // setters for POD types
  template <typename T, typename = typename std::enable_if<std::is_integral<T>::value>::type>
  TVM_ALWAYS_INLINE void operator()(size_t i, T value) const {
    values_[i].v_int64 = static_cast<int64_t>(value);
    type_codes_[i] = kDLInt;
  }
  TVM_ALWAYS_INLINE void operator()(size_t i, uint64_t value) const {
    values_[i].v_int64 = static_cast<int64_t>(value);
    // uint64 values beyond the int64 range cannot be represented in the FFI.
    ICHECK_LE(value, static_cast<uint64_t>(std::numeric_limits<int64_t>::max()));
    type_codes_[i] = kDLInt;
  }
  TVM_ALWAYS_INLINE void operator()(size_t i, double value) const {
    values_[i].v_float64 = value;
    type_codes_[i] = kDLFloat;
  }
  TVM_ALWAYS_INLINE void operator()(size_t i, std::nullptr_t value) const {
    values_[i].v_handle = value;
    type_codes_[i] = kTVMNullptr;
  }
  TVM_ALWAYS_INLINE void operator()(size_t i, const TVMArgValue& value) const {
    values_[i] = value.value_;
    type_codes_[i] = value.type_code_;
  }
  TVM_ALWAYS_INLINE void operator()(size_t i, void* value) const {
    values_[i].v_handle = value;
    type_codes_[i] = kTVMOpaqueHandle;
  }
  TVM_ALWAYS_INLINE void operator()(size_t i, DLTensor* value) const {
    values_[i].v_handle = value;
    type_codes_[i] = kTVMDLTensorHandle;
  }
  TVM_ALWAYS_INLINE void operator()(size_t i, Device value) const {
    values_[i].v_device = value;
    type_codes_[i] = kDLDevice;
  }
  TVM_ALWAYS_INLINE void operator()(size_t i, DLDataType value) const {
    values_[i].v_type = value;
    type_codes_[i] = kTVMDataType;
  }
  TVM_ALWAYS_INLINE void operator()(size_t i, DataType dtype) const {
    operator()(i, dtype.operator DLDataType());
  }
TVM_ALWAYS_INLINE void operator()(size_t i, const char* value) const {
    values_[i].v_str = value;
    type_codes_[i] = kTVMStr;
  }
  // setters for container types
  TVM_ALWAYS_INLINE void operator()(size_t i, const std::string& value) const {
    // Stores a borrowed pointer: `value` must outlive the packed call.
    values_[i].v_str = value.c_str();
    type_codes_[i] = kTVMStr;
  }
  TVM_ALWAYS_INLINE void operator()(size_t i, const TVMByteArray& value) const {
    values_[i].v_handle = const_cast<TVMByteArray*>(&value);
    type_codes_[i] = kTVMBytes;
  }
  template <typename FType>
  TVM_ALWAYS_INLINE void operator()(size_t i, const TypedPackedFunc<FType>& value) const {
    operator()(i, value.packed());
  }
  void operator()(size_t i, const TVMRetValue& value) const {
    if (value.type_code() == kTVMStr) {
      values_[i].v_str = value.ptr<std::string>()->c_str();
      type_codes_[i] = kTVMStr;
    } else {
      ICHECK_NE(value.type_code(), kTVMBytes) << "not handled.";
      values_[i] = value.value_;
      type_codes_[i] = value.type_code();
    }
  }
  // ObjectRef handling
  template <typename TObjectRef,
            typename = typename std::enable_if<std::is_base_of<ObjectRef, TObjectRef>::value>::type>
  TVM_ALWAYS_INLINE void operator()(size_t i, const TObjectRef& value) const {
    this->SetObject(i, value);
  }

  template <typename TObjectRef,
            typename = typename std::enable_if<std::is_base_of<
                ObjectRef, typename std::remove_reference<TObjectRef>::type>::value>::type>
  TVM_ALWAYS_INLINE void operator()(size_t i, TObjectRef&& value) const {
    this->SetObject(i, std::forward<TObjectRef>(value));
  }

 private:
  template <typename TObjectRef>
  inline void SetObject(size_t i, TObjectRef&& value) const;
  /*! \brief The values fields */
  TVMValue* values_;
  /*! \brief The type code fields */
  int* type_codes_;
};

// Pack the arguments into stack-allocated TVMValue/type-code arrays and
// invoke the underlying PackedFuncObj.
template <typename... Args>
inline TVMRetValue PackedFunc::operator()(Args&&... args) const {
  const int kNumArgs = sizeof...(Args);
  // Zero-argument calls still need non-empty arrays to take addresses of.
  const int kArraySize = kNumArgs > 0 ? kNumArgs : 1;
  TVMValue values[kArraySize];
  int type_codes[kArraySize];
  detail::for_each(TVMArgsSetter(values, type_codes), std::forward<Args>(args)...);
  TVMRetValue rv;
  (static_cast<PackedFuncObj*>(data_.get()))
      ->CallPacked(TVMArgs(values, type_codes, kNumArgs), &rv);
  return rv;
}

namespace detail {

// Recursively converts each packed argument into a movable argument value and
// forwards it, finally invoking f once every argument has been unpacked.
template <typename R, int nleft, int index, typename F>
struct unpack_call_dispatcher {
  template <typename... Args>
  TVM_ALWAYS_INLINE static void run(const std::string* optional_name, FSig* f_sig, const F& f,
                                    const TVMArgs& args_pack, TVMRetValue* rv,
                                    Args&&... unpacked_args) {
    // construct a movable argument value
    // which allows potential move of argument to the input of F.
    unpack_call_dispatcher<R, nleft - 1, index + 1, F>::run(
        optional_name, f_sig, f, args_pack, rv, std::forward<Args>(unpacked_args)...,
        TVMMovableArgValueWithContext_(args_pack.values[index], args_pack.type_codes[index], index,
                                       optional_name, f_sig));
  }
};

// Base case: all arguments unpacked; call f and store the result in rv.
template <typename R, int index, typename F>
struct unpack_call_dispatcher<R, 0, index, F> {
  template <typename... Args>
  TVM_ALWAYS_INLINE static void run(const std::string* optional_name, FSig* f_sig, const F& f,
                                    const TVMArgs& args_pack, TVMRetValue* rv,
                                    Args&&... unpacked_args) {
    using RetType = decltype(f(std::forward<Args>(unpacked_args)...));
    if (std::is_same<RetType, R>::value) {
      *rv = f(std::forward<Args>(unpacked_args)...);
    } else {
      // Convert to the declared return type before storing.
      *rv = R(f(std::forward<Args>(unpacked_args)...));
    }
  }
};

// Base case for void-returning functions: discard the (absent) result.
template <int index, typename F>
struct unpack_call_dispatcher<void, 0, index, F> {
  template <typename... Args>
  TVM_ALWAYS_INLINE static void run(const std::string* optional_name, FSig* f_sig, const F& f,
                                    const TVMArgs& args_pack, TVMRetValue* rv,
                                    Args&&...
unpacked_args) {
    f(std::forward<Args>(unpacked_args)...);
  }
};

// Validate the argument count, then unpack the packed args and invoke f.
template <typename R, int nargs, typename F>
TVM_ALWAYS_INLINE void unpack_call(const std::string* optional_name, const F& f,
                                   const TVMArgs& args, TVMRetValue* rv) {
  FSig* f_sig = detail::SignaturePrinter<detail::function_signature<F>>::F;
  CHECK_EQ(nargs, args.size()) << "Function "
                               << (optional_name == nullptr ? "<anonymous>" : *optional_name)
                               << (f_sig == nullptr ? "" : (*f_sig)()) << " expects " << nargs
                               << " arguments but " << args.size() << " were provided";
  unpack_call_dispatcher<R, nargs, 0, F>::run(optional_name, f_sig, f, args, rv);
}

template <typename FType>
struct unpack_call_by_signature {};

// Deduce return type and arity from the signature R(Args...).
template <typename R, typename... Args>
struct unpack_call_by_signature<R(Args...)> {
  template <typename F>
  TVM_ALWAYS_INLINE static void run(const F& f, const TVMArgs& args, TVMRetValue* rv) {
    unpack_call<R, sizeof...(Args)>(nullptr, f, args, rv);
  }
};

// Invoke a PackedFunc and convert the result to R.
template <typename R, typename... Args>
TVM_ALWAYS_INLINE R call_packed(const PackedFunc& pf, Args&&... args) {
  return R(pf(std::forward<Args>(args)...));
}

template <typename R>
struct typed_packed_call_dispatcher {
  template <typename... Args>
  TVM_ALWAYS_INLINE static R run(const PackedFunc& pf, Args&&... args) {
    return pf(std::forward<Args>(args)...);
  }
};

// Specialization for void returns: the return value is simply dropped.
template <>
struct typed_packed_call_dispatcher<void> {
  template <typename... Args>
  TVM_ALWAYS_INLINE static void run(const PackedFunc& pf, Args&&... args) {
    pf(std::forward<Args>(args)...);
  }
};
}  // namespace detail

template <typename R, typename... Args>
TypedPackedFunc<R(Args...)>::TypedPackedFunc(PackedFunc packed) : packed_(packed) {}

template <typename R, typename... Args>
TypedPackedFunc<R(Args...)>::TypedPackedFunc(const TVMRetValue& value)
    : packed_(value.operator PackedFunc()) {}

template <typename R, typename... Args>
TypedPackedFunc<R(Args...)>::TypedPackedFunc(const TVMArgValue& value)
    : packed_(value.operator PackedFunc()) {}

template <typename R, typename...
Args>
TypedPackedFunc<R(Args...)>::TypedPackedFunc(TVMMovableArgValueWithContext_&& value)
    : packed_(value.operator PackedFunc()) {}

// Wrap a typed lambda into a PackedFunc that validates the argument count and
// reports errors using the supplied function name.
template <typename R, typename... Args>
template <typename FType>
inline void TypedPackedFunc<R(Args...)>::AssignTypedLambda(FType flambda, std::string name) {
  FSig* f_sig = detail::SignaturePrinter<detail::function_signature<FType>>::F;
  packed_ = PackedFunc([flambda, name, f_sig](const TVMArgs& args, TVMRetValue* rv) {
    if (args.size() != sizeof...(Args)) {
      LOG(FATAL) << "Function " << name << (f_sig == nullptr ? "" : (*f_sig)()) << " expects "
                 << sizeof...(Args) << " arguments, but " << args.size() << " were provided.";
    }
    detail::unpack_call<R, sizeof...(Args)>(&name, flambda, args, rv);
  });
}

// Anonymous variant: errors are reported against "<anonymous>".
template <typename R, typename... Args>
template <typename FType>
inline void TypedPackedFunc<R(Args...)>::AssignTypedLambda(FType flambda) {
  FSig* f_sig = detail::SignaturePrinter<detail::function_signature<FType>>::F;
  packed_ = PackedFunc([flambda, f_sig](const TVMArgs& args, TVMRetValue* rv) {
    if (args.size() != sizeof...(Args)) {
      LOG(FATAL) << "Function <anonymous> " << (*f_sig)() << " expects " << sizeof...(Args)
                 << " arguments, but " << args.size() << " were provided.";
    }
    detail::unpack_call<R, sizeof...(Args)>(nullptr, flambda, args, rv);
  });
}

template <typename R, typename... Args>
TVM_ALWAYS_INLINE R TypedPackedFunc<R(Args...)>::operator()(Args... args) const {
  return detail::typed_packed_call_dispatcher<R>::run(packed_, std::forward<Args>(args)...);
}

// ObjectRef related conversion handling
// Object can have three possible type codes:
//      kTVMNDArrayHandle, kTVMModuleHandle, kTVMObjectHandle
//
// We use type traits to eliminate un-necessary checks.
template <typename T>
// Pack an ObjectRef argument, choosing the FFI type code that lets the callee
// recover the most specific handle (NDArray/Module/PackedFunc special cases).
inline void TVMArgsSetter::SetObject(size_t i, T&& value) const {
  using ContainerType = typename std::remove_reference<T>::type::ContainerType;
  if (value.defined()) {
    Object* ptr = value.data_.data_;
    if (std::is_base_of<NDArray::ContainerType, ContainerType>::value ||
        (std::is_base_of<ContainerType, NDArray::ContainerType>::value &&
         ptr->IsInstance<NDArray::ContainerType>())) {
      values_[i].v_handle = NDArray::FFIGetHandle(value);
      type_codes_[i] = kTVMNDArrayHandle;
    } else if (std::is_base_of<Module::ContainerType, ContainerType>::value ||
               (std::is_base_of<ContainerType, Module::ContainerType>::value &&
                ptr->IsInstance<Module::ContainerType>())) {
      values_[i].v_handle = ptr;
      type_codes_[i] = kTVMModuleHandle;
    } else if (std::is_base_of<PackedFunc::ContainerType, ContainerType>::value ||
               (std::is_base_of<ContainerType, PackedFunc::ContainerType>::value &&
                ptr->IsInstance<PackedFunc::ContainerType>())) {
      values_[i].v_handle = ptr;
      type_codes_[i] = kTVMPackedFuncHandle;
    } else if (std::is_rvalue_reference<decltype(value)>::value) {
      // Rvalue references pass a pointer-to-handle so the callee may move out.
      values_[i].v_handle = const_cast<Object**>(&(value.data_.data_));
      type_codes_[i] = kTVMObjectRValueRefArg;
    } else {
      values_[i].v_handle = value.data_.data_;
      type_codes_[i] = kTVMObjectHandle;
    }
  } else {
    // Undefined references are passed as null.
    type_codes_[i] = kTVMNullptr;
    values_[i].v_handle = nullptr;
  }
}

// Check whether the stored value can be viewed as TObjectRef, honoring the
// special type codes used for NDArray, Module and PackedFunc.
template <typename TObjectRef, typename>
inline bool TVMPODValue_::IsObjectRef() const {
  using ContainerType = typename TObjectRef::ContainerType;
  // NOTE: the following code can be optimized by constant folding.
  if (std::is_base_of<NDArray::ContainerType, ContainerType>::value) {
    return type_code_ == kTVMNDArrayHandle &&
           TVMArrayHandleToObjectHandle(static_cast<TVMArrayHandle>(value_.v_handle))
               ->IsInstance<ContainerType>();
  }
  if (std::is_base_of<Module::ContainerType, ContainerType>::value) {
    return type_code_ == kTVMModuleHandle &&
           static_cast<Object*>(value_.v_handle)->IsInstance<ContainerType>();
  }
  if (std::is_base_of<PackedFunc::ContainerType, ContainerType>::value) {
    return type_code_ == kTVMPackedFuncHandle &&
           static_cast<Object*>(value_.v_handle)->IsInstance<ContainerType>();
  }
  // NOTE: we don't pass NDArray and runtime::Module as RValue ref.
  if (type_code_ == kTVMObjectRValueRefArg) {
    return ObjectTypeChecker<TObjectRef>::Check(*static_cast<Object**>(value_.v_handle));
  }
  return (std::is_base_of<ContainerType, NDArray::ContainerType>::value &&
          type_code_ == kTVMNDArrayHandle) ||
         (std::is_base_of<ContainerType, Module::ContainerType>::value &&
          type_code_ == kTVMModuleHandle) ||
         (std::is_base_of<ContainerType, PackedFunc::ContainerType>::value &&
          type_code_ == kTVMPackedFuncHandle) ||
         (type_code_ == kTVMObjectHandle &&
          ObjectTypeChecker<TObjectRef>::Check(static_cast<Object*>(value_.v_handle)));
}

// Convert the stored value to TObjectRef, checking the dynamic type and
// failing loudly on mismatch.
template <typename TObjectRef>
inline TObjectRef TVMPODValue_::AsObjectRef() const {
  static_assert(std::is_base_of<ObjectRef, TObjectRef>::value,
                "Conversion only works for ObjectRef");
  using ContainerType = typename TObjectRef::ContainerType;
  if (type_code_ == kTVMNullptr) {
    CHECK(TObjectRef::_type_is_nullable)
        << "Expect a not null value of " << ContainerType::_type_key;
    return TObjectRef(ObjectPtr<Object>(nullptr));
  }
  // NOTE: the following code can be optimized by constant folding.
if (std::is_base_of<NDArray::ContainerType, ContainerType>::value) {
    // Casting to a sub-class of NDArray
    TVM_CHECK_TYPE_CODE(type_code_, kTVMNDArrayHandle);
    ObjectPtr<Object> data =
        NDArray::FFIDataFromHandle(static_cast<TVMArrayHandle>(value_.v_handle));
    CHECK(data->IsInstance<ContainerType>())
        << "Expected " << ContainerType::_type_key << " but got " << data->GetTypeKey();
    return TObjectRef(data);
  }
  if (std::is_base_of<Module::ContainerType, ContainerType>::value) {
    // Casting to a sub-class of Module
    TVM_CHECK_TYPE_CODE(type_code_, kTVMModuleHandle);
    ObjectPtr<Object> data = GetObjectPtr<Object>(static_cast<Object*>(value_.v_handle));
    CHECK(data->IsInstance<ContainerType>())
        << "Expected " << ContainerType::_type_key << " but got " << data->GetTypeKey();
    return TObjectRef(data);
  }
  if (std::is_base_of<PackedFunc::ContainerType, ContainerType>::value) {
    // Casting to a sub-class of PackedFunc
    TVM_CHECK_TYPE_CODE(type_code_, kTVMPackedFuncHandle);
    ObjectPtr<Object> data = GetObjectPtr<Object>(static_cast<Object*>(value_.v_handle));
    CHECK(data->IsInstance<ContainerType>())
        << "Expected " << ContainerType::_type_key << " but got " << data->GetTypeKey();
    return TObjectRef(data);
  }
  if (type_code_ == kTVMObjectHandle) {
    // normal object type check.
    Object* ptr = static_cast<Object*>(value_.v_handle);
    Optional<String> checked_type = ObjectTypeChecker<TObjectRef>::CheckAndGetMismatch(ptr);
    ICHECK(!checked_type.defined()) << "Expected " << ObjectTypeChecker<TObjectRef>::TypeName()
                                    << ", but got " << checked_type.value();
    return TObjectRef(GetObjectPtr<Object>(ptr));
  } else if (type_code_ == kTVMObjectRValueRefArg) {
    // Argument was passed by rvalue reference: dereference the handle slot.
    Object* ptr = *static_cast<Object**>(value_.v_handle);
    Optional<String> checked_type = ObjectTypeChecker<TObjectRef>::CheckAndGetMismatch(ptr);
    ICHECK(!checked_type.defined()) << "Expected " << ObjectTypeChecker<TObjectRef>::TypeName()
                                    << ", but got " << checked_type.value();
    return TObjectRef(GetObjectPtr<Object>(ptr));
  } else if (std::is_base_of<ContainerType, NDArray::ContainerType>::value &&
             type_code_ == kTVMNDArrayHandle) {
    // Casting to a base class that NDArray can sub-class
    ObjectPtr<Object> data =
        NDArray::FFIDataFromHandle(static_cast<TVMArrayHandle>(value_.v_handle));
    return TObjectRef(data);
  } else if (std::is_base_of<ContainerType, Module::ContainerType>::value &&
             type_code_ == kTVMModuleHandle) {
    // Casting to a base class that Module can sub-class
    return TObjectRef(GetObjectPtr<Object>(static_cast<Object*>(value_.v_handle)));
  } else if (std::is_base_of<ContainerType, PackedFunc::ContainerType>::value &&
             type_code_ == kTVMPackedFuncHandle) {
    // Casting to a base class that PackedFunc can sub-class
    return TObjectRef(GetObjectPtr<Object>(static_cast<Object*>(value_.v_handle)));
  } else {
    TVM_CHECK_TYPE_CODE(type_code_, kTVMObjectHandle);
    return TObjectRef(ObjectPtr<Object>(nullptr));
  }
}

// Store an ObjectRef return value, specializing the type code for NDArray and
// Module so the FFI layer can round-trip them.
template <typename TObjectRef, typename>
inline TVMRetValue& TVMRetValue::operator=(TObjectRef other) {
  using ContainerType = typename TObjectRef::ContainerType;
  const Object* ptr = other.get();
  if (ptr != nullptr) {
    if (std::is_base_of<NDArray::ContainerType, ContainerType>::value ||
        (std::is_base_of<ContainerType, NDArray::ContainerType>::value &&
         ptr->IsInstance<NDArray::ContainerType>())) {
      return
operator=(NDArray(std::move(other.data_)));
    }
    if (std::is_base_of<Module::ContainerType, ContainerType>::value ||
        (std::is_base_of<ContainerType, Module::ContainerType>::value &&
         ptr->IsInstance<Module::ContainerType>())) {
      return operator=(Module(std::move(other.data_)));
    }
    SwitchToObject(kTVMObjectHandle, std::move(other.data_));
  } else {
    SwitchToPOD(kTVMNullptr);
    value_.v_handle = nullptr;
  }
  return *this;
}

template <typename T, typename>
inline TVMArgValue::operator T() const {
  return PackedFuncValueConverter<T>::From(*this);
}

// Rvalue-ref arguments may be moved out directly when the dynamic type
// matches; otherwise fall back to the regular by-value conversion.
template <typename T, typename>
inline TVMMovableArgValue_::operator T() const {
  if (type_code_ == kTVMObjectRValueRefArg) {
    auto** ref = static_cast<Object**>(value_.v_handle);
    if (ObjectTypeChecker<T>::Check(*ref)) {
      return T(ObjectPtr<Object>::MoveFromRValueRefArg(ref));
    }
  }
  // fallback
  return PackedFuncValueConverter<T>::From(AsArgValue());
}

template <typename T, typename>
inline TVMRetValue::operator T() const {
  return PackedFuncValueConverter<T>::From(*this);
}

inline PackedFunc Module::GetFunction(const std::string& name, bool query_imports) {
  return (*this)->GetFunction(name, query_imports);
}

// specializations of PackedFuncValueConverter
template <>
struct PackedFuncValueConverter<::tvm::runtime::String> {
  // Accept either a String object or a raw C string argument.
  static String From(const TVMArgValue& val) {
    if (val.IsObjectRef<tvm::runtime::String>()) {
      return val.AsObjectRef<tvm::runtime::String>();
    } else {
      return tvm::runtime::String(val.operator std::string());
    }
  }

  static String From(const TVMRetValue& val) {
    if (val.IsObjectRef<tvm::runtime::String>()) {
      return val.AsObjectRef<tvm::runtime::String>();
    } else {
      return tvm::runtime::String(val.operator std::string());
    }
  }
};

// Optional<T>: a null value converts to an empty Optional instead of failing.
template <typename T>
struct PackedFuncValueConverter<Optional<T>> {
  static Optional<T> From(const TVMArgValue& val) {
    if (val.type_code() == kTVMNullptr) return Optional<T>(nullptr);
    return PackedFuncValueConverter<T>::From(val);
  }
  static Optional<T> From(const TVMRetValue& val) {
    if (val.type_code() == kTVMNullptr) return Optional<T>(nullptr);
    return PackedFuncValueConverter<T>::From(val);
  }
};

inline bool String::CanConvertFrom(const TVMArgValue& val) {
  return val.type_code() == kTVMStr || val.IsObjectRef<tvm::runtime::String>();
}

// DLDataType conversion: accepts a string form (e.g. "float32"), a null
// placeholder, or a stored DLDataType value.
inline TVMArgValue::operator DLDataType() const {
  if (String::CanConvertFrom(*this)) {
    return String2DLDataType(PackedFuncValueConverter<String>::From(*this).operator std::string());
  }
  // None type
  if (type_code_ == kTVMNullptr) {
    DLDataType t;
    t.code = kTVMOpaqueHandle;
    t.bits = 0;
    t.lanes = 0;
    return t;
  }
  TVM_CHECK_TYPE_CODE(type_code_, kTVMDataType);
  return value_.v_type;
}

inline TVMArgValue::operator DataType() const { return DataType(operator DLDataType()); }

}  // namespace runtime
}  // namespace tvm
#endif  // TVM_RUNTIME_PACKED_FUNC_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/profiling.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file include/tvm/runtime/profiling.h
 * \brief Runtime profiling including timers.
 */
#ifndef TVM_RUNTIME_PROFILING_H_
#define TVM_RUNTIME_PROFILING_H_

#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/container/map.h>
#include <tvm/runtime/device_api.h>
#include <tvm/runtime/object.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/registry.h>

#include <stack>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

namespace tvm {
namespace runtime {

/*! \brief Base class for all timer implementations.
 *
 * New implementations of this interface should make sure that `Start` and `Stop`
 * are as lightweight as possible. Expensive state synchronization should be
 * done in `SyncAndGetElapsedNanos`.
 */
class TimerNode : public Object {
 public:
  /*! \brief Start the timer.
   *
   * Note: this function should only be called once per object.
   */
  virtual void Start() = 0;
  /*! \brief Stop the timer.
   *
   * Note: this function should only be called once per object.
   */
  virtual void Stop() = 0;
  /*! \brief Synchronize timer state and return elapsed time between `Start` and `Stop`.
   * \return The time in nanoseconds between `Start` and `Stop`.
   *
   * This function is necessary because we want to avoid timing the overhead of
   * doing timing. When using multiple timers, it is recommended to stop all of
   * them before calling `SyncAndGetElapsedNanos` on any of them.
   *
   * Note: this function should only be called once per object. It may incur
   * a large synchronization overhead (for example, with GPUs).
   */
  virtual int64_t SyncAndGetElapsedNanos() = 0;

  virtual ~TimerNode() {}

  static constexpr const char* _type_key = "TimerNode";
  TVM_DECLARE_BASE_OBJECT_INFO(TimerNode, Object);
};

/*! \brief Timer for a specific device.
 *
 * This is a managed reference to a TimerNode.
 *
 * \sa TimerNode
 */
class Timer : public ObjectRef {
 public:
  /*!
   * \brief Get a device specific timer.
   * \param dev The device to time.
   * \return A `Timer` that has already been started.
   *
   * Use this function to time runtime of arbitrary regions of code on a specific
   * device. The code that you want to time should be running on the device
   * otherwise the timer will not return correct results. This is a lower level
   * interface than TimeEvaluator and only runs the timed code once
   * (TimeEvaluator runs the code multiple times).
   *
   * A default timer is used if a device specific one does not exist. This
   * timer performs synchronization between the device and CPU, which can lead
   * to overhead in the reported results.
   *
   * Example usage:
   * \code{.cpp}
   * Timer t = Timer::Start(Device::cpu());
   * my_long_running_function();
   * t->Stop();
   * ... // some more computation
   * int64_t nanosecs = t->SyncAndGetElapsedNanos();  // elapsed time in nanoseconds
   * \endcode
   *
   * To add a new device-specific timer, register a new function
   * "profiling.timer.my_device" (where `my_device` is the `DeviceName` of your
   * device). This function should accept a `Device` and return a new `Timer`
   * that has already been started.
   *
   * For example, this is how the CPU timer is implemented:
   * \code{.cpp}
   *  class CPUTimerNode : public TimerNode {
   *   public:
   *    virtual void Start() { start_ = std::chrono::high_resolution_clock::now(); }
   *    virtual void Stop() { duration_ = std::chrono::high_resolution_clock::now() - start_; }
   *    virtual int64_t SyncAndGetElapsedNanos() { return duration_.count(); }
   *    virtual ~CPUTimerNode() {}
   *
   *    static constexpr const char* _type_key = "CPUTimerNode";
   *    TVM_DECLARE_FINAL_OBJECT_INFO(CPUTimerNode, TimerNode);
   *
   *   private:
   *    std::chrono::high_resolution_clock::time_point start_;
   *    std::chrono::duration<int64_t, std::nano> duration_;
   *  };
   *  TVM_REGISTER_OBJECT_TYPE(CPUTimerNode);
   *
   *  TVM_REGISTER_GLOBAL("profiling.timer.cpu").set_body_typed([](Device dev) {
   *    return Timer(make_object<CPUTimerNode>());
   *  });
   * \endcode
   */
  static TVM_DLL Timer Start(Device dev);

  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(Timer, ObjectRef, TimerNode);
};

/*!
 * \brief Default timer if one does not exist for the device.
 * \param dev The device to time on.
 *
 * Note that this timer performs synchronization between the device and CPU,
 * which can lead to overhead in the reported results.
 */
Timer DefaultTimer(Device dev);

namespace profiling {
/*! \brief Wrapper for `Device` because `Device` is not passable across the
 * PackedFunc interface.
 */
struct DeviceWrapperNode : public Object {
  /*! \brief The wrapped device */
  Device device;

  /*! \brief Constructor
   * \param device The device to wrap.
   */
  explicit DeviceWrapperNode(Device device) : device(device) {}

  static constexpr const char* _type_key = "runtime.profiling.DeviceWrapper";
  TVM_DECLARE_BASE_OBJECT_INFO(DeviceWrapperNode, Object);
};

/*! \brief Wrapper for `Device`. */
class DeviceWrapper : public ObjectRef {
 public:
  /*! \brief Construct a wrapper from a device.
   * \param dev The device to wrap.
   */
  explicit DeviceWrapper(Device dev) { data_ = make_object<DeviceWrapperNode>(dev); }
  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(DeviceWrapper, ObjectRef, DeviceWrapperNode);
};

/*! \brief Data collected from a profiling run. Includes per-call metrics and
 * per-device metrics.
 */
class ReportNode : public Object {
 public:
  /*! \brief A list of function calls and the metrics recorded for that call.
   *
   * Each element is a mapping from metric name to value. Some metrics that
   * appear in every call are "Name" (the function name), "Argument Shapes",
   * and "Duration (us)". Values are one of `String`, `PercentNode`,
   * `DurationNode`, or `CountNode`.
   */
  Array<Map<String, ObjectRef>> calls;
  /*! \brief Metrics collected for the entire run of the model on a per-device basis.
   *
   * `device_metrics` is indexed by device name then metric.
   *
   * These metrics may be larger than the sum of the same metric in `calls`
   * because these metrics include the overhead of the executor.
   */
  Map<String, Map<String, ObjectRef>> device_metrics;
  /*! \brief Configuration used for this profiling run. Includes number of threads, executor.
   *
   * Values must be an object type that can be used with device_metrics.
   */
  Map<String, ObjectRef> configuration;
  /*! \brief Output `calls` in CSV format.
   *
   * Note that this does not include `device_metrics`, it only includes per-call metrics.
   */
  String AsCSV() const;
  /*! \brief Create a human readable table of profiling metrics.
   *
   * \param aggregate Whether or not to join multiple calls to the
   * same op into a single line.
   *
   * \param sort Whether or not to sort call frames by descending
   * duration. If false and if `aggregate` is false, frames will
   * be sorted by order of appearance in the program. Order is
   * undefined if `sort` is false and `aggregate` is true.
   *
   * \param compute_col_sums Whether or not to include sum totals for
   * the Count, Duration, and Percent columns.
   *
   */
  String AsTable(bool sort = true, bool aggregate = true, bool compute_col_sums = true) const;
  /*! \brief Convert this report to JSON.
   *
   * Output JSON will be of this format:
   * \code
   *  {
   *    "calls": [
   *      {
   *        "Duration (us)": {
   *          "microseconds": 12.3
   *        },
   *        "Name": "fused_dense",
   *        "Count": {
   *          "count": 1
   *        },
   *        "Percent": {
   *          "percent": 10.3
   *        }
   *      }
   *    ],
   *    "device_metrics": {
   *      "cpu": {
   *        "Duration (us)": {
   *          "microseconds": 334.2
   *        },
   *        "Percent": {
   *          "percent": 100
   *        }
   *      }
   *    }
   *  }
   * \endcode
   */
  String AsJSON() const;

  static constexpr const char* _type_key = "runtime.profiling.Report";
  TVM_DECLARE_FINAL_OBJECT_INFO(ReportNode, Object);
};

/*! \brief Managed reference to a ReportNode. */
class Report : public ObjectRef {
 public:
  /*! \brief Construct a Report from a set of calls (with associated metrics) and per-device
   * metrics.
   * \param calls Function calls and associated metrics.
   * \param device_metrics Per-device metrics for overall execution.
   * \param configuration Configuration data specific to this profiling run.
   */
  explicit Report(Array<Map<String, ObjectRef>> calls,
                  Map<String, Map<String, ObjectRef>> device_metrics,
                  Map<String, ObjectRef> configuration);

  /*! \brief Deserialize a Report from a JSON object. Needed for sending the report over RPC.
   * \param json Serialized json report from `ReportNode::AsJSON`.
   * \returns A Report.
   */
  static Report FromJSON(String json);
  TVM_DEFINE_NOTNULLABLE_OBJECT_REF_METHODS(Report, ObjectRef, ReportNode);
};

/*! \brief Interface for user defined profiling metric collection.
 *
 * Users can register their own collector by registering a packed function with
 * the name "runtime.profiling.metrics.my_collector_name" where
 * "my_collector_name" is the name of their collector. This function should
 * take an Array of Device as input which contains the devices the collector
 * will be run on.
 *
 * `MetricCollectorNode`s will be called in the following fashion.
 * \code
 * MetricCollector mc;
 * for (auto op : model) {
 *   auto o = mc.Start();
 *   op();
 *   auto metrics = mc.Stop(o); // metrics are added to the profiling report
 * }
 * \endcode
 */
class MetricCollectorNode : public Object {
 public:
  /*! \brief Initialization call. Called before profiling has started. Any
   * expensive precomputation should happen here.
   * \param devs The list of devices this collector will be run on.
   */
  virtual void Init(Array<DeviceWrapper> devs) = 0;
  /*! \brief Start collecting metrics for a function call.
   * \param dev The device the call will be run on.
   * \returns An object used to maintain state of the metric collection. This
   * object will be passed to the corresponding `Stop` call. If the device is
   * not supported, this function will return a nullptr ObjectRef.
   */
  virtual ObjectRef Start(Device dev) = 0;
  /*! \brief Stop collecting metrics.
   * \param obj The object created by the corresponding `Start` call.
   * \returns A set of metric names and the associated values. Values must be
   * one of DurationNode, PercentNode, CountNode, or StringObj.
   */
  virtual Map<String, ObjectRef> Stop(ObjectRef obj) = 0;

  virtual ~MetricCollectorNode() {}

  static constexpr const char* _type_key = "runtime.profiling.MetricCollector";
  TVM_DECLARE_BASE_OBJECT_INFO(MetricCollectorNode, Object);
};

/*! \brief Wrapper for `MetricCollectorNode`. */
class MetricCollector : public ObjectRef {
 public:
  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(MetricCollector, ObjectRef, MetricCollectorNode);
};

/*! \brief Information about a single function or operator call. */
struct CallFrame {
  /*! \brief Device on which the call was made */
  Device dev;
  /*! \brief Name of the function or op */
  String name;
  /*! \brief Runtime of the function or op */
  Timer timer;
  /*! \brief Extra performance metrics */
  std::unordered_map<std::string, ObjectRef> extra_metrics;
  /*! \brief User defined metric collectors. Each pair is the MetricCollector and its
   * associated data (returned from MetricCollector.Start).
   */
  std::vector<std::pair<MetricCollector, ObjectRef>> extra_collectors;
};

/*! \brief Runtime profiler for function and/or operator calls. Used in the graph
 * runtime and VM to provide profiling information for all operators.
 *
 * Example usage:
 * \code{.cpp}
 * Device cpu, gpu;
 * Profiler prof({cpu, gpu});
 * my_gpu_kernel(); // do a warmup iteration
 * prof.Start();
 * prof.StartCall("my_gpu_kernel", gpu);
 * my_gpu_kernel();
 * prof.StopCall();
 * prof.StartCall("my_cpu_function", cpu);
 * my_cpu_function();
 * prof.StopCall();
 * prof.Stop();
 * std::cout << prof.Report() << std::endl; // print profiling report
 * \endcode
 */
class Profiler {
 public:
  /*! \brief Constructor.
   *
   * The profiler should be constructed before you do any warmup iterations.
   *
   * \note
   * Calling this constructor will reset the TVM threadpool. It is necessary in
   * order to install thread handlers required by certain collectors.
   *
   * \param devs The list of devices the profiler will be running on. Should
   *             include all devices used by profiled operators.
   * \param metric_collectors Additional `MetricCollector`s to use with this profiler.
   * \param configuration Additional configuration data to add to the outputted profiling report.
   */
  explicit Profiler(std::vector<Device> devs, std::vector<MetricCollector> metric_collectors,
                    std::unordered_map<String, ObjectRef> configuration = {});
  /*! \brief Start the profiler.
   *
   * This function should only be called once per object.
   */
  void Start();
  /*! \brief Stop the profiler.
   *
   * This function should only be called once per object after start has been called.
   */
  void Stop();
  /*! \brief Start a function call.
   * \param name The name of the function being called.
   * \param dev The device on which the function is running.
   * \param extra_metrics Optional additional profiling information to add to
   * the frame (input sizes, allocations).
   *
   * `StartCall` may be nested, but each `StartCall` needs a matching
   * `StopCall`. Function calls are stopped in LIFO order, so calls to
   * `StartCall` and `StopCall` must be nested properly.
   */
  void StartCall(String name, Device dev,
                 std::unordered_map<std::string, ObjectRef> extra_metrics = {});
  /*! \brief Stop the last `StartCall`.
   * \param extra_metrics Optional additional profiling information to add to
   * the frame (input sizes, allocations).
   */
  void StopCall(std::unordered_map<std::string, ObjectRef> extra_metrics = {});
  /*! \brief A report of total runtime between `Start` and `Stop` as
   *  well as individual statistics for each `StartCall`-`StopCall` pair.
   *  \returns A `Report` that can either be formatted as CSV (with `.AsCSV`)
   *  or as a human readable table (with `.AsTable`).
   */
  profiling::Report Report();
  /*! \brief Check if the profiler is currently running.
   * \returns Whether or not the profiler is running.
   */
  bool IsRunning() const { return is_running_; }

 private:
  std::vector<Device> devs_;
  bool is_running_{false};
  std::vector<CallFrame> calls_;
  std::stack<CallFrame> in_flight_;
  std::vector<MetricCollector> collectors_;
  std::unordered_map<String, ObjectRef> configuration_;
};

/*! \brief A duration in time. */
class DurationNode : public Object {
 public:
  /*! \brief The duration as a floating point number of microseconds. */
  double microseconds;

  /*! \brief Construct a new duration.
   * \param a The duration in microseconds.
   */
  explicit DurationNode(double a) : microseconds(a) {}

  static constexpr const char* _type_key = "runtime.profiling.Duration";
  TVM_DECLARE_FINAL_OBJECT_INFO(DurationNode, Object);
};

/*! \brief A percentage of something. */
class PercentNode : public Object {
 public:
  /*! \brief The percent as a floating point value out of 100%. i.e. if `percent` is 10 then we
   * have 10%.
   */
  double percent;

  /*! \brief Construct a new percentage.
   * \param a The percentage out of 100.
   */
  explicit PercentNode(double a) : percent(a) {}

  static constexpr const char* _type_key = "runtime.profiling.Percent";
  TVM_DECLARE_FINAL_OBJECT_INFO(PercentNode, Object);
};

/*! \brief A count of something. */
class CountNode : public Object {
 public:
  /*! \brief The actual count */
  int64_t value;

  /*! \brief Construct a new count.
   * \param a The count.
   */
  explicit CountNode(int64_t a) : value(a) {}

  static constexpr const char* _type_key = "runtime.profiling.Count";
  TVM_DECLARE_FINAL_OBJECT_INFO(CountNode, Object);
};

/*! \brief A ratio of two things. */
class RatioNode : public Object {
 public:
  /*! \brief The ratio as a double precision floating point number. */
  double ratio;

  /*! \brief Construct a new ratio.
   * \param a The ratio.
   */
  explicit RatioNode(double a) : ratio(a) {}

  static constexpr const char* _type_key = "runtime.profiling.Ratio";
  TVM_DECLARE_FINAL_OBJECT_INFO(RatioNode, Object);
};

/*! \brief String representation of an array of NDArray shapes
 *  \param shapes Array of NDArrays to get the shapes of.
 *  \return A textual representation of the shapes. For example: `float32[2], int64[1, 2]`.
 */
String ShapeString(const std::vector<NDArray>& shapes);
/*! \brief String representation of shape encoded as an NDArray
 *  \param shape NDArray containing the shape.
 *  \param dtype The dtype of the shape.
 *  \return A textual representation of the shape. For example: `float32[2]`.
 */
String ShapeString(NDArray shape, DLDataType dtype);
/*! \brief String representation of a shape encoded as a vector
 *  \param shape Shape as a vector of integers.
 *  \param dtype The dtype of the shape.
 *  \return A textual representation of the shape. For example: `float32[2]`.
 */
String ShapeString(const std::vector<int64_t>& shape, DLDataType dtype);

/*! \brief Collect performance information of a function execution. Usually
 * used with a compiled PrimFunc (via tvm.build).
 *
 * This information can include performance counters like cache hits and FLOPs
 * that are useful in debugging performance issues of individual PrimFuncs.
 * Different metrics can be collected depending on which MetricCollector is
 * used.
 *
 * Example usage:
 * \code{.cpp}
 * // Use PAPI to measure the number of floating point operations.
 * PackedFunc profiler = ProfileFunction(
 *     mod, "main", kDLCPU, 0, 0, {CreatePAPIMetricCollector({{kDLCPU, 0}, {"PAPI_FP_OPS"}})});
 * Report r = profiler(arg1, arg2, arg);
 * std::cout << r << std::endl;
 * \endcode
 *
 * \param mod Module to profile. Usually a PrimFunc that has been compiled to machine code.
 * \param func_name Name of function to run in the module.
 * \param device_type Device type to run on. Profiling will include performance
 *                    metrics specific to this device type.
 * \param device_id Id of device to run on.
 * \param warmup_iters Number of iterations of the function to run before collecting
 *                     performance information. Recommend to set this larger
 *                     than 0 so that cache effects are consistent.
 * \param collectors List of different
 *                   ways to collect metrics. See MetricCollector.
 * \returns A PackedFunc which takes the same arguments as the `mod[func_name]`
 *          and returns performance metrics as a `Map<String, ObjectRef>` where
 *          values can be `CountNode`, `DurationNode`, `PercentNode`.
 */
PackedFunc ProfileFunction(Module mod, std::string func_name, int device_type, int device_id,
                           int warmup_iters, Array<MetricCollector> collectors);

/*!
 * \brief Wrap a timer function to measure the time cost of a given packed function.
 *
 * Approximate implementation:
 * \code{.py}
 * f() // warmup
 * for i in range(repeat)
 *   f_preproc()
 *   while True:
 *     start = time()
 *     for j in range(number):
 *       f()
 *     duration_ms = time() - start
 *     if duration_ms >= min_repeat_ms:
 *       break
 *     else:
 *       number = (min_repeat_ms / (duration_ms / number)) + 1
 *   if cooldown_interval_ms and i % repeats_to_cooldown == 0:
 *     sleep(cooldown_interval_ms)
 * \endcode
 *
 * \param f The function argument.
 * \param dev The device.
 * \param number The number of times to run this function for taking average.
 *        We call these runs as one `repeat` of measurement.
 * \param repeat The number of times to repeat the measurement.
 *        In total, the function will be invoked (1 + number x repeat) times,
 *        where the first one is warm up and will be discarded.
 *        The returned result contains `repeat` costs,
 *        each of which is an average of `number` costs.
 * \param min_repeat_ms The minimum duration of one `repeat` in milliseconds.
 *        By default, one `repeat` contains `number` runs. If this parameter is set,
 *        the parameters `number` will be dynamically adjusted to meet the
 *        minimum duration requirement of one `repeat`.
 *        i.e., When the run time of one `repeat` falls below this time,
 *        the `number` parameter will be automatically increased.
 * \param limit_zero_time_iterations The maximum number of repeats when
 *        measured time is equal to 0. It helps to avoid hanging during measurements.
 * \param cooldown_interval_ms The cooldown interval in milliseconds between the number of repeats
 *        defined by `repeats_to_cooldown`.
 * \param repeats_to_cooldown The number of repeats before the
 *        cooldown is activated.
 * \param f_preproc The function to be executed before we execute time
 *        evaluator.
 * \return f_timer A timer function.
 */
PackedFunc WrapTimeEvaluator(PackedFunc f, Device dev, int number, int repeat, int min_repeat_ms,
                             int limit_zero_time_iterations, int cooldown_interval_ms,
                             int repeats_to_cooldown, PackedFunc f_preproc = nullptr);

}  // namespace profiling
}  // namespace runtime
}  // namespace tvm

#endif  // TVM_RUNTIME_PROFILING_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/registry.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/runtime/registry.h
 * \brief This file defines the TVM global function registry.
 *
 *  The registered functions will be made available to front-end
 *  as well as backend users.
 *
 *  The registry stores type-erased functions.
 *  Each registered function is automatically exposed
 *  to front-end language(e.g. python).
 *
 *  Front-end can also pass callbacks as PackedFunc, or register
 *  them into the same global registry in C++.
 *  The goal is to mix the front-end language and the TVM back-end.
 *
 * \code
 *   // register the function as MyAPIFuncName
 *   TVM_REGISTER_GLOBAL(MyAPIFuncName)
 *   .set_body([](TVMArgs args, TVMRetValue* rv) {
 *     // my code.
 *   });
 * \endcode
 */
#ifndef TVM_RUNTIME_REGISTRY_H_
#define TVM_RUNTIME_REGISTRY_H_

#include <tvm/runtime/packed_func.h>

#include <string>
#include <type_traits>
#include <utility>
#include <vector>

namespace tvm {
namespace runtime {

/*!
 * \brief Check if signals have been sent to the process and if so
 *  invoke the registered signal handler in the frontend environment.
 *
 *  When running TVM in another language (Python), the signal handler
 *  may not be immediately executed, but instead the signal is marked
 *  in the interpreter state (to ensure non-blocking of the signal handler).
 *
 *  This function can be explicitly invoked to check the cached signal
 *  and run the related processing if a signal is marked.
 *
 *  On Linux, when siginterrupt() is set, invoke this function whenever a syscall returns EINTR.
 *  When it is not set, invoke it between long-running syscalls when you will not immediately
 *  return to the frontend. On Windows, the same rules apply, but due to differences in signal
 *  processing, these are likely to only make a difference when used with Ctrl+C and socket calls.
 *
 *  Not inserting this function will not cause any correctness
 *  issue, but will delay invoking the Python-side signal handler until the function returns to
 *  the Python side. This means that the effect of e.g. pressing Ctrl+C or sending signals to the
 *  process will be delayed until function return. When a C function is blocked on a syscall
 *  such as accept(), it needs to be called when EINTR is received.
 *  So this function is not needed in most API functions, which can finish quickly in a
 *  reasonable, deterministic amount of time.
 *
 * \code
 *
 * int check_signal_every_k_iter = 10;
 *
 * for (int iter = 0; iter < very_large_number; ++iter) {
 *   if (iter % check_signal_every_k_iter == 0) {
 *     tvm::runtime::EnvCheckSignals();
 *   }
 *   // do work here
 * }
 *
 * \endcode
 *
 * \note This function is a nop when no PyErr_CheckSignals is registered.
 *
 * \throws This function throws an exception when the frontend signal handler
 *         indicates an error has happened, otherwise it returns normally.
 */
TVM_DLL void EnvCheckSignals();

/*! \brief Registry for global function */
class Registry {
 public:
  /*!
   * \brief set the body of the function to be f
   * \param f The body of the function.
   */
  TVM_DLL Registry& set_body(PackedFunc f);  // NOLINT(*)
  /*!
   * \brief set the body of the function to be f
   * \param f The body of the function.
   */
  template <typename TCallable,
            typename = typename std::enable_if_t<
                std::is_convertible<TCallable, std::function<void(TVMArgs, TVMRetValue*)>>::value &&
                !std::is_base_of<PackedFunc, TCallable>::value>>
  Registry& set_body(TCallable f) {  // NOLINT(*)
    return set_body(PackedFunc(f));
  }
  /*!
   * \brief set the body of the function to the given function.
   *        Note that this will ignore default arg values and always require all arguments to be
   * provided.
   *
   * \code
   *
   * int multiply(int x, int y) {
   *   return x * y;
   * }
   *
   * TVM_REGISTER_GLOBAL("multiply")
   * .set_body_typed(multiply); // will have type int(int, int)
   *
   * // will have type int(int, int)
   * TVM_REGISTER_GLOBAL("sub")
   * .set_body_typed([](int a, int b) -> int { return a - b; });
   *
   * \endcode
   *
   * \param f The function to forward to.
   * \tparam FLambda The signature of the function.
   */
  template <typename FLambda>
  Registry& set_body_typed(FLambda f) {
    using FType = typename detail::function_signature<FLambda>::FType;
    return set_body(TypedPackedFunc<FType>(std::move(f), name_).packed());
  }

  /*!
   * \brief set the body of the function to be the passed method pointer.
   *        Note that this will ignore default arg values and always require all arguments to be
   * provided.
   *
   * \code
   *
   * // node subclass:
   * struct Example {
   *    int doThing(int x);
   * }
   * TVM_REGISTER_GLOBAL("Example_doThing")
   * .set_body_method(&Example::doThing); // will have type int(Example, int)
   *
   * \endcode
   *
   * \param f the method pointer to forward to.
   * \tparam T the type containing the method (inferred).
   * \tparam R the return type of the function (inferred).
   * \tparam Args the argument types of the function (inferred).
   */
  template <typename T, typename R, typename... Args>
  Registry& set_body_method(R (T::*f)(Args...)) {
    using R_ = typename std::remove_reference<R>::type;
    auto fwrap = [f](T target, Args... params) -> R_ {
      // call method pointer
      return (target.*f)(params...);
    };
    return set_body(TypedPackedFunc<R_(T, Args...)>(fwrap, name_));
  }

  /*!
   * \brief set the body of the function to be the passed method pointer.
   *        Note that this will ignore default arg values and always require all arguments to be
   * provided.
   *
   * \code
   *
   * // node subclass:
   * struct Example {
   *    int doThing(int x);
   * }
   * TVM_REGISTER_GLOBAL("Example_doThing")
   * .set_body_method(&Example::doThing); // will have type int(Example, int)
   *
   * \endcode
   *
   * \param f the method pointer to forward to.
   * \tparam T the type containing the method (inferred).
   * \tparam R the return type of the function (inferred).
   * \tparam Args the argument types of the function (inferred).
   */
  template <typename T, typename R, typename... Args>
  Registry& set_body_method(R (T::*f)(Args...) const) {
    auto fwrap = [f](const T target, Args... params) -> R {
      // call method pointer
      return (target.*f)(params...);
    };
    return set_body(TypedPackedFunc<R(const T, Args...)>(fwrap, name_));
  }

  /*!
   * \brief set the body of the function to be the passed method pointer.
   *        Used when calling a method on a Node subclass through a ObjectRef subclass.
   *        Note that this will ignore default arg values and always require all arguments to be
   * provided.
   *
   * \code
   *
   * // node subclass:
   * struct ExampleNode: BaseNode {
   *    int doThing(int x);
   * }
   *
   * // noderef subclass
   * struct Example;
   *
   * TVM_REGISTER_GLOBAL("Example_doThing")
   * .set_body_method<Example>(&ExampleNode::doThing); // will have type int(Example, int)
   *
   * // note that just doing:
   * // .set_body_method(&ExampleNode::doThing);
   * // wouldn't work, because ExampleNode can't be taken from a TVMArgValue.
   *
   * \endcode
   *
   * \param f the method pointer to forward to.
   * \tparam TObjectRef the node reference type to call the method on
   * \tparam TNode the node type containing the method (inferred).
   * \tparam R the return type of the function (inferred).
   * \tparam Args the argument types of the function (inferred).
   */
  template <typename TObjectRef, typename TNode, typename R, typename... Args,
            typename = typename std::enable_if<std::is_base_of<ObjectRef, TObjectRef>::value>::type>
  Registry& set_body_method(R (TNode::*f)(Args...)) {
    auto fwrap = [f](TObjectRef ref, Args... params) {
      TNode* target = ref.operator->();
      // call method pointer
      return (target->*f)(params...);
    };
    return set_body(TypedPackedFunc<R(TObjectRef, Args...)>(fwrap, name_));
  }

  /*!
   * \brief set the body of the function to be the passed method pointer.
   *        Used when calling a method on a Node subclass through a ObjectRef subclass.
   *        Note that this will ignore default arg values and always require all arguments to be
   * provided.
   *
   * \code
   *
   * // node subclass:
   * struct ExampleNode: BaseNode {
   *    int doThing(int x);
   * }
   *
   * // noderef subclass
   * struct Example;
   *
   * TVM_REGISTER_GLOBAL("Example_doThing")
   * .set_body_method<Example>(&ExampleNode::doThing); // will have type int(Example, int)
   *
   * // note that just doing:
   * // .set_body_method(&ExampleNode::doThing);
   * // wouldn't work, because ExampleNode can't be taken from a TVMArgValue.
   *
   * \endcode
   *
   * \param f the method pointer to forward to.
   * \tparam TObjectRef the node reference type to call the method on
   * \tparam TNode the node type containing the method (inferred).
   * \tparam R the return type of the function (inferred).
   * \tparam Args the argument types of the function (inferred).
   */
  template <typename TObjectRef, typename TNode, typename R, typename... Args,
            typename = typename std::enable_if<std::is_base_of<ObjectRef, TObjectRef>::value>::type>
  Registry& set_body_method(R (TNode::*f)(Args...) const) {
    auto fwrap = [f](TObjectRef ref, Args... params) {
      const TNode* target = ref.operator->();
      // call method pointer
      return (target->*f)(params...);
    };
    return set_body(TypedPackedFunc<R(TObjectRef, Args...)>(fwrap, name_));
  }

  /*!
   * \brief Register a function with given name
   * \param name The name of the function.
   * \param override Whether allow override existing function.
   * \return Reference to the registry.
   */
  TVM_DLL static Registry& Register(const std::string& name, bool override = false);  // NOLINT(*)
  /*!
   * \brief Erase global function from registry, if exist.
   * \param name The name of the function.
   * \return Whether function exist.
   */
  TVM_DLL static bool Remove(const std::string& name);
  /*!
   * \brief Get the global function by name.
   * \param name The name of the function.
   * \return pointer to the registered function,
   *   nullptr if it does not exist.
   */
  TVM_DLL static const PackedFunc* Get(const std::string& name);  // NOLINT(*)
  /*!
   * \brief Get the names of currently registered global function.
   * \return The names
   */
  TVM_DLL static std::vector<std::string> ListNames();

  // Internal class.
  struct Manager;

 protected:
  /*! \brief name of the function */
  std::string name_;
  /*! \brief internal packed function */
  PackedFunc func_;
  friend struct Manager;
};

#define TVM_FUNC_REG_VAR_DEF static TVM_ATTRIBUTE_UNUSED ::tvm::runtime::Registry& __mk_##TVM

/*!
 * \brief Register a function globally.
 * \code
 *   TVM_REGISTER_GLOBAL("MyPrint")
 *   .set_body([](TVMArgs args, TVMRetValue* rv) {
 *   });
 * \endcode
 */
#define TVM_REGISTER_GLOBAL(OpName) \
  TVM_STR_CONCAT(TVM_FUNC_REG_VAR_DEF, __COUNTER__) = ::tvm::runtime::Registry::Register(OpName)

#define TVM_STRINGIZE_DETAIL(x) #x
#define TVM_STRINGIZE(x) TVM_STRINGIZE_DETAIL(x)
#define TVM_DESCRIBE(...) describe(__VA_ARGS__ "\n\nFrom:" __FILE__ ":" TVM_STRINGIZE(__LINE__))
/*!
 * \brief Macro to include current line as string
 */
#define TVM_ADD_FILELINE "\n\nDefined in " __FILE__ ":L" TVM_STRINGIZE(__LINE__)

}  // namespace runtime
}  // namespace tvm
#endif  // TVM_RUNTIME_REGISTRY_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/serializer.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/runtime/serializer.h * \brief Serializer extension to support TVM data types * Include this file to enable serialization of DLDataType, DLDevice */ #ifndef TVM_RUNTIME_SERIALIZER_H_ #define TVM_RUNTIME_SERIALIZER_H_ #include <dmlc/io.h> #include <dmlc/serializer.h> #include <tvm/runtime/c_runtime_api.h> #include <tvm/runtime/ndarray.h> namespace dmlc { namespace serializer { template <> struct Handler<DLDataType> { inline static void Write(Stream* strm, const DLDataType& dtype) { Handler<uint8_t>::Write(strm, dtype.code); Handler<uint8_t>::Write(strm, dtype.bits); Handler<uint16_t>::Write(strm, dtype.lanes); } inline static bool Read(Stream* strm, DLDataType* dtype) { if (!Handler<uint8_t>::Read(strm, &(dtype->code))) return false; if (!Handler<uint8_t>::Read(strm, &(dtype->bits))) return false; if (!Handler<uint16_t>::Read(strm, &(dtype->lanes))) return false; return true; } }; template <> struct Handler<DLDevice> { inline static void Write(Stream* strm, const DLDevice& dev) { int32_t device_type = static_cast<int32_t>(dev.device_type); Handler<int32_t>::Write(strm, device_type); Handler<int32_t>::Write(strm, dev.device_id); } inline static bool Read(Stream* strm, DLDevice* dev) 
{ int32_t device_type = 0; if (!Handler<int32_t>::Read(strm, &(device_type))) return false; dev->device_type = static_cast<DLDeviceType>(device_type); if (!Handler<int32_t>::Read(strm, &(dev->device_id))) return false; return true; } }; } // namespace serializer } // namespace dmlc #endif // TVM_RUNTIME_SERIALIZER_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/threading_backend.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/runtime/threading_backend.h
 * \brief Utilities for manipulating thread pool threads.
 */
#ifndef TVM_RUNTIME_THREADING_BACKEND_H_
#define TVM_RUNTIME_THREADING_BACKEND_H_

#include <functional>
#include <memory>
#include <vector>

#if defined(__linux__) || defined(__ANDROID__)
#if defined(__ANDROID__)
// Fallback shims: provide cpu_set_t and the CPU_* affinity macros when the
// Android C library does not define them (guarded by #ifndef CPU_SET).
// NOTE(review): presumably needed only for older NDK/libc versions — confirm.
#ifndef CPU_SET
#define CPU_SETSIZE 1024
#define __NCPUBITS (8 * sizeof(uint64_t))
typedef struct {
  uint64_t __bits[CPU_SETSIZE / __NCPUBITS];
} cpu_set_t;

#define CPU_SET(cpu, cpusetp) \
  ((cpusetp)->__bits[(cpu) / __NCPUBITS] |= (1UL << ((cpu) % __NCPUBITS)))
#define CPU_ZERO(cpusetp) memset((cpusetp), 0, sizeof(cpu_set_t))
#define CPU_ISSET(cpu, cpusetp)   \
  (1UL << ((cpu) % __NCPUBITS)) == \
      ((cpusetp)->__bits[(cpu) / __NCPUBITS] & (1UL << ((cpu) % __NCPUBITS)))
#define CPU_EQUAL(left, right) (memcmp(&left, &right, sizeof(cpu_set_t)) == 0)
#endif
#endif
#endif

namespace tvm {
namespace runtime {
namespace threading {

/*!
 * \brief A platform-agnostic abstraction for managing a collection of
 *        thread pool threads.
 */
class ThreadGroup {
 public:
  // Platform-specific implementation; defined out of line (pimpl idiom).
  class Impl;

  /*!
   * \brief Creates a collection of threads which run a provided function.
   *
   * \param num_workers The total number of worker threads in this group.
   *        Includes the main thread if `exclude_worker0 = true`.
   * \param worker_callback A callback which is run in its own thread.
   *        Receives the worker_id as an argument.
   * \param exclude_worker0 Whether to use the main thread as a worker.
   *        If `true`, worker0 will not be launched in a new thread and
   *        `worker_callback` will only be called for values >= 1. This
   *        allows use of the main thread as a worker.
   */
  ThreadGroup(int num_workers, std::function<void(int)> worker_callback,
              bool exclude_worker0 = false);
  ~ThreadGroup();

  /*!
   * \brief Blocks until all non-main threads in the pool finish.
   */
  void Join();

  /*! \brief CPU-affinity policies used by Configure. */
  enum AffinityMode : int {
    /* Prefer big (performance) cores. */
    kBig = 1,
    /* Prefer little (efficiency) cores. */
    kLittle = -1,
    /* Different threads will get different affinities. */
    kSpecifyOneCorePerThread = -2,
    /* All threads will get the same core group affinity. */
    kSpecifyThreadShareAllCore = -3,
  };

  /*!
   * \brief Configure the CPU id affinity.
   *
   * \param mode The preferred CPU type (1 = big, -1 = little ...).
   * \param nthreads The number of threads to use (0 = use all).
   * \param exclude_worker0 Whether to use the main thread as a worker.
   *        If `true`, worker0 will not be launched in a new thread and
   *        `worker_callback` will only be called for values >= 1. This
   *        allows use of the main thread as a worker.
   * \param cpus A list of CPU ids used to set the 'cpu affinity'.
   *
   * \return The number of workers to use.
   */
  int Configure(AffinityMode mode, int nthreads, bool exclude_worker0,
                std::vector<unsigned int> cpus = {});

 private:
  Impl* impl_;
};

/*!
 * \brief Platform-agnostic no-op.
 */
void Yield();

/*!
 * \return the maximum number of effective workers for this system.
 */
int MaxConcurrency();

/*!
 * \brief Setting the maximum number of available cores.
 */
void SetMaxConcurrency(int value);

/*!
 * \brief Reset the threads in the pool. All current threads are destroyed and
 *        new ones are created.
 *
 * Note that this does nothing when openmp is used.
 */
void ResetThreadPool();

/*!
 * \brief Configuring the CPU affinity mode for the working threads.
 * \param mode The preferred CPU type (1 = big, -1 = little,
 *        -2 = kSpecifyOneCorePerThread, -3 = kSpecifyThreadShareAllCore).
 * \param nthreads The number of threads to use (0 = use all).
 * \param cpus A list of CPUs used to set the 'cpu affinity' for the worker
 *        threads.
 */
TVM_DLL void Configure(tvm::runtime::threading::ThreadGroup::AffinityMode mode, int nthreads,
                       std::vector<unsigned int> cpus);

/*!
 * \brief Get the number of threads being used by the TVM runtime.
 * \returns The number of threads used.
 */
int32_t NumThreads();

}  // namespace threading
}  // namespace runtime
}  // namespace tvm

#endif  // TVM_RUNTIME_THREADING_BACKEND_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/vm/bytecode.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/runtime/vm/bytecode.h
 * \brief The bytecode for Relay virtual machine.
 */
#ifndef TVM_RUNTIME_VM_BYTECODE_H_
#define TVM_RUNTIME_VM_BYTECODE_H_

#include <tvm/runtime/data_type.h>
#include <tvm/runtime/logging.h>

#include <iostream>
#include <vector>

namespace tvm {
namespace runtime {
namespace vm {

/*! \brief A register name. */
using RegName = int64_t;

/*! \brief An alias for the integer type used ubiquitously
 * in the VM. */
using Index = int64_t;

/*! \brief An enumeration of Relay's opcodes.
 *
 * The opcode is used to implement instruction
 * as a tagged union.
 */
enum class Opcode {
  Move = 0U,
  Ret = 1U,
  Invoke = 2U,
  InvokeClosure = 3U,
  InvokePacked = 4U,
  AllocTensor = 5U,
  AllocTensorReg = 6U,
  AllocADT = 7U,
  AllocClosure = 8U,
  GetField = 9U,
  If = 10U,
  LoadConst = 11U,
  Goto = 12U,
  GetTag = 13U,
  LoadConsti = 14U,
  Fatal = 15U,
  AllocStorage = 16U,
  ShapeOf = 17U,
  ReshapeTensor = 18U,
  DeviceCopy = 19U,
  KillRegister = 20U,
};

/*! \brief A single virtual machine instruction.
 *
 * The representation of the instruction is as
 * a tagged union.
 *
 * The first field represents which instruction,
 * and by extension which field of the union
 * is active.
 */
struct Instruction {
  /*! \brief The instruction opcode. */
  Opcode op;

  /*! \brief The destination register. */
  RegName dst;

  // Per-opcode operands. Only the member matching `op` is active; the
  // pointer-valued fields (shape, *_args, free_vars, ...) are owned by the
  // instruction and managed by the copy ctor/assignment/dtor declared below.
  union {
    struct /* AllocTensor Operands */ {
      /*! \brief The storage to allocate from. */
      RegName storage;
      /*! \brief The offset into the storage to allocate from. */
      Index offset;
      /*! \brief The number of dimensions. */
      uint32_t ndim;
      /*! \brief The shape of tensor. */
      int64_t* shape;
      /*! \brief The datatype of tensor to be allocated. */
      DLDataType dtype;
    } alloc_tensor;
    struct /* AllocTensorReg Operands */ {
      /*! \brief The storage to allocate from. */
      RegName storage;
      /*! \brief The offset into the storage to allocate from. */
      Index offset;
      /*! \brief The register to read the shape out of. */
      RegName shape_register;
      /*! \brief The datatype of tensor to be allocated. */
      DLDataType dtype;
    } alloc_tensor_reg;
    struct /* InvokeClosure Operands */ {
      /*! \brief The register containing the closure. */
      RegName closure;
      /*! \brief The number of arguments to the closure. */
      Index num_closure_args;
      /*! \brief The closure arguments as an array. */
      RegName* closure_args;
    };
    struct /* Return Operands */ {
      /*! \brief The register to return. */
      RegName result;
    };
    struct /* Move Operands */ {
      /*! \brief The source register for a move operation. */
      RegName from;
    };
    struct /* InvokePacked Operands */ {
      /*! \brief The index into the packed function table. */
      Index packed_index;
      /*! \brief The arity of the packed function. */
      Index arity;
      /*! \brief The number of outputs produced by the packed function. */
      Index output_size;
      /*! \brief The arguments to pass to the packed function. */
      RegName* packed_args;
    };
    struct /* If Operands */ {
      /*! \brief The register containing the test value. */
      RegName test;
      /*! \brief The register containing the target value. */
      RegName target;
      /*! \brief The program counter offset for the true branch. */
      Index true_offset;
      /*! \brief The program counter offset for the false branch. */
      Index false_offset;
    } if_op;
    struct /* Invoke Operands */ {
      /*! \brief The function to call. */
      Index func_index;
      /*! \brief The number of arguments to the function. */
      Index num_args;
      /*! \brief The registers containing the arguments. */
      RegName* invoke_args_registers;
    };
    struct /* LoadConst Operands */ {
      /* \brief The index into the constant pool. */
      Index const_index;
    };
    struct /* LoadConsti Operands */ {
      /* \brief The immediate integer value to load. */
      Index val;
    } load_consti;
    struct /* Jump Operands */ {
      /*! \brief The jump offset. */
      Index pc_offset;
    };
    struct /* Proj Operands */ {
      /*! \brief The register to project from. */
      RegName object;
      /*! \brief The field to read out. */
      Index field_index;
    };
    struct /* GetTag Operands */ {
      /*! \brief The register to project from. */
      RegName object;
    } get_tag;
    struct /* AllocADT Operands */ {
      // TODO(mbs): Needs a DeviceAndScope.
      /*! \brief The datatype's constructor tag. */
      Index constructor_tag;
      /*! \brief The number of fields to store in the datatype. */
      Index num_fields;
      /*! \brief The fields as an array. */
      RegName* datatype_fields;
    };
    struct /* AllocClosure Operands */ {
      // TODO(mbs): Needs a DeviceAndScope.
      /*! \brief The index into the function table. */
      Index clo_index;
      /*! \brief The number of free variables to capture. */
      Index num_freevar;
      /*! \brief The free variables as an array. */
      RegName* free_vars;
    };
    struct /* AllocStorage Operands */ {
      /*! \brief The size of the allocation. */
      RegName allocation_size;
      /*! \brief The alignment of the allocation. */
      Index alignment;
      /*! \brief The hint of the dtype. */
      DLDataType dtype_hint;
      /*! \brief The index of the device on which the allocation will be made. */
      Index device_index;
    } alloc_storage;
    struct /* ShapeOf Operands */ {
      RegName tensor;
    } shape_of;
    struct /* ReshapeTensor Operands */ {
      RegName tensor;
      RegName newshape;
    } reshape_tensor;
    struct /* DeviceCopy Operands */ {
      RegName src;
      /*! \brief The index of the source device to copy from. */
      Index src_device_index;
      /*! \brief The index of the destination device to copy to. */
      Index dst_device_index;
    } device_copy;
  };

  /*!
   * \brief Construct a return instruction.
   * \param return_reg The register containing the return value.
   * \return The return instruction.
   */
  static Instruction Ret(RegName return_reg);
  /*!
   * \brief Construct a fatal instruction.
   * \return The fatal instruction.
   */
  static Instruction Fatal();
  /*!
   * \brief Construct an invoke packed instruction.
   * \param packed_index The index of the packed function.
   * \param arity The arity of the function.
   * \param output_size The number of outputs of the packed function.
   * \param args The argument registers.
   * \return The invoke packed instruction.
   */
  static Instruction InvokePacked(Index packed_index, Index arity, Index output_size,
                                  const std::vector<RegName>& args);
  /*!
   * \brief Construct an allocate tensor instruction with constant shape.
   * \param storage The storage to allocate out of.
   * \param offset The offset to allocate at.
   * \param shape The shape of the tensor.
   * \param dtype The dtype of the tensor.
   * \param dst The destination register.
   * \return The allocate tensor instruction.
   */
  static Instruction AllocTensor(RegName storage, Index offset, const std::vector<int64_t>& shape,
                                 DLDataType dtype, RegName dst);
  /*!
   * \brief Construct an allocate tensor instruction with register.
   * \param storage The storage to allocate out of.
   * \param offset The offset into the storage to allocate from.
   * \param shape_register The register containing the shape.
   * \param dtype The dtype of the tensor.
   * \param dst The destination register.
   * \return The allocate tensor instruction.
   */
  static Instruction AllocTensorReg(RegName storage, Index offset, RegName shape_register,
                                    DLDataType dtype, RegName dst);
  /*!
   * \brief Construct an allocate datatype instruction.
   * \param tag The datatype tag.
   * \param num_fields The number of fields for the datatype.
   * \param fields The registers containing the fields.
   * \param dst The register name of the destination.
   * \return The allocate instruction tensor.
   */
  static Instruction AllocADT(Index tag, Index num_fields, const std::vector<RegName>& fields,
                              RegName dst);
  /*!
   * \brief Construct an allocate closure instruction.
   * \param func_index The index of the function table.
   * \param num_freevar The number of free variables.
   * \param free_vars The registers of the free variables.
   * \param dst The destination register.
   * \return The allocate closure instruction.
   */
  static Instruction AllocClosure(Index func_index, Index num_freevar,
                                  const std::vector<RegName>& free_vars, RegName dst);
  /*!
   * \brief Construct a get field instruction.
   * \param object_reg The register containing the object to project from.
   * \param field_index The field to read out of the object.
   * \param dst The destination register.
   * \return The get field instruction.
   */
  static Instruction GetField(RegName object_reg, Index field_index, RegName dst);
  /*!
   * \brief Construct a get_tag instruction.
   * \param object_reg The register containing the object to project from.
   * \param dst The destination register.
   * \return The get_tag instruction.
   */
  static Instruction GetTag(RegName object_reg, RegName dst);
  /*!
   * \brief Construct an if instruction.
   * \param test The register containing the test value.
   * \param target The register containing the target value.
   * \param true_branch The offset to the true branch.
   * \param false_branch The offset to the false branch.
   * \return The if instruction.
   */
  static Instruction If(RegName test, RegName target, Index true_branch, Index false_branch);
  /*!
   * \brief Construct a goto instruction.
   * \param pc_offset The offset from the current pc.
   * \return The goto instruction.
   */
  static Instruction Goto(Index pc_offset);
  /*!
   * \brief Construct an invoke instruction.
   * \param func_index The index of the function to invoke.
   * \param args The registers containing the arguments.
   * \param dst The destination register.
   * \return The invoke instruction.
   */
  static Instruction Invoke(Index func_index, const std::vector<RegName>& args, RegName dst);
  /*!
   * \brief Construct an invoke closure instruction.
   * \param closure The register of the closure to invoke.
   * \param args The registers containing the arguments.
   * \param dst The destination register.
   * \return The invoke closure instruction.
   */
  static Instruction InvokeClosure(RegName closure, const std::vector<RegName>& args, RegName dst);
  /*!
   * \brief Construct a load constant instruction.
   * \param const_index The index of the constant.
   * \param dst The destination register.
   * \return The load constant instruction.
   */
  static Instruction LoadConst(Index const_index, RegName dst);
  /*!
   * \brief Construct a load_constanti instruction.
   * \param val The integer constant value.
   * \param dst The destination register.
   * \return The load_constanti instruction.
   */
  static Instruction LoadConsti(Index val, RegName dst);
  /*!
   * \brief Construct a move instruction.
   * \param src The source register.
   * \param dst The destination register.
   * \return The move instruction.
   */
  static Instruction Move(RegName src, RegName dst);
  /*!
   * \brief Allocate a storage block.
   * \param size The size of the allocation.
   * \param alignment The allocation's alignment.
   * \param dtype_hint The data type hint for the allocator.
   * \param device_index The index of the device to allocate on.
   * \param dst The destination to place the storage.
   * \return The alloc storage instruction.
   */
  static Instruction AllocStorage(RegName size, Index alignment, DLDataType dtype_hint,
                                  Index device_index, RegName dst);
  /*!
   * \brief Get the shape of an input tensor.
   * \param tensor The input tensor.
   * \param dst The destination to store the shape of the given tensor.
   * \return The shape of instruction.
   */
  static Instruction ShapeOf(RegName tensor, RegName dst);
  /*!
   * \brief Reshape the tensor given the new shape.
   * \param tensor The input tensor.
   * \param newshape The shape tensor.
   * \param dst The destination to store the output tensor with new shape.
   * \return The reshape tensor instruction.
   */
  static Instruction ReshapeTensor(RegName tensor, RegName newshape, RegName dst);
  /*!
   * \brief Copy tensor cross different devices.
   * \param src The source register.
   * \param src_device_index The index of the device holding the tensor in the source register.
   * \param dst_device_index The index of the device to hold the tensor in the destination
   *        register.
   * \param dst The destination register to store the copied tensor.
   * \return The device copy instruction.
   */
  static Instruction DeviceCopy(RegName src, Index src_device_index, Index dst_device_index,
                                RegName dst);

  static Instruction KillRegister(RegName dst);

  // Rule of five: the union holds heap-allocated operand arrays, so copy,
  // assignment and destruction need explicit, opcode-aware implementations.
  Instruction();
  Instruction(const Instruction& instr);
  Instruction& operator=(const Instruction& instr);
  ~Instruction();

  friend std::ostream& operator<<(std::ostream& os, const Instruction&);
};

}  // namespace vm
}  // namespace runtime
}  // namespace tvm

#endif  // TVM_RUNTIME_VM_BYTECODE_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/vm/executable.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/runtime/vm/executable.h
 * \brief The Relay virtual machine executable.
 */
#ifndef TVM_RUNTIME_VM_EXECUTABLE_H_
#define TVM_RUNTIME_VM_EXECUTABLE_H_

#include <tvm/runtime/container/map.h>
#include <tvm/runtime/container/string.h>
#include <tvm/runtime/module.h>
#include <tvm/runtime/object.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/vm/bytecode.h>

#include <map>
#include <string>
#include <unordered_map>
#include <vector>

namespace tvm {
namespace runtime {
namespace vm {

struct VMFunction;

/*!
 * \brief The executable emitted by the VM compiler.
 *
 * The executable contains information (e.g. data in different memory regions)
 * to run in a virtual machine.
 *
 *  - Global section, containing all globals.
 *  - Constant section, storing the constant pool.
 *  - Primitive name section, containing the function name of the primitive ops
 *    used by the virtual machine.
 *  - Code section, handling the VM functions and bytecode.
 */
class TVM_DLL Executable : public ModuleNode {
 public:
  /*!
   * \brief Get a PackedFunc from an executable module.
   *
   * \param name the name of the function.
   * \param sptr_to_self The shared_ptr that points to this module node.
   *
   * \return PackedFunc or nullptr when it is not available.
   */
  PackedFunc GetFunction(const std::string& name, const ObjectPtr<Object>& sptr_to_self) final;

  /*!
   * \brief Write the Executable to the binary stream in serialized form.
   *
   * Late-bound constants (if any) must have already been saved by \p
   * MoveLateBoundConstantsToBinary.
   *
   * \param stream The binary stream to save the executable to.
   */
  void SaveToBinary(dmlc::Stream* stream) final;

  /*!
   * \brief Write the Executable to the provided path as a file containing its serialized content.
   *
   * Late-bound constants (if any) must have already been saved by \p
   * MoveLateBoundConstantsToBinary.
   *
   * \param path The path to write the serialized data to.
   * \param format The format of the serialized blob.
   */
  void SaveToFile(const std::string& path, const std::string& format) final;

  /*!
   * \brief Serialize the executable into global section, constant section, and
   * code section. This object must outlive the returned byte array.
   *
   * Late-bound constants (if any) must have already been saved by \p
   * MoveLateBoundConstantsToBinary.
   *
   * \return The binary representation of the VM.
   */
  TVMByteArray Save();

  /*!
   * \brief Load the saved VM executable.
   *
   * Late-bound constants (if any) must then be loaded by \p LoadLateBoundConstantsFromBinary.
   *
   * \param code The bytecode in string.
   * \param lib The compiled runtime library.
   *
   * \return exe The constructed executable.
   */
  static runtime::Module Load(const std::string& code, const runtime::Module lib);

  /*!
   * \brief Returns the late-bound constants for the executable (if any) as a byte-stream.
   * Leaves the executable's late-bound constants map empty. Only constants whose byte
   * tensor size is greater than or equal to \p byte_limit are marked as late-bound. \p byte_limit
   * may be zero.
   *
   * Must be called before \p SaveToBinary and friends if late-bound constants are
   * desired. Otherwise can be ignored.
   */
  void MoveLateBoundConstantsToStream(dmlc::Stream* stream, size_t byte_limit);

  /*!
   * \brief As for \p MoveLateBoundConstantsToStream, but save to file at \p path.
   */
  void MoveLateBoundConstantsToFile(const std::string& path, size_t byte_limit);

  /*!
   * \brief Get a map of all constants larger than byte_limit in size.
   */
  Map<String, NDArray> GetLateBoundConstants(size_t byte_limit);

  /*!
   * \brief Restores the late-bound constants for the executable (if any) from given byte-stream.
   *
   * Must be called after \p Load but before any other methods if \p MoveLateBoundConstantsToBinary
   * was used when saving. Otherwise can be ignored.
   */
  void LoadLateBoundConstantsFromStream(dmlc::Stream* stream);

  /*!
   * \brief Restores the late-bound constants for the executable (if any) from given map.
   *
   * Must be called after \p Load but before any other methods if \p MoveLateBoundConstantsToBinary
   * was used when saving. Otherwise can be ignored.
   */
  void LoadLateBoundConstantsFromMap(Map<String, NDArray> map);

  /*!
   * \brief As for \p LoadLateBoundConstantsFromStream, but load from file at \p path.
   */
  void LoadLateBoundConstantsFromFile(const std::string& path);

  /*!
   * \brief Get the serialized form of the `functions`. This is
   * essentially bytecode serialization.
   *
   * \return The serialized vm bytecode.
   *
   * \note The bytecode is in the following format:
   *   func_name reg_file_size num_instructions
   *   param1 param2 ... paramM
   *   instruction1
   *   instruction2
   *   ...
   *   instructionN
   *
   * Each instruction is printed in the following format:
   *   opcode num_fields field1 ... fieldX # The text format.
   *
   * Serializing an `Instruction` requires us to deal with the bytecode. Each line
   * of the instructions could be serialized as the following format:
   *   hash, opcode, f1, f2, ..., fX, field with variable length
   *   1. hash: the hash of the instruction. This number will be used to help us
   *      validate if an instruction is well-formed during deserialization.
   *   2. opcode: the opcode code of the instruction.
   *   3. f1, f2, ..., fX. These fields together represent the fixed fields in
   *      an instruction, e.g., `from` and `dst` fields of a `Move` instruction. For
   *      example, `DLDataType` will be unpacked into three fields (code, bits, lanes).
   *   4. The rest of the line indicates the field with variable length, e.g.,
   *      the shape of a tensor, the args used by an `InvokePacked` instruction, etc.
   *
   * The field starting from # is only used for debugging. The serialized code
   * doesn't contain it, therefore the deserializer doesn't need to handle it.
   */
  std::string GetBytecode() const;

  /*!
   * \brief Returns a description of all the constants in the executable in human-readable
   * format. Intended for debugging and diff-testing.
   */
  std::string GetConstants() const;

  /*!
   * \brief Returns a description of all the (virtual) devices in the executable in human-readable
   * format. Intended for debugging and diff-testing.
   */
  std::string GetVirtualDevices() const;

  /*!
   * \brief Returns a description of all the 'primitive' (ie PackedFuncs) in the executable in
   * human-readable format. These correspond either to PrimFuncs we've compiled locally, or
   * functions compiled by a BYOC external codegen. Intended for debugging and diff-testing.
   */
  std::string GetPrimitives() const;

  /*!
   * \brief Print the detailed statistics of the given code, i.e. number of
   * globals and constants, etc.
   */
  std::string Stats() const;

  /*!
   * \brief Get the `lib` module in an executable. Users have the flexibility to call
   * `export_library` from the frontend to save the library to disk.
   *
   * \return The runtime module that contains the hardware dependent code.
   */
  runtime::Module GetLib() const;

  /*!
   * \brief Set the `lib` module in an executable.
   *
   * This allows us to do partial initialization in the case of (de|ser)ialization cases.
   * This method also ensures correct initialization of library ensuring we only Import a
   * single library.
   *
   * NB: This also provides some abstraction over how libraries are stored as there are plans
   * to iterate on the way runtime::Module works in the backend of the compiler.
   */
  void SetLib(const runtime::Module& lib);

  /*!
   * \brief Get VMFunction.
   * \param func_name The function's name.
   * \return VMFunction.
   */
  const VMFunction& GetVMFunctionWithName(const std::string& func_name) const;

  /*!
   * \brief Get the arity of the VMFunction.
   * \param func Function name.
   * \return The number of parameters.
   */
  int GetFunctionArity(std::string func) const;

  /*!
   * \brief Get the parameter name given the function name and parameter index.
   * \param func Function name.
   * \param index Parameter index.
   * \return The parameter name.
   */
  std::string GetFunctionParameterName(std::string func, uint32_t index) const;

  virtual ~Executable() {}

  const char* type_key() const final { return "VMExecutable"; }

  /*!
   * \brief The (compile-time, virtual) devices corresponding to each device index.
   * Currently we only support at most one device per device type.
   */
  std::vector<Device> virtual_devices;
  /*!
   * \brief The device index corresponding to the 'host' device. That will hold and evaluate
   * shape-related data and code.
   */
  int host_device_index = -1;

  /*!
   * \brief The global constant array.
   *
   * LoadConst instructions indexes are w.r.t. this vector. Late-bound constants are removed
   * from this table after saving late-bound constants.
   */
  std::vector<ObjectRef> constants;

  /*!
   * \brief For each constant index the name of the late-bound constant, or null if constant is
   * immediate. Only populated after loading executable but before loading late-bound constants.
   */
  std::vector<String> late_bound_constant_names;

  /*! \brief A map from globals (as strings) to their index in the Relay function map. */
  std::unordered_map<std::string, Index> global_map;
  /*! \brief A mapping from the packed function's global name (as string) to the index that
   * corresponds to the position of the `packed_funcs` list in a `VirtualMachine` object.
   */
  std::unordered_map<std::string, Index> primitive_map;
  /*! \brief The structural hashes of the operators in this function. */
  std::map<Index, Map<String, ObjectRef>> op_attrs;
  /*! \brief The virtual machine's function table. */
  std::vector<VMFunction> functions;
  /*! \brief The index of the device holding each constant. */
  std::vector<Index> const_device_indexes;

 private:
  /*!
   * \brief Save the virtual devices.
   *
   * \param strm The output stream.
   */
  void SaveVirtualDevicesSection(dmlc::Stream* strm);

  /*!
   * \brief Save the globals.
   *
   * \param strm The output stream.
   */
  void SaveGlobalSection(dmlc::Stream* strm);

  /*!
   * \brief Save the constant pool.
   *
   * \param stream The output stream.
   */
  void SaveConstantSection(dmlc::Stream* stream);

  /*!
   * \brief Load the constant pool.
   *
   * \param stream The input stream.
   */
  void LoadConstantSection(dmlc::Stream* stream);

  /*!
   * \brief Save primitive op names.
   *
   * \param strm The output stream.
   */
  void SavePrimitiveOpNames(dmlc::Stream* strm);

  /*!
   * \brief Save the vm functions.
   *
   * \param strm The output stream.
   */
  void SaveCodeSection(dmlc::Stream* strm);

  /*!
   * \brief Load the virtual devices.
   *
   * \param strm The input stream.
   */
  void LoadVirtualDevicesSection(dmlc::Stream* strm);

  /*!
   * \brief Load the globals.
   *
   * \param strm The input stream.
   */
  void LoadGlobalSection(dmlc::Stream* strm);

  /*!
   * \brief Load primitive op names.
   *
   * \param strm The input stream.
   */
  void LoadPrimitiveOpNames(dmlc::Stream* strm);

  /*!
   * \brief Load the vm functions.
   *
   * \param strm The input stream.
   */
  void LoadCodeSection(dmlc::Stream* strm);

  /*! \brief The serialized bytecode. */
  std::string code_;
};

}  // namespace vm
}  // namespace runtime
}  // namespace tvm

#endif  // TVM_RUNTIME_VM_EXECUTABLE_H_
https://github.com/zk-ml/tachikoma
include/tvm/runtime/vm/memory_manager.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * \file tvm/runtime/vm/memory_manager.h
 * \brief Abstract device memory management API
 */
#ifndef TVM_RUNTIME_VM_MEMORY_MANAGER_H_
#define TVM_RUNTIME_VM_MEMORY_MANAGER_H_

#include <tvm/runtime/c_runtime_api.h>
#include <tvm/runtime/ndarray.h>
#include <tvm/runtime/object.h>

#include <functional>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <vector>

namespace tvm {
namespace runtime {
namespace vm {

/*! \brief A raw allocation produced by an Allocator. */
struct Buffer {
  /*! \brief The pointer to the allocated block of memory. */
  void* data{nullptr};
  /*! \brief The size of the block. */
  size_t size{0};
  /*! \brief The device the buffer was allocated on. */
  Device device;
};

/*! \brief The kinds of Allocator implementations available. */
enum AllocatorType {
  kNaive = 1,
  kPooled,
};

/*! \brief Abstract base class for device memory allocators. */
class Allocator {
 public:
  explicit Allocator(AllocatorType type) : type_(type) {}
  virtual ~Allocator() = default;
  /*! \brief Allocate an empty NDArray from the allocator.
   *  \param shape The shape of the NDArray.
   *  \param dtype The datatype of the NDArray.
   *  \param dev The device where the array is allocated.
   *  \return The empty NDArray.
   */
  NDArray Empty(std::vector<int64_t> shape, DLDataType dtype, Device dev);
  /*! \brief Return the allocator type. */
  inline AllocatorType type() const { return type_; }
  /*! \brief Allocate a buffer given a size, alignment and type.
   *  \param nbytes The size of the buffer.
   *  \param alignment The alignment of the buffer.
   *  \param type_hint A type hint to the allocator.
   *  \return A sized allocation in the form of a buffer.
   */
  virtual Buffer Alloc(size_t nbytes, size_t alignment, DLDataType type_hint) = 0;
  /*! \brief Free a buffer allocated by the allocator.
   *  \param buffer The buffer to free.
   */
  virtual void Free(const Buffer& buffer) = 0;
  /*! \brief The amount of memory currently allocated.
   *  \return The amount of memory currently allocated.
   */
  virtual size_t UsedMemory() const = 0;

 private:
  AllocatorType type_;
};

/*! \brief Process-wide registry of per-device allocators (singleton). */
class MemoryManager {
 public:
  static MemoryManager* Global();
  /*!
   * \brief Get or create an allocator given the context and allocator type.
   * \param dev The TVM device
   * \param type The allocator type
   * \return The memory allocator.
   */
  static Allocator* GetOrCreateAllocator(Device dev, AllocatorType type);
  /*!
   * \brief Get an allocator given the context.
   * \param dev The TVM device
   * \return The memory allocator.
   */
  static Allocator* GetAllocator(Device dev);

 private:
  // Singleton: construct only via Global().
  MemoryManager() {}

 private:
  // Guards allocators_; NOTE(review): presumably taken by the static
  // accessors above — confirm in the implementation.
  std::mutex mu_;
  std::unordered_map<Device, std::unique_ptr<Allocator>> allocators_;
};

/*! \brief An object representing a storage allocation. */
class StorageObj : public Object {
 public:
  /*! \brief The underlying storage buffer owned by this object. */
  Buffer buffer;

  /*! \brief Allocate an NDArray from a given piece of storage. */
  NDArray AllocNDArray(size_t offset, std::vector<int64_t> shape, DLDataType dtype);

  /*! \brief The deleter for an NDArray when allocated from underlying storage. */
  static void Deleter(Object* ptr);

  // RAII: returning the buffer to its originating allocator on destruction.
  ~StorageObj() {
    auto alloc = MemoryManager::Global()->GetAllocator(buffer.device);
    alloc->Free(buffer);
  }

  static constexpr const uint32_t _type_index = TypeIndex::kDynamic;
  static constexpr const char* _type_key = "vm.Storage";
  TVM_DECLARE_FINAL_OBJECT_INFO(StorageObj, Object);
};

/*! \brief reference to storage. */
class Storage : public ObjectRef {
 public:
  explicit Storage(Buffer buffer);

  TVM_DEFINE_MUTABLE_OBJECT_REF_METHODS(Storage, ObjectRef, StorageObj);
};

}  // namespace vm
}  // namespace runtime
}  // namespace tvm

#endif  // TVM_RUNTIME_VM_MEMORY_MANAGER_H_
https://github.com/zk-ml/tachikoma